1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2007 Oracle. All rights reserved. 4 */ 5 6 #include <linux/kernel.h> 7 #include <linux/bio.h> 8 #include <linux/buffer_head.h> 9 #include <linux/file.h> 10 #include <linux/fs.h> 11 #include <linux/pagemap.h> 12 #include <linux/highmem.h> 13 #include <linux/time.h> 14 #include <linux/init.h> 15 #include <linux/string.h> 16 #include <linux/backing-dev.h> 17 #include <linux/writeback.h> 18 #include <linux/compat.h> 19 #include <linux/xattr.h> 20 #include <linux/posix_acl.h> 21 #include <linux/falloc.h> 22 #include <linux/slab.h> 23 #include <linux/ratelimit.h> 24 #include <linux/btrfs.h> 25 #include <linux/blkdev.h> 26 #include <linux/posix_acl_xattr.h> 27 #include <linux/uio.h> 28 #include <linux/magic.h> 29 #include <linux/iversion.h> 30 #include <linux/swap.h> 31 #include <linux/sched/mm.h> 32 #include <asm/unaligned.h> 33 #include "misc.h" 34 #include "ctree.h" 35 #include "disk-io.h" 36 #include "transaction.h" 37 #include "btrfs_inode.h" 38 #include "print-tree.h" 39 #include "ordered-data.h" 40 #include "xattr.h" 41 #include "tree-log.h" 42 #include "volumes.h" 43 #include "compression.h" 44 #include "locking.h" 45 #include "free-space-cache.h" 46 #include "inode-map.h" 47 #include "backref.h" 48 #include "props.h" 49 #include "qgroup.h" 50 #include "delalloc-space.h" 51 #include "block-group.h" 52 53 struct btrfs_iget_args { 54 struct btrfs_key *location; 55 struct btrfs_root *root; 56 }; 57 58 struct btrfs_dio_data { 59 u64 reserve; 60 u64 unsubmitted_oe_range_start; 61 u64 unsubmitted_oe_range_end; 62 int overwrite; 63 }; 64 65 static const struct inode_operations btrfs_dir_inode_operations; 66 static const struct inode_operations btrfs_symlink_inode_operations; 67 static const struct inode_operations btrfs_dir_ro_inode_operations; 68 static const struct inode_operations btrfs_special_inode_operations; 69 static const struct inode_operations btrfs_file_inode_operations; 70 static const struct address_space_operations btrfs_aops; 71 static const struct file_operations btrfs_dir_file_operations; 72 static const struct extent_io_ops btrfs_extent_io_ops; 73 74 static struct kmem_cache *btrfs_inode_cachep; 75 struct kmem_cache *btrfs_trans_handle_cachep; 76 struct kmem_cache *btrfs_path_cachep; 77 struct kmem_cache *btrfs_free_space_cachep; 78 struct kmem_cache *btrfs_free_space_bitmap_cachep; 79 80 static int btrfs_setsize(struct inode *inode, struct iattr *attr); 81 static int btrfs_truncate(struct inode *inode, bool skip_writeback); 82 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); 83 static noinline int cow_file_range(struct inode *inode, 84 struct page *locked_page, 85 u64 start, u64 end, int *page_started, 86 unsigned long *nr_written, int unlock); 87 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, 88 u64 orig_start, u64 block_start, 89 u64 block_len, u64 orig_block_len, 90 u64 ram_bytes, int compress_type, 91 int type); 92 93 static void __endio_write_update_ordered(struct inode *inode, 94 const u64 offset, const u64 bytes, 95 const bool uptodate); 96 97 /* 98 * Cleanup all submitted ordered extents in specified range to handle errors 99 * from the btrfs_run_delalloc_range() callback. 
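 * The pages in the range get their Private2 bit cleared and the ordered
 * extents covering the range are finished as failed (not uptodate) via
 * __endio_write_update_ordered().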
100 * 101 * NOTE: caller must ensure that when an error happens, it can not call 102 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING 103 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata 104 * to be released, which we want to happen only when finishing the ordered 105 * extent (btrfs_finish_ordered_io()). 106 */ 107 static inline void btrfs_cleanup_ordered_extents(struct inode *inode, 108 struct page *locked_page, 109 u64 offset, u64 bytes) 110 { 111 unsigned long index = offset >> PAGE_SHIFT; 112 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; 113 u64 page_start = page_offset(locked_page); 114 u64 page_end = page_start + PAGE_SIZE - 1; 115 116 struct page *page; 117 118 while (index <= end_index) { 119 page = find_get_page(inode->i_mapping, index); 120 index++; 121 if (!page) 122 continue; 123 ClearPagePrivate2(page); 124 put_page(page); 125 } 126 127 /* 128 * In case this page belongs to the delalloc range being instantiated 129 * then skip it, since the first page of a range is going to be 130 * properly cleaned up by the caller of run_delalloc_range 131 */ 132 if (page_start >= offset && page_end <= (offset + bytes - 1)) { 133 offset += PAGE_SIZE; 134 bytes -= PAGE_SIZE; 135 } 136 137 return __endio_write_update_ordered(inode, offset, bytes, false); 138 } 139 140 static int btrfs_dirty_inode(struct inode *inode); 141 142 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 143 void btrfs_test_inode_set_ops(struct inode *inode) 144 { 145 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 146 } 147 #endif 148 149 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, 150 struct inode *inode, struct inode *dir, 151 const struct qstr *qstr) 152 { 153 int err; 154 155 err = btrfs_init_acl(trans, inode, dir); 156 if (!err) 157 err = btrfs_xattr_security_init(trans, inode, dir, qstr); 158 return err; 159 } 160 161 /* 162 * this does all the hard work for inserting an inline extent into 163 * the btree. 
The caller should have done a btrfs_drop_extents so that 164 * no overlapping inline items exist in the btree 165 */ 166 static int insert_inline_extent(struct btrfs_trans_handle *trans, 167 struct btrfs_path *path, int extent_inserted, 168 struct btrfs_root *root, struct inode *inode, 169 u64 start, size_t size, size_t compressed_size, 170 int compress_type, 171 struct page **compressed_pages) 172 { 173 struct extent_buffer *leaf; 174 struct page *page = NULL; 175 char *kaddr; 176 unsigned long ptr; 177 struct btrfs_file_extent_item *ei; 178 int ret; 179 size_t cur_size = size; 180 unsigned long offset; 181 182 ASSERT((compressed_size > 0 && compressed_pages) || 183 (compressed_size == 0 && !compressed_pages)); 184 185 if (compressed_size && compressed_pages) 186 cur_size = compressed_size; 187 188 inode_add_bytes(inode, size); 189 190 if (!extent_inserted) { 191 struct btrfs_key key; 192 size_t datasize; 193 194 key.objectid = btrfs_ino(BTRFS_I(inode)); 195 key.offset = start; 196 key.type = BTRFS_EXTENT_DATA_KEY; 197 198 datasize = btrfs_file_extent_calc_inline_size(cur_size); 199 path->leave_spinning = 1; 200 ret = btrfs_insert_empty_item(trans, root, path, &key, 201 datasize); 202 if (ret) 203 goto fail; 204 } 205 leaf = path->nodes[0]; 206 ei = btrfs_item_ptr(leaf, path->slots[0], 207 struct btrfs_file_extent_item); 208 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 209 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); 210 btrfs_set_file_extent_encryption(leaf, ei, 0); 211 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 212 btrfs_set_file_extent_ram_bytes(leaf, ei, size); 213 ptr = btrfs_file_extent_inline_start(ei); 214 215 if (compress_type != BTRFS_COMPRESS_NONE) { 216 struct page *cpage; 217 int i = 0; 218 while (compressed_size > 0) { 219 cpage = compressed_pages[i]; 220 cur_size = min_t(unsigned long, compressed_size, 221 PAGE_SIZE); 222 223 kaddr = kmap_atomic(cpage); 224 write_extent_buffer(leaf, kaddr, ptr, cur_size); 225 kunmap_atomic(kaddr); 226 227 i++; 228 ptr += cur_size; 229 compressed_size -= cur_size; 230 } 231 btrfs_set_file_extent_compression(leaf, ei, 232 compress_type); 233 } else { 234 page = find_get_page(inode->i_mapping, 235 start >> PAGE_SHIFT); 236 btrfs_set_file_extent_compression(leaf, ei, 0); 237 kaddr = kmap_atomic(page); 238 offset = offset_in_page(start); 239 write_extent_buffer(leaf, kaddr + offset, ptr, size); 240 kunmap_atomic(kaddr); 241 put_page(page); 242 } 243 btrfs_mark_buffer_dirty(leaf); 244 btrfs_release_path(path); 245 246 /* 247 * we're an inline extent, so nobody can 248 * extend the file past i_size without locking 249 * a page we already have locked. 250 * 251 * We must do any isize and inode updates 252 * before we unlock the pages. Otherwise we 253 * could end up racing with unlink. 254 */ 255 BTRFS_I(inode)->disk_i_size = inode->i_size; 256 ret = btrfs_update_inode(trans, root, inode); 257 258 fail: 259 return ret; 260 } 261 262 263 /* 264 * conditionally insert an inline extent into the file. This 265 * does the checks required to make sure the data is small enough 266 * to fit as an inline extent. 
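 * Returns 0 when the inline extent was created, 1 when the range is not
 * suitable for inlining or there is no space for it (the caller then falls
 * back to a regular COW write), and a negative errno on error.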
267 */ 268 static noinline int cow_file_range_inline(struct inode *inode, u64 start, 269 u64 end, size_t compressed_size, 270 int compress_type, 271 struct page **compressed_pages) 272 { 273 struct btrfs_root *root = BTRFS_I(inode)->root; 274 struct btrfs_fs_info *fs_info = root->fs_info; 275 struct btrfs_trans_handle *trans; 276 u64 isize = i_size_read(inode); 277 u64 actual_end = min(end + 1, isize); 278 u64 inline_len = actual_end - start; 279 u64 aligned_end = ALIGN(end, fs_info->sectorsize); 280 u64 data_len = inline_len; 281 int ret; 282 struct btrfs_path *path; 283 int extent_inserted = 0; 284 u32 extent_item_size; 285 286 if (compressed_size) 287 data_len = compressed_size; 288 289 if (start > 0 || 290 actual_end > fs_info->sectorsize || 291 data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) || 292 (!compressed_size && 293 (actual_end & (fs_info->sectorsize - 1)) == 0) || 294 end + 1 < isize || 295 data_len > fs_info->max_inline) { 296 return 1; 297 } 298 299 path = btrfs_alloc_path(); 300 if (!path) 301 return -ENOMEM; 302 303 trans = btrfs_join_transaction(root); 304 if (IS_ERR(trans)) { 305 btrfs_free_path(path); 306 return PTR_ERR(trans); 307 } 308 trans->block_rsv = &BTRFS_I(inode)->block_rsv; 309 310 if (compressed_size && compressed_pages) 311 extent_item_size = btrfs_file_extent_calc_inline_size( 312 compressed_size); 313 else 314 extent_item_size = btrfs_file_extent_calc_inline_size( 315 inline_len); 316 317 ret = __btrfs_drop_extents(trans, root, inode, path, 318 start, aligned_end, NULL, 319 1, 1, extent_item_size, &extent_inserted); 320 if (ret) { 321 btrfs_abort_transaction(trans, ret); 322 goto out; 323 } 324 325 if (isize > actual_end) 326 inline_len = min_t(u64, isize, actual_end); 327 ret = insert_inline_extent(trans, path, extent_inserted, 328 root, inode, start, 329 inline_len, compressed_size, 330 compress_type, compressed_pages); 331 if (ret && ret != -ENOSPC) { 332 btrfs_abort_transaction(trans, ret); 333 goto out; 334 } else if (ret == -ENOSPC) { 335 ret = 1; 336 goto out; 337 } 338 339 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 340 btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0); 341 out: 342 /* 343 * Don't forget to free the reserved space, as for inlined extent 344 * it won't count as data extent, free them directly here. 345 * And at reserve time, it's always aligned to page size, so 346 * just free one page here. 
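 * (This runs on every path that reaches the out label, whether or not the
 *  inline extent was created, since an inline extent never consumes the
 *  data reservation.)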
347 */ 348 btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE); 349 btrfs_free_path(path); 350 btrfs_end_transaction(trans); 351 return ret; 352 } 353 354 struct async_extent { 355 u64 start; 356 u64 ram_size; 357 u64 compressed_size; 358 struct page **pages; 359 unsigned long nr_pages; 360 int compress_type; 361 struct list_head list; 362 }; 363 364 struct async_chunk { 365 struct inode *inode; 366 struct page *locked_page; 367 u64 start; 368 u64 end; 369 unsigned int write_flags; 370 struct list_head extents; 371 struct btrfs_work work; 372 atomic_t *pending; 373 }; 374 375 struct async_cow { 376 /* Number of chunks in flight; must be first in the structure */ 377 atomic_t num_chunks; 378 struct async_chunk chunks[]; 379 }; 380 381 static noinline int add_async_extent(struct async_chunk *cow, 382 u64 start, u64 ram_size, 383 u64 compressed_size, 384 struct page **pages, 385 unsigned long nr_pages, 386 int compress_type) 387 { 388 struct async_extent *async_extent; 389 390 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 391 BUG_ON(!async_extent); /* -ENOMEM */ 392 async_extent->start = start; 393 async_extent->ram_size = ram_size; 394 async_extent->compressed_size = compressed_size; 395 async_extent->pages = pages; 396 async_extent->nr_pages = nr_pages; 397 async_extent->compress_type = compress_type; 398 list_add_tail(&async_extent->list, &cow->extents); 399 return 0; 400 } 401 402 /* 403 * Check if the inode has flags compatible with compression 404 */ 405 static inline bool inode_can_compress(struct inode *inode) 406 { 407 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW || 408 BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 409 return false; 410 return true; 411 } 412 413 /* 414 * Check if the inode needs to be submitted to compression, based on mount 415 * options, defragmentation, properties or heuristics. 416 */ 417 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end) 418 { 419 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 420 421 if (!inode_can_compress(inode)) { 422 WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), 423 KERN_ERR "BTRFS: unexpected compression for ino %llu\n", 424 btrfs_ino(BTRFS_I(inode))); 425 return 0; 426 } 427 /* force compress */ 428 if (btrfs_test_opt(fs_info, FORCE_COMPRESS)) 429 return 1; 430 /* defrag ioctl */ 431 if (BTRFS_I(inode)->defrag_compress) 432 return 1; 433 /* bad compression ratios */ 434 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) 435 return 0; 436 if (btrfs_test_opt(fs_info, COMPRESS) || 437 BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS || 438 BTRFS_I(inode)->prop_compress) 439 return btrfs_compress_heuristic(inode, start, end); 440 return 0; 441 } 442 443 static inline void inode_should_defrag(struct btrfs_inode *inode, 444 u64 start, u64 end, u64 num_bytes, u64 small_write) 445 { 446 /* If this is a small write inside eof, kick off a defrag */ 447 if (num_bytes < small_write && 448 (start > 0 || end + 1 < inode->disk_i_size)) 449 btrfs_add_inode_defrag(NULL, inode); 450 } 451 452 /* 453 * we create compressed extents in two phases. The first 454 * phase compresses a range of pages that have already been 455 * locked (both pages and state bits are locked). 456 * 457 * This is done inside an ordered work queue, and the compression 458 * is spread across many cpus. The actual IO submission is step 459 * two, and the ordered work queue takes care of making sure that 460 * happens in the same order things were put onto the queue by 461 * writepages and friends. 
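 * (Phase one is compress_file_range() run from async_cow_start(), phase
 * two is submit_compressed_extents() run from async_cow_submit().)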
462 * 463 * If this code finds it can't get good compression, it puts an 464 * entry onto the work queue to write the uncompressed bytes. This 465 * makes sure that both compressed inodes and uncompressed inodes 466 * are written in the same order that the flusher thread sent them 467 * down. 468 */ 469 static noinline int compress_file_range(struct async_chunk *async_chunk) 470 { 471 struct inode *inode = async_chunk->inode; 472 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 473 u64 blocksize = fs_info->sectorsize; 474 u64 start = async_chunk->start; 475 u64 end = async_chunk->end; 476 u64 actual_end; 477 int ret = 0; 478 struct page **pages = NULL; 479 unsigned long nr_pages; 480 unsigned long total_compressed = 0; 481 unsigned long total_in = 0; 482 int i; 483 int will_compress; 484 int compress_type = fs_info->compress_type; 485 int compressed_extents = 0; 486 int redirty = 0; 487 488 inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, 489 SZ_16K); 490 491 actual_end = min_t(u64, i_size_read(inode), end + 1); 492 again: 493 will_compress = 0; 494 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; 495 BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0); 496 nr_pages = min_t(unsigned long, nr_pages, 497 BTRFS_MAX_COMPRESSED / PAGE_SIZE); 498 499 /* 500 * we don't want to send crud past the end of i_size through 501 * compression, that's just a waste of CPU time. So, if the 502 * end of the file is before the start of our current 503 * requested range of bytes, we bail out to the uncompressed 504 * cleanup code that can deal with all of this. 505 * 506 * It isn't really the fastest way to fix things, but this is a 507 * very uncommon corner. 508 */ 509 if (actual_end <= start) 510 goto cleanup_and_bail_uncompressed; 511 512 total_compressed = actual_end - start; 513 514 /* 515 * skip compression for a small file range(<=blocksize) that 516 * isn't an inline extent, since it doesn't save disk space at all. 517 */ 518 if (total_compressed <= blocksize && 519 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 520 goto cleanup_and_bail_uncompressed; 521 522 total_compressed = min_t(unsigned long, total_compressed, 523 BTRFS_MAX_UNCOMPRESSED); 524 total_in = 0; 525 ret = 0; 526 527 /* 528 * we do compression for mount -o compress and when the 529 * inode has not been flagged as nocompress. This flag can 530 * change at any time if we discover bad compression ratios. 531 */ 532 if (inode_need_compress(inode, start, end)) { 533 WARN_ON(pages); 534 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 535 if (!pages) { 536 /* just bail out to the uncompressed code */ 537 nr_pages = 0; 538 goto cont; 539 } 540 541 if (BTRFS_I(inode)->defrag_compress) 542 compress_type = BTRFS_I(inode)->defrag_compress; 543 else if (BTRFS_I(inode)->prop_compress) 544 compress_type = BTRFS_I(inode)->prop_compress; 545 546 /* 547 * we need to call clear_page_dirty_for_io on each 548 * page in the range. Otherwise applications with the file 549 * mmap'd can wander in and change the page contents while 550 * we are compressing them. 551 * 552 * If the compression fails for any reason, we set the pages 553 * dirty again later on. 554 * 555 * Note that the remaining part is redirtied, the start pointer 556 * has moved, the end is the original one. 
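 * The redirty flag below makes sure the range is only cleaned once, even
 * when we loop back to the 'again' label for the next chunk.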
557 */ 558 if (!redirty) { 559 extent_range_clear_dirty_for_io(inode, start, end); 560 redirty = 1; 561 } 562 563 /* Compression level is applied here and only here */ 564 ret = btrfs_compress_pages( 565 compress_type | (fs_info->compress_level << 4), 566 inode->i_mapping, start, 567 pages, 568 &nr_pages, 569 &total_in, 570 &total_compressed); 571 572 if (!ret) { 573 unsigned long offset = offset_in_page(total_compressed); 574 struct page *page = pages[nr_pages - 1]; 575 char *kaddr; 576 577 /* zero the tail end of the last page, we might be 578 * sending it down to disk 579 */ 580 if (offset) { 581 kaddr = kmap_atomic(page); 582 memset(kaddr + offset, 0, 583 PAGE_SIZE - offset); 584 kunmap_atomic(kaddr); 585 } 586 will_compress = 1; 587 } 588 } 589 cont: 590 if (start == 0) { 591 /* lets try to make an inline extent */ 592 if (ret || total_in < actual_end) { 593 /* we didn't compress the entire range, try 594 * to make an uncompressed inline extent. 595 */ 596 ret = cow_file_range_inline(inode, start, end, 0, 597 BTRFS_COMPRESS_NONE, NULL); 598 } else { 599 /* try making a compressed inline extent */ 600 ret = cow_file_range_inline(inode, start, end, 601 total_compressed, 602 compress_type, pages); 603 } 604 if (ret <= 0) { 605 unsigned long clear_flags = EXTENT_DELALLOC | 606 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | 607 EXTENT_DO_ACCOUNTING; 608 unsigned long page_error_op; 609 610 page_error_op = ret < 0 ? PAGE_SET_ERROR : 0; 611 612 /* 613 * inline extent creation worked or returned error, 614 * we don't need to create any more async work items. 615 * Unlock and free up our temp pages. 616 * 617 * We use DO_ACCOUNTING here because we need the 618 * delalloc_release_metadata to be done _after_ we drop 619 * our outstanding extent for clearing delalloc for this 620 * range. 621 */ 622 extent_clear_unlock_delalloc(inode, start, end, NULL, 623 clear_flags, 624 PAGE_UNLOCK | 625 PAGE_CLEAR_DIRTY | 626 PAGE_SET_WRITEBACK | 627 page_error_op | 628 PAGE_END_WRITEBACK); 629 630 for (i = 0; i < nr_pages; i++) { 631 WARN_ON(pages[i]->mapping); 632 put_page(pages[i]); 633 } 634 kfree(pages); 635 636 return 0; 637 } 638 } 639 640 if (will_compress) { 641 /* 642 * we aren't doing an inline extent round the compressed size 643 * up to a block size boundary so the allocator does sane 644 * things 645 */ 646 total_compressed = ALIGN(total_compressed, blocksize); 647 648 /* 649 * one last check to make sure the compression is really a 650 * win, compare the page count read with the blocks on disk, 651 * compression must free at least one sector size 652 */ 653 total_in = ALIGN(total_in, PAGE_SIZE); 654 if (total_compressed + blocksize <= total_in) { 655 compressed_extents++; 656 657 /* 658 * The async work queues will take care of doing actual 659 * allocation on disk for these compressed pages, and 660 * will submit them to the elevator. 
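 * add_async_extent() only queues the compressed pages on
 * async_chunk->extents; nothing is written until phase two.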
661 */ 662 add_async_extent(async_chunk, start, total_in, 663 total_compressed, pages, nr_pages, 664 compress_type); 665 666 if (start + total_in < end) { 667 start += total_in; 668 pages = NULL; 669 cond_resched(); 670 goto again; 671 } 672 return compressed_extents; 673 } 674 } 675 if (pages) { 676 /* 677 * the compression code ran but failed to make things smaller, 678 * free any pages it allocated and our page pointer array 679 */ 680 for (i = 0; i < nr_pages; i++) { 681 WARN_ON(pages[i]->mapping); 682 put_page(pages[i]); 683 } 684 kfree(pages); 685 pages = NULL; 686 total_compressed = 0; 687 nr_pages = 0; 688 689 /* flag the file so we don't compress in the future */ 690 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && 691 !(BTRFS_I(inode)->prop_compress)) { 692 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 693 } 694 } 695 cleanup_and_bail_uncompressed: 696 /* 697 * No compression, but we still need to write the pages in the file 698 * we've been given so far. redirty the locked page if it corresponds 699 * to our extent and set things up for the async work queue to run 700 * cow_file_range to do the normal delalloc dance. 701 */ 702 if (page_offset(async_chunk->locked_page) >= start && 703 page_offset(async_chunk->locked_page) <= end) 704 __set_page_dirty_nobuffers(async_chunk->locked_page); 705 /* unlocked later on in the async handlers */ 706 707 if (redirty) 708 extent_range_redirty_for_io(inode, start, end); 709 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, 710 BTRFS_COMPRESS_NONE); 711 compressed_extents++; 712 713 return compressed_extents; 714 } 715 716 static void free_async_extent_pages(struct async_extent *async_extent) 717 { 718 int i; 719 720 if (!async_extent->pages) 721 return; 722 723 for (i = 0; i < async_extent->nr_pages; i++) { 724 WARN_ON(async_extent->pages[i]->mapping); 725 put_page(async_extent->pages[i]); 726 } 727 kfree(async_extent->pages); 728 async_extent->nr_pages = 0; 729 async_extent->pages = NULL; 730 } 731 732 /* 733 * phase two of compressed writeback. This is the ordered portion 734 * of the code, which only gets called in the order the work was 735 * queued. We walk all the async extents created by compress_file_range 736 * and send them down to the disk. 737 */ 738 static noinline void submit_compressed_extents(struct async_chunk *async_chunk) 739 { 740 struct inode *inode = async_chunk->inode; 741 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 742 struct async_extent *async_extent; 743 u64 alloc_hint = 0; 744 struct btrfs_key ins; 745 struct extent_map *em; 746 struct btrfs_root *root = BTRFS_I(inode)->root; 747 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 748 int ret = 0; 749 750 again: 751 while (!list_empty(&async_chunk->extents)) { 752 async_extent = list_entry(async_chunk->extents.next, 753 struct async_extent, list); 754 list_del(&async_extent->list); 755 756 retry: 757 lock_extent(io_tree, async_extent->start, 758 async_extent->start + async_extent->ram_size - 1); 759 /* did the compression code fall back to uncompressed IO? */ 760 if (!async_extent->pages) { 761 int page_started = 0; 762 unsigned long nr_written = 0; 763 764 /* allocate blocks */ 765 ret = cow_file_range(inode, async_chunk->locked_page, 766 async_extent->start, 767 async_extent->start + 768 async_extent->ram_size - 1, 769 &page_started, &nr_written, 0); 770 771 /* JDM XXX */ 772 773 /* 774 * if page_started, cow_file_range inserted an 775 * inline extent and took care of all the unlocking 776 * and IO for us. 
Otherwise, we need to submit 777 * all those pages down to the drive. 778 */ 779 if (!page_started && !ret) 780 extent_write_locked_range(inode, 781 async_extent->start, 782 async_extent->start + 783 async_extent->ram_size - 1, 784 WB_SYNC_ALL); 785 else if (ret) 786 unlock_page(async_chunk->locked_page); 787 kfree(async_extent); 788 cond_resched(); 789 continue; 790 } 791 792 ret = btrfs_reserve_extent(root, async_extent->ram_size, 793 async_extent->compressed_size, 794 async_extent->compressed_size, 795 0, alloc_hint, &ins, 1, 1); 796 if (ret) { 797 free_async_extent_pages(async_extent); 798 799 if (ret == -ENOSPC) { 800 unlock_extent(io_tree, async_extent->start, 801 async_extent->start + 802 async_extent->ram_size - 1); 803 804 /* 805 * we need to redirty the pages if we decide to 806 * fallback to uncompressed IO, otherwise we 807 * will not submit these pages down to lower 808 * layers. 809 */ 810 extent_range_redirty_for_io(inode, 811 async_extent->start, 812 async_extent->start + 813 async_extent->ram_size - 1); 814 815 goto retry; 816 } 817 goto out_free; 818 } 819 /* 820 * here we're doing allocation and writeback of the 821 * compressed pages 822 */ 823 em = create_io_em(inode, async_extent->start, 824 async_extent->ram_size, /* len */ 825 async_extent->start, /* orig_start */ 826 ins.objectid, /* block_start */ 827 ins.offset, /* block_len */ 828 ins.offset, /* orig_block_len */ 829 async_extent->ram_size, /* ram_bytes */ 830 async_extent->compress_type, 831 BTRFS_ORDERED_COMPRESSED); 832 if (IS_ERR(em)) 833 /* ret value is not necessary due to void function */ 834 goto out_free_reserve; 835 free_extent_map(em); 836 837 ret = btrfs_add_ordered_extent_compress(inode, 838 async_extent->start, 839 ins.objectid, 840 async_extent->ram_size, 841 ins.offset, 842 BTRFS_ORDERED_COMPRESSED, 843 async_extent->compress_type); 844 if (ret) { 845 btrfs_drop_extent_cache(BTRFS_I(inode), 846 async_extent->start, 847 async_extent->start + 848 async_extent->ram_size - 1, 0); 849 goto out_free_reserve; 850 } 851 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 852 853 /* 854 * clear dirty, set writeback and unlock the pages. 
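 * If btrfs_submit_compressed_write() fails we finish the ordered extent
 * ourselves and flag the pages with an error below.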
855 */ 856 extent_clear_unlock_delalloc(inode, async_extent->start, 857 async_extent->start + 858 async_extent->ram_size - 1, 859 NULL, EXTENT_LOCKED | EXTENT_DELALLOC, 860 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 861 PAGE_SET_WRITEBACK); 862 if (btrfs_submit_compressed_write(inode, 863 async_extent->start, 864 async_extent->ram_size, 865 ins.objectid, 866 ins.offset, async_extent->pages, 867 async_extent->nr_pages, 868 async_chunk->write_flags)) { 869 struct page *p = async_extent->pages[0]; 870 const u64 start = async_extent->start; 871 const u64 end = start + async_extent->ram_size - 1; 872 873 p->mapping = inode->i_mapping; 874 btrfs_writepage_endio_finish_ordered(p, start, end, 0); 875 876 p->mapping = NULL; 877 extent_clear_unlock_delalloc(inode, start, end, 878 NULL, 0, 879 PAGE_END_WRITEBACK | 880 PAGE_SET_ERROR); 881 free_async_extent_pages(async_extent); 882 } 883 alloc_hint = ins.objectid + ins.offset; 884 kfree(async_extent); 885 cond_resched(); 886 } 887 return; 888 out_free_reserve: 889 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 890 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 891 out_free: 892 extent_clear_unlock_delalloc(inode, async_extent->start, 893 async_extent->start + 894 async_extent->ram_size - 1, 895 NULL, EXTENT_LOCKED | EXTENT_DELALLOC | 896 EXTENT_DELALLOC_NEW | 897 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 898 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 899 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK | 900 PAGE_SET_ERROR); 901 free_async_extent_pages(async_extent); 902 kfree(async_extent); 903 goto again; 904 } 905 906 static u64 get_extent_allocation_hint(struct inode *inode, u64 start, 907 u64 num_bytes) 908 { 909 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 910 struct extent_map *em; 911 u64 alloc_hint = 0; 912 913 read_lock(&em_tree->lock); 914 em = search_extent_mapping(em_tree, start, num_bytes); 915 if (em) { 916 /* 917 * if block start isn't an actual block number then find the 918 * first block in this inode and use that as a hint. If that 919 * block is also bogus then just don't worry about it. 920 */ 921 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { 922 free_extent_map(em); 923 em = search_extent_mapping(em_tree, 0, 0); 924 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) 925 alloc_hint = em->block_start; 926 if (em) 927 free_extent_map(em); 928 } else { 929 alloc_hint = em->block_start; 930 free_extent_map(em); 931 } 932 } 933 read_unlock(&em_tree->lock); 934 935 return alloc_hint; 936 } 937 938 /* 939 * when extent_io.c finds a delayed allocation range in the file, 940 * the call backs end up in this code. The basic idea is to 941 * allocate extents on disk for the range, and create ordered data structs 942 * in ram to track those extents. 943 * 944 * locked_page is the page that writepage had locked already. We use 945 * it to make sure we don't do extra locks or unlocks. 946 * 947 * *page_started is set to one if we unlock locked_page and do everything 948 * required to start IO on it. It may be clean and already done with 949 * IO when we return. 
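 * The range is allocated one extent at a time in the loop below; every
 * allocated chunk gets its ordered extent created before its pages are
 * unlocked and its delalloc bits are cleared.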
950 */ 951 static noinline int cow_file_range(struct inode *inode, 952 struct page *locked_page, 953 u64 start, u64 end, int *page_started, 954 unsigned long *nr_written, int unlock) 955 { 956 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 957 struct btrfs_root *root = BTRFS_I(inode)->root; 958 u64 alloc_hint = 0; 959 u64 num_bytes; 960 unsigned long ram_size; 961 u64 cur_alloc_size = 0; 962 u64 blocksize = fs_info->sectorsize; 963 struct btrfs_key ins; 964 struct extent_map *em; 965 unsigned clear_bits; 966 unsigned long page_ops; 967 bool extent_reserved = false; 968 int ret = 0; 969 970 if (btrfs_is_free_space_inode(BTRFS_I(inode))) { 971 WARN_ON_ONCE(1); 972 ret = -EINVAL; 973 goto out_unlock; 974 } 975 976 num_bytes = ALIGN(end - start + 1, blocksize); 977 num_bytes = max(blocksize, num_bytes); 978 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); 979 980 inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); 981 982 if (start == 0) { 983 /* lets try to make an inline extent */ 984 ret = cow_file_range_inline(inode, start, end, 0, 985 BTRFS_COMPRESS_NONE, NULL); 986 if (ret == 0) { 987 /* 988 * We use DO_ACCOUNTING here because we need the 989 * delalloc_release_metadata to be run _after_ we drop 990 * our outstanding extent for clearing delalloc for this 991 * range. 992 */ 993 extent_clear_unlock_delalloc(inode, start, end, NULL, 994 EXTENT_LOCKED | EXTENT_DELALLOC | 995 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | 996 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 997 PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | 998 PAGE_END_WRITEBACK); 999 *nr_written = *nr_written + 1000 (end - start + PAGE_SIZE) / PAGE_SIZE; 1001 *page_started = 1; 1002 goto out; 1003 } else if (ret < 0) { 1004 goto out_unlock; 1005 } 1006 } 1007 1008 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); 1009 btrfs_drop_extent_cache(BTRFS_I(inode), start, 1010 start + num_bytes - 1, 0); 1011 1012 while (num_bytes > 0) { 1013 cur_alloc_size = num_bytes; 1014 ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size, 1015 fs_info->sectorsize, 0, alloc_hint, 1016 &ins, 1, 1); 1017 if (ret < 0) 1018 goto out_unlock; 1019 cur_alloc_size = ins.offset; 1020 extent_reserved = true; 1021 1022 ram_size = ins.offset; 1023 em = create_io_em(inode, start, ins.offset, /* len */ 1024 start, /* orig_start */ 1025 ins.objectid, /* block_start */ 1026 ins.offset, /* block_len */ 1027 ins.offset, /* orig_block_len */ 1028 ram_size, /* ram_bytes */ 1029 BTRFS_COMPRESS_NONE, /* compress_type */ 1030 BTRFS_ORDERED_REGULAR /* type */); 1031 if (IS_ERR(em)) { 1032 ret = PTR_ERR(em); 1033 goto out_reserve; 1034 } 1035 free_extent_map(em); 1036 1037 ret = btrfs_add_ordered_extent(inode, start, ins.objectid, 1038 ram_size, cur_alloc_size, 0); 1039 if (ret) 1040 goto out_drop_extent_cache; 1041 1042 if (root->root_key.objectid == 1043 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1044 ret = btrfs_reloc_clone_csums(inode, start, 1045 cur_alloc_size); 1046 /* 1047 * Only drop cache here, and process as normal. 1048 * 1049 * We must not allow extent_clear_unlock_delalloc() 1050 * at out_unlock label to free meta of this ordered 1051 * extent, as its meta should be freed by 1052 * btrfs_finish_ordered_io(). 1053 * 1054 * So we must continue until @start is increased to 1055 * skip current ordered extent. 
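 * (The error itself is checked at the bottom of the loop, after @start
 *  has been advanced past this ordered extent.)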
1056 */ 1057 if (ret) 1058 btrfs_drop_extent_cache(BTRFS_I(inode), start, 1059 start + ram_size - 1, 0); 1060 } 1061 1062 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1063 1064 /* we're not doing compressed IO, don't unlock the first 1065 * page (which the caller expects to stay locked), don't 1066 * clear any dirty bits and don't set any writeback bits 1067 * 1068 * Do set the Private2 bit so we know this page was properly 1069 * setup for writepage 1070 */ 1071 page_ops = unlock ? PAGE_UNLOCK : 0; 1072 page_ops |= PAGE_SET_PRIVATE2; 1073 1074 extent_clear_unlock_delalloc(inode, start, 1075 start + ram_size - 1, 1076 locked_page, 1077 EXTENT_LOCKED | EXTENT_DELALLOC, 1078 page_ops); 1079 if (num_bytes < cur_alloc_size) 1080 num_bytes = 0; 1081 else 1082 num_bytes -= cur_alloc_size; 1083 alloc_hint = ins.objectid + ins.offset; 1084 start += cur_alloc_size; 1085 extent_reserved = false; 1086 1087 /* 1088 * btrfs_reloc_clone_csums() error, since start is increased 1089 * extent_clear_unlock_delalloc() at out_unlock label won't 1090 * free metadata of current ordered extent, we're OK to exit. 1091 */ 1092 if (ret) 1093 goto out_unlock; 1094 } 1095 out: 1096 return ret; 1097 1098 out_drop_extent_cache: 1099 btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0); 1100 out_reserve: 1101 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 1102 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); 1103 out_unlock: 1104 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 1105 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV; 1106 page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | 1107 PAGE_END_WRITEBACK; 1108 /* 1109 * If we reserved an extent for our delalloc range (or a subrange) and 1110 * failed to create the respective ordered extent, then it means that 1111 * when we reserved the extent we decremented the extent's size from 1112 * the data space_info's bytes_may_use counter and incremented the 1113 * space_info's bytes_reserved counter by the same amount. We must make 1114 * sure extent_clear_unlock_delalloc() does not try to decrement again 1115 * the data space_info's bytes_may_use counter, therefore we do not pass 1116 * it the flag EXTENT_CLEAR_DATA_RESV. 
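 * The rest of the range, which never got an allocation, is cleared with
 * EXTENT_CLEAR_DATA_RESV further below.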
1117 */ 1118 if (extent_reserved) { 1119 extent_clear_unlock_delalloc(inode, start, 1120 start + cur_alloc_size, 1121 locked_page, 1122 clear_bits, 1123 page_ops); 1124 start += cur_alloc_size; 1125 if (start >= end) 1126 goto out; 1127 } 1128 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1129 clear_bits | EXTENT_CLEAR_DATA_RESV, 1130 page_ops); 1131 goto out; 1132 } 1133 1134 /* 1135 * work queue call back to started compression on a file and pages 1136 */ 1137 static noinline void async_cow_start(struct btrfs_work *work) 1138 { 1139 struct async_chunk *async_chunk; 1140 int compressed_extents; 1141 1142 async_chunk = container_of(work, struct async_chunk, work); 1143 1144 compressed_extents = compress_file_range(async_chunk); 1145 if (compressed_extents == 0) { 1146 btrfs_add_delayed_iput(async_chunk->inode); 1147 async_chunk->inode = NULL; 1148 } 1149 } 1150 1151 /* 1152 * work queue call back to submit previously compressed pages 1153 */ 1154 static noinline void async_cow_submit(struct btrfs_work *work) 1155 { 1156 struct async_chunk *async_chunk = container_of(work, struct async_chunk, 1157 work); 1158 struct btrfs_fs_info *fs_info = btrfs_work_owner(work); 1159 unsigned long nr_pages; 1160 1161 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> 1162 PAGE_SHIFT; 1163 1164 /* atomic_sub_return implies a barrier */ 1165 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < 1166 5 * SZ_1M) 1167 cond_wake_up_nomb(&fs_info->async_submit_wait); 1168 1169 /* 1170 * ->inode could be NULL if async_chunk_start has failed to compress, 1171 * in which case we don't have anything to submit, yet we need to 1172 * always adjust ->async_delalloc_pages as its paired with the init 1173 * happening in cow_file_range_async 1174 */ 1175 if (async_chunk->inode) 1176 submit_compressed_extents(async_chunk); 1177 } 1178 1179 static noinline void async_cow_free(struct btrfs_work *work) 1180 { 1181 struct async_chunk *async_chunk; 1182 1183 async_chunk = container_of(work, struct async_chunk, work); 1184 if (async_chunk->inode) 1185 btrfs_add_delayed_iput(async_chunk->inode); 1186 /* 1187 * Since the pointer to 'pending' is at the beginning of the array of 1188 * async_chunk's, freeing it ensures the whole array has been freed. 
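 * ('pending' points at the num_chunks counter, which is the first member
 *  of struct async_cow, so kvfree() releases the whole allocation made in
 *  cow_file_range_async().)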
1189 */ 1190 if (atomic_dec_and_test(async_chunk->pending)) 1191 kvfree(async_chunk->pending); 1192 } 1193 1194 static int cow_file_range_async(struct inode *inode, struct page *locked_page, 1195 u64 start, u64 end, int *page_started, 1196 unsigned long *nr_written, 1197 unsigned int write_flags) 1198 { 1199 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1200 struct async_cow *ctx; 1201 struct async_chunk *async_chunk; 1202 unsigned long nr_pages; 1203 u64 cur_end; 1204 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); 1205 int i; 1206 bool should_compress; 1207 unsigned nofs_flag; 1208 1209 unlock_extent(&BTRFS_I(inode)->io_tree, start, end); 1210 1211 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS && 1212 !btrfs_test_opt(fs_info, FORCE_COMPRESS)) { 1213 num_chunks = 1; 1214 should_compress = false; 1215 } else { 1216 should_compress = true; 1217 } 1218 1219 nofs_flag = memalloc_nofs_save(); 1220 ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL); 1221 memalloc_nofs_restore(nofs_flag); 1222 1223 if (!ctx) { 1224 unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | 1225 EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | 1226 EXTENT_DO_ACCOUNTING; 1227 unsigned long page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 1228 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK | 1229 PAGE_SET_ERROR; 1230 1231 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1232 clear_bits, page_ops); 1233 return -ENOMEM; 1234 } 1235 1236 async_chunk = ctx->chunks; 1237 atomic_set(&ctx->num_chunks, num_chunks); 1238 1239 for (i = 0; i < num_chunks; i++) { 1240 if (should_compress) 1241 cur_end = min(end, start + SZ_512K - 1); 1242 else 1243 cur_end = end; 1244 1245 /* 1246 * igrab is called higher up in the call chain, take only the 1247 * lightweight reference for the callback lifetime 1248 */ 1249 ihold(inode); 1250 async_chunk[i].pending = &ctx->num_chunks; 1251 async_chunk[i].inode = inode; 1252 async_chunk[i].start = start; 1253 async_chunk[i].end = cur_end; 1254 async_chunk[i].locked_page = locked_page; 1255 async_chunk[i].write_flags = write_flags; 1256 INIT_LIST_HEAD(&async_chunk[i].extents); 1257 1258 btrfs_init_work(&async_chunk[i].work, 1259 btrfs_delalloc_helper, 1260 async_cow_start, async_cow_submit, 1261 async_cow_free); 1262 1263 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); 1264 atomic_add(nr_pages, &fs_info->async_delalloc_pages); 1265 1266 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); 1267 1268 *nr_written += nr_pages; 1269 start = cur_end + 1; 1270 } 1271 *page_started = 1; 1272 return 0; 1273 } 1274 1275 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, 1276 u64 bytenr, u64 num_bytes) 1277 { 1278 int ret; 1279 struct btrfs_ordered_sum *sums; 1280 LIST_HEAD(list); 1281 1282 ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr, 1283 bytenr + num_bytes - 1, &list, 0); 1284 if (ret == 0 && list_empty(&list)) 1285 return 0; 1286 1287 while (!list_empty(&list)) { 1288 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 1289 list_del(&sums->list); 1290 kfree(sums); 1291 } 1292 if (ret < 0) 1293 return ret; 1294 return 1; 1295 } 1296 1297 /* 1298 * when nowcow writeback call back. This checks for snapshots or COW copies 1299 * of the extents that exist in the file, and COWs the file as required. 
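 * The 'force' argument is 1 for NODATACOW inodes and 0 for inodes that
 * only have preallocated extents (see btrfs_run_delalloc_range()).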
1300 * 1301 * If no cow copies or snapshots exist, we write directly to the existing 1302 * blocks on disk 1303 */ 1304 static noinline int run_delalloc_nocow(struct inode *inode, 1305 struct page *locked_page, 1306 const u64 start, const u64 end, 1307 int *page_started, int force, 1308 unsigned long *nr_written) 1309 { 1310 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1311 struct btrfs_root *root = BTRFS_I(inode)->root; 1312 struct btrfs_path *path; 1313 u64 cow_start = (u64)-1; 1314 u64 cur_offset = start; 1315 int ret; 1316 bool check_prev = true; 1317 const bool freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode)); 1318 u64 ino = btrfs_ino(BTRFS_I(inode)); 1319 bool nocow = false; 1320 u64 disk_bytenr = 0; 1321 1322 path = btrfs_alloc_path(); 1323 if (!path) { 1324 extent_clear_unlock_delalloc(inode, start, end, locked_page, 1325 EXTENT_LOCKED | EXTENT_DELALLOC | 1326 EXTENT_DO_ACCOUNTING | 1327 EXTENT_DEFRAG, PAGE_UNLOCK | 1328 PAGE_CLEAR_DIRTY | 1329 PAGE_SET_WRITEBACK | 1330 PAGE_END_WRITEBACK); 1331 return -ENOMEM; 1332 } 1333 1334 while (1) { 1335 struct btrfs_key found_key; 1336 struct btrfs_file_extent_item *fi; 1337 struct extent_buffer *leaf; 1338 u64 extent_end; 1339 u64 extent_offset; 1340 u64 num_bytes = 0; 1341 u64 disk_num_bytes; 1342 u64 ram_bytes; 1343 int extent_type; 1344 1345 nocow = false; 1346 1347 ret = btrfs_lookup_file_extent(NULL, root, path, ino, 1348 cur_offset, 0); 1349 if (ret < 0) 1350 goto error; 1351 1352 /* 1353 * If there is no extent for our range when doing the initial 1354 * search, then go back to the previous slot as it will be the 1355 * one containing the search offset 1356 */ 1357 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1358 leaf = path->nodes[0]; 1359 btrfs_item_key_to_cpu(leaf, &found_key, 1360 path->slots[0] - 1); 1361 if (found_key.objectid == ino && 1362 found_key.type == BTRFS_EXTENT_DATA_KEY) 1363 path->slots[0]--; 1364 } 1365 check_prev = false; 1366 next_slot: 1367 /* Go to next leaf if we have exhausted the current one */ 1368 leaf = path->nodes[0]; 1369 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1370 ret = btrfs_next_leaf(root, path); 1371 if (ret < 0) { 1372 if (cow_start != (u64)-1) 1373 cur_offset = cow_start; 1374 goto error; 1375 } 1376 if (ret > 0) 1377 break; 1378 leaf = path->nodes[0]; 1379 } 1380 1381 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1382 1383 /* Didn't find anything for our INO */ 1384 if (found_key.objectid > ino) 1385 break; 1386 /* 1387 * Keep searching until we find an EXTENT_ITEM or there are no 1388 * more extents for this inode 1389 */ 1390 if (WARN_ON_ONCE(found_key.objectid < ino) || 1391 found_key.type < BTRFS_EXTENT_DATA_KEY) { 1392 path->slots[0]++; 1393 goto next_slot; 1394 } 1395 1396 /* Found key is not EXTENT_DATA_KEY or starts after req range */ 1397 if (found_key.type > BTRFS_EXTENT_DATA_KEY || 1398 found_key.offset > end) 1399 break; 1400 1401 /* 1402 * If the found extent starts after requested offset, then 1403 * adjust extent_end to be right before this extent begins 1404 */ 1405 if (found_key.offset > cur_offset) { 1406 extent_end = found_key.offset; 1407 extent_type = 0; 1408 goto out_check; 1409 } 1410 1411 /* 1412 * Found extent which begins before our range and potentially 1413 * intersect it 1414 */ 1415 fi = btrfs_item_ptr(leaf, path->slots[0], 1416 struct btrfs_file_extent_item); 1417 extent_type = btrfs_file_extent_type(leaf, fi); 1418 1419 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 1420 if (extent_type == 
BTRFS_FILE_EXTENT_REG || 1421 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1422 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 1423 extent_offset = btrfs_file_extent_offset(leaf, fi); 1424 extent_end = found_key.offset + 1425 btrfs_file_extent_num_bytes(leaf, fi); 1426 disk_num_bytes = 1427 btrfs_file_extent_disk_num_bytes(leaf, fi); 1428 /* 1429 * If extent we got ends before our range starts, skip 1430 * to next extent 1431 */ 1432 if (extent_end <= start) { 1433 path->slots[0]++; 1434 goto next_slot; 1435 } 1436 /* Skip holes */ 1437 if (disk_bytenr == 0) 1438 goto out_check; 1439 /* Skip compressed/encrypted/encoded extents */ 1440 if (btrfs_file_extent_compression(leaf, fi) || 1441 btrfs_file_extent_encryption(leaf, fi) || 1442 btrfs_file_extent_other_encoding(leaf, fi)) 1443 goto out_check; 1444 /* 1445 * If extent is created before the last volume's snapshot 1446 * this implies the extent is shared, hence we can't do 1447 * nocow. This is the same check as in 1448 * btrfs_cross_ref_exist but without calling 1449 * btrfs_search_slot. 1450 */ 1451 if (!freespace_inode && 1452 btrfs_file_extent_generation(leaf, fi) <= 1453 btrfs_root_last_snapshot(&root->root_item)) 1454 goto out_check; 1455 if (extent_type == BTRFS_FILE_EXTENT_REG && !force) 1456 goto out_check; 1457 /* If extent is RO, we must COW it */ 1458 if (btrfs_extent_readonly(fs_info, disk_bytenr)) 1459 goto out_check; 1460 ret = btrfs_cross_ref_exist(root, ino, 1461 found_key.offset - 1462 extent_offset, disk_bytenr); 1463 if (ret) { 1464 /* 1465 * ret could be -EIO if the above fails to read 1466 * metadata. 1467 */ 1468 if (ret < 0) { 1469 if (cow_start != (u64)-1) 1470 cur_offset = cow_start; 1471 goto error; 1472 } 1473 1474 WARN_ON_ONCE(freespace_inode); 1475 goto out_check; 1476 } 1477 disk_bytenr += extent_offset; 1478 disk_bytenr += cur_offset - found_key.offset; 1479 num_bytes = min(end + 1, extent_end) - cur_offset; 1480 /* 1481 * If there are pending snapshots for this root, we 1482 * fall into common COW way 1483 */ 1484 if (!freespace_inode && atomic_read(&root->snapshot_force_cow)) 1485 goto out_check; 1486 /* 1487 * force cow if csum exists in the range. 1488 * this ensure that csum for a given extent are 1489 * either valid or do not exist. 1490 */ 1491 ret = csum_exist_in_range(fs_info, disk_bytenr, 1492 num_bytes); 1493 if (ret) { 1494 /* 1495 * ret could be -EIO if the above fails to read 1496 * metadata. 1497 */ 1498 if (ret < 0) { 1499 if (cow_start != (u64)-1) 1500 cur_offset = cow_start; 1501 goto error; 1502 } 1503 WARN_ON_ONCE(freespace_inode); 1504 goto out_check; 1505 } 1506 if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) 1507 goto out_check; 1508 nocow = true; 1509 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1510 extent_end = found_key.offset + ram_bytes; 1511 extent_end = ALIGN(extent_end, fs_info->sectorsize); 1512 /* Skip extents outside of our requested range */ 1513 if (extent_end <= start) { 1514 path->slots[0]++; 1515 goto next_slot; 1516 } 1517 } else { 1518 /* If this triggers then we have a memory corruption */ 1519 BUG(); 1520 } 1521 out_check: 1522 /* 1523 * If nocow is false then record the beginning of the range 1524 * that needs to be COWed 1525 */ 1526 if (!nocow) { 1527 if (cow_start == (u64)-1) 1528 cow_start = cur_offset; 1529 cur_offset = extent_end; 1530 if (cur_offset > end) 1531 break; 1532 path->slots[0]++; 1533 goto next_slot; 1534 } 1535 1536 btrfs_release_path(path); 1537 1538 /* 1539 * COW range from cow_start to found_key.offset - 1. 
As the key 1540 * will contain the beginning of the first extent that can be 1541 * NOCOW, following one which needs to be COW'ed 1542 */ 1543 if (cow_start != (u64)-1) { 1544 ret = cow_file_range(inode, locked_page, 1545 cow_start, found_key.offset - 1, 1546 page_started, nr_written, 1); 1547 if (ret) { 1548 if (nocow) 1549 btrfs_dec_nocow_writers(fs_info, 1550 disk_bytenr); 1551 goto error; 1552 } 1553 cow_start = (u64)-1; 1554 } 1555 1556 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 1557 u64 orig_start = found_key.offset - extent_offset; 1558 struct extent_map *em; 1559 1560 em = create_io_em(inode, cur_offset, num_bytes, 1561 orig_start, 1562 disk_bytenr, /* block_start */ 1563 num_bytes, /* block_len */ 1564 disk_num_bytes, /* orig_block_len */ 1565 ram_bytes, BTRFS_COMPRESS_NONE, 1566 BTRFS_ORDERED_PREALLOC); 1567 if (IS_ERR(em)) { 1568 if (nocow) 1569 btrfs_dec_nocow_writers(fs_info, 1570 disk_bytenr); 1571 ret = PTR_ERR(em); 1572 goto error; 1573 } 1574 free_extent_map(em); 1575 ret = btrfs_add_ordered_extent(inode, cur_offset, 1576 disk_bytenr, num_bytes, 1577 num_bytes, 1578 BTRFS_ORDERED_PREALLOC); 1579 if (ret) { 1580 btrfs_drop_extent_cache(BTRFS_I(inode), 1581 cur_offset, 1582 cur_offset + num_bytes - 1, 1583 0); 1584 goto error; 1585 } 1586 } else { 1587 ret = btrfs_add_ordered_extent(inode, cur_offset, 1588 disk_bytenr, num_bytes, 1589 num_bytes, 1590 BTRFS_ORDERED_NOCOW); 1591 if (ret) 1592 goto error; 1593 } 1594 1595 if (nocow) 1596 btrfs_dec_nocow_writers(fs_info, disk_bytenr); 1597 nocow = false; 1598 1599 if (root->root_key.objectid == 1600 BTRFS_DATA_RELOC_TREE_OBJECTID) 1601 /* 1602 * Error handled later, as we must prevent 1603 * extent_clear_unlock_delalloc() in error handler 1604 * from freeing metadata of created ordered extent. 1605 */ 1606 ret = btrfs_reloc_clone_csums(inode, cur_offset, 1607 num_bytes); 1608 1609 extent_clear_unlock_delalloc(inode, cur_offset, 1610 cur_offset + num_bytes - 1, 1611 locked_page, EXTENT_LOCKED | 1612 EXTENT_DELALLOC | 1613 EXTENT_CLEAR_DATA_RESV, 1614 PAGE_UNLOCK | PAGE_SET_PRIVATE2); 1615 1616 cur_offset = extent_end; 1617 1618 /* 1619 * btrfs_reloc_clone_csums() error, now we're OK to call error 1620 * handler, as metadata for created ordered extent will only 1621 * be freed by btrfs_finish_ordered_io(). 1622 */ 1623 if (ret) 1624 goto error; 1625 if (cur_offset > end) 1626 break; 1627 } 1628 btrfs_release_path(path); 1629 1630 if (cur_offset <= end && cow_start == (u64)-1) 1631 cow_start = cur_offset; 1632 1633 if (cow_start != (u64)-1) { 1634 cur_offset = end; 1635 ret = cow_file_range(inode, locked_page, cow_start, end, 1636 page_started, nr_written, 1); 1637 if (ret) 1638 goto error; 1639 } 1640 1641 error: 1642 if (nocow) 1643 btrfs_dec_nocow_writers(fs_info, disk_bytenr); 1644 1645 if (ret && cur_offset < end) 1646 extent_clear_unlock_delalloc(inode, cur_offset, end, 1647 locked_page, EXTENT_LOCKED | 1648 EXTENT_DELALLOC | EXTENT_DEFRAG | 1649 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | 1650 PAGE_CLEAR_DIRTY | 1651 PAGE_SET_WRITEBACK | 1652 PAGE_END_WRITEBACK); 1653 btrfs_free_path(path); 1654 return ret; 1655 } 1656 1657 static inline int need_force_cow(struct inode *inode, u64 start, u64 end) 1658 { 1659 1660 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 1661 !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) 1662 return 0; 1663 1664 /* 1665 * @defrag_bytes is a hint value, no spinlock held here, 1666 * if is not zero, it means the file is defragging. 1667 * Force cow if given extent needs to be defragged. 
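 * This only matters for NODATACOW/PREALLOC inodes; for everything else
 * btrfs_run_delalloc_range() takes the COW path anyway.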
1668 */ 1669 if (BTRFS_I(inode)->defrag_bytes && 1670 test_range_bit(&BTRFS_I(inode)->io_tree, start, end, 1671 EXTENT_DEFRAG, 0, NULL)) 1672 return 1; 1673 1674 return 0; 1675 } 1676 1677 /* 1678 * Function to process delayed allocation (create CoW) for ranges which are 1679 * being touched for the first time. 1680 */ 1681 int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, 1682 u64 start, u64 end, int *page_started, unsigned long *nr_written, 1683 struct writeback_control *wbc) 1684 { 1685 int ret; 1686 int force_cow = need_force_cow(inode, start, end); 1687 unsigned int write_flags = wbc_to_write_flags(wbc); 1688 1689 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { 1690 ret = run_delalloc_nocow(inode, locked_page, start, end, 1691 page_started, 1, nr_written); 1692 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { 1693 ret = run_delalloc_nocow(inode, locked_page, start, end, 1694 page_started, 0, nr_written); 1695 } else if (!inode_can_compress(inode) || 1696 !inode_need_compress(inode, start, end)) { 1697 ret = cow_file_range(inode, locked_page, start, end, 1698 page_started, nr_written, 1); 1699 } else { 1700 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 1701 &BTRFS_I(inode)->runtime_flags); 1702 ret = cow_file_range_async(inode, locked_page, start, end, 1703 page_started, nr_written, 1704 write_flags); 1705 } 1706 if (ret) 1707 btrfs_cleanup_ordered_extents(inode, locked_page, start, 1708 end - start + 1); 1709 return ret; 1710 } 1711 1712 void btrfs_split_delalloc_extent(struct inode *inode, 1713 struct extent_state *orig, u64 split) 1714 { 1715 u64 size; 1716 1717 /* not delalloc, ignore it */ 1718 if (!(orig->state & EXTENT_DELALLOC)) 1719 return; 1720 1721 size = orig->end - orig->start + 1; 1722 if (size > BTRFS_MAX_EXTENT_SIZE) { 1723 u32 num_extents; 1724 u64 new_size; 1725 1726 /* 1727 * See the explanation in btrfs_merge_delalloc_extent, the same 1728 * applies here, just in reverse. 1729 */ 1730 new_size = orig->end - split + 1; 1731 num_extents = count_max_extents(new_size); 1732 new_size = split - orig->start; 1733 num_extents += count_max_extents(new_size); 1734 if (count_max_extents(size) >= num_extents) 1735 return; 1736 } 1737 1738 spin_lock(&BTRFS_I(inode)->lock); 1739 btrfs_mod_outstanding_extents(BTRFS_I(inode), 1); 1740 spin_unlock(&BTRFS_I(inode)->lock); 1741 } 1742 1743 /* 1744 * Handle merged delayed allocation extents so we can keep track of new extents 1745 * that are just merged onto old extents, such as when we are doing sequential 1746 * writes, so we can properly account for the metadata space we'll need. 1747 */ 1748 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new, 1749 struct extent_state *other) 1750 { 1751 u64 new_size, old_size; 1752 u32 num_extents; 1753 1754 /* not delalloc, ignore it */ 1755 if (!(other->state & EXTENT_DELALLOC)) 1756 return; 1757 1758 if (new->start > other->start) 1759 new_size = new->end - other->start + 1; 1760 else 1761 new_size = other->end - new->start + 1; 1762 1763 /* we're not bigger than the max, unreserve the space and go */ 1764 if (new_size <= BTRFS_MAX_EXTENT_SIZE) { 1765 spin_lock(&BTRFS_I(inode)->lock); 1766 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); 1767 spin_unlock(&BTRFS_I(inode)->lock); 1768 return; 1769 } 1770 1771 /* 1772 * We have to add up either side to figure out how many extents were 1773 * accounted for before we merged into one big extent. 
If the number of 1774 * extents we accounted for is <= the amount we need for the new range 1775 * then we can return, otherwise drop. Think of it like this 1776 * 1777 * [ 4k][MAX_SIZE] 1778 * 1779 * So we've grown the extent by a MAX_SIZE extent, this would mean we 1780 * need 2 outstanding extents, on one side we have 1 and the other side 1781 * we have 1 so they are == and we can return. But in this case 1782 * 1783 * [MAX_SIZE+4k][MAX_SIZE+4k] 1784 * 1785 * Each range on their own accounts for 2 extents, but merged together 1786 * they are only 3 extents worth of accounting, so we need to drop in 1787 * this case. 1788 */ 1789 old_size = other->end - other->start + 1; 1790 num_extents = count_max_extents(old_size); 1791 old_size = new->end - new->start + 1; 1792 num_extents += count_max_extents(old_size); 1793 if (count_max_extents(new_size) >= num_extents) 1794 return; 1795 1796 spin_lock(&BTRFS_I(inode)->lock); 1797 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); 1798 spin_unlock(&BTRFS_I(inode)->lock); 1799 } 1800 1801 static void btrfs_add_delalloc_inodes(struct btrfs_root *root, 1802 struct inode *inode) 1803 { 1804 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1805 1806 spin_lock(&root->delalloc_lock); 1807 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1808 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, 1809 &root->delalloc_inodes); 1810 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1811 &BTRFS_I(inode)->runtime_flags); 1812 root->nr_delalloc_inodes++; 1813 if (root->nr_delalloc_inodes == 1) { 1814 spin_lock(&fs_info->delalloc_root_lock); 1815 BUG_ON(!list_empty(&root->delalloc_root)); 1816 list_add_tail(&root->delalloc_root, 1817 &fs_info->delalloc_roots); 1818 spin_unlock(&fs_info->delalloc_root_lock); 1819 } 1820 } 1821 spin_unlock(&root->delalloc_lock); 1822 } 1823 1824 1825 void __btrfs_del_delalloc_inode(struct btrfs_root *root, 1826 struct btrfs_inode *inode) 1827 { 1828 struct btrfs_fs_info *fs_info = root->fs_info; 1829 1830 if (!list_empty(&inode->delalloc_inodes)) { 1831 list_del_init(&inode->delalloc_inodes); 1832 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1833 &inode->runtime_flags); 1834 root->nr_delalloc_inodes--; 1835 if (!root->nr_delalloc_inodes) { 1836 ASSERT(list_empty(&root->delalloc_inodes)); 1837 spin_lock(&fs_info->delalloc_root_lock); 1838 BUG_ON(list_empty(&root->delalloc_root)); 1839 list_del_init(&root->delalloc_root); 1840 spin_unlock(&fs_info->delalloc_root_lock); 1841 } 1842 } 1843 } 1844 1845 static void btrfs_del_delalloc_inode(struct btrfs_root *root, 1846 struct btrfs_inode *inode) 1847 { 1848 spin_lock(&root->delalloc_lock); 1849 __btrfs_del_delalloc_inode(root, inode); 1850 spin_unlock(&root->delalloc_lock); 1851 } 1852 1853 /* 1854 * Properly track delayed allocation bytes in the inode and to maintain the 1855 * list of inodes that have pending delalloc work to be done. 
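 * The accounting below only triggers when the EXTENT_DELALLOC bit is
 * newly set on a range; btrfs_clear_delalloc_extent() undoes it when the
 * bit is cleared. A range spanning N times BTRFS_MAX_EXTENT_SIZE counts
 * as N outstanding extents (see count_max_extents()).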
1856 */ 1857 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state, 1858 unsigned *bits) 1859 { 1860 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1861 1862 if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) 1863 WARN_ON(1); 1864 /* 1865 * set_bit and clear bit hooks normally require _irqsave/restore 1866 * but in this case, we are only testing for the DELALLOC 1867 * bit, which is only set or cleared with irqs on 1868 */ 1869 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1870 struct btrfs_root *root = BTRFS_I(inode)->root; 1871 u64 len = state->end + 1 - state->start; 1872 u32 num_extents = count_max_extents(len); 1873 bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); 1874 1875 spin_lock(&BTRFS_I(inode)->lock); 1876 btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents); 1877 spin_unlock(&BTRFS_I(inode)->lock); 1878 1879 /* For sanity tests */ 1880 if (btrfs_is_testing(fs_info)) 1881 return; 1882 1883 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, 1884 fs_info->delalloc_batch); 1885 spin_lock(&BTRFS_I(inode)->lock); 1886 BTRFS_I(inode)->delalloc_bytes += len; 1887 if (*bits & EXTENT_DEFRAG) 1888 BTRFS_I(inode)->defrag_bytes += len; 1889 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1890 &BTRFS_I(inode)->runtime_flags)) 1891 btrfs_add_delalloc_inodes(root, inode); 1892 spin_unlock(&BTRFS_I(inode)->lock); 1893 } 1894 1895 if (!(state->state & EXTENT_DELALLOC_NEW) && 1896 (*bits & EXTENT_DELALLOC_NEW)) { 1897 spin_lock(&BTRFS_I(inode)->lock); 1898 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 - 1899 state->start; 1900 spin_unlock(&BTRFS_I(inode)->lock); 1901 } 1902 } 1903 1904 /* 1905 * Once a range is no longer delalloc this function ensures that proper 1906 * accounting happens. 1907 */ 1908 void btrfs_clear_delalloc_extent(struct inode *vfs_inode, 1909 struct extent_state *state, unsigned *bits) 1910 { 1911 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 1912 struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb); 1913 u64 len = state->end + 1 - state->start; 1914 u32 num_extents = count_max_extents(len); 1915 1916 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) { 1917 spin_lock(&inode->lock); 1918 inode->defrag_bytes -= len; 1919 spin_unlock(&inode->lock); 1920 } 1921 1922 /* 1923 * set_bit and clear bit hooks normally require _irqsave/restore 1924 * but in this case, we are only testing for the DELALLOC 1925 * bit, which is only set or cleared with irqs on 1926 */ 1927 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1928 struct btrfs_root *root = inode->root; 1929 bool do_list = !btrfs_is_free_space_inode(inode); 1930 1931 spin_lock(&inode->lock); 1932 btrfs_mod_outstanding_extents(inode, -num_extents); 1933 spin_unlock(&inode->lock); 1934 1935 /* 1936 * We don't reserve metadata space for space cache inodes so we 1937 * don't need to call delalloc_release_metadata if there is an 1938 * error. 1939 */ 1940 if (*bits & EXTENT_CLEAR_META_RESV && 1941 root != fs_info->tree_root) 1942 btrfs_delalloc_release_metadata(inode, len, false); 1943 1944 /* For sanity tests. 
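 * (the btrfs self-tests exercise this path with a dummy fs_info, so the
 * fs-wide accounting that follows is skipped for them)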
*/ 1945 if (btrfs_is_testing(fs_info)) 1946 return; 1947 1948 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && 1949 do_list && !(state->state & EXTENT_NORESERVE) && 1950 (*bits & EXTENT_CLEAR_DATA_RESV)) 1951 btrfs_free_reserved_data_space_noquota( 1952 &inode->vfs_inode, 1953 state->start, len); 1954 1955 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, 1956 fs_info->delalloc_batch); 1957 spin_lock(&inode->lock); 1958 inode->delalloc_bytes -= len; 1959 if (do_list && inode->delalloc_bytes == 0 && 1960 test_bit(BTRFS_INODE_IN_DELALLOC_LIST, 1961 &inode->runtime_flags)) 1962 btrfs_del_delalloc_inode(root, inode); 1963 spin_unlock(&inode->lock); 1964 } 1965 1966 if ((state->state & EXTENT_DELALLOC_NEW) && 1967 (*bits & EXTENT_DELALLOC_NEW)) { 1968 spin_lock(&inode->lock); 1969 ASSERT(inode->new_delalloc_bytes >= len); 1970 inode->new_delalloc_bytes -= len; 1971 spin_unlock(&inode->lock); 1972 } 1973 } 1974 1975 /* 1976 * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit 1977 * in a chunk's stripe. This function ensures that bios do not span a 1978 * stripe/chunk 1979 * 1980 * @page - The page we are about to add to the bio 1981 * @size - size we want to add to the bio 1982 * @bio - bio we want to ensure is smaller than a stripe 1983 * @bio_flags - flags of the bio 1984 * 1985 * return 1 if page cannot be added to the bio 1986 * return 0 if page can be added to the bio 1987 * return error otherwise 1988 */ 1989 int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio, 1990 unsigned long bio_flags) 1991 { 1992 struct inode *inode = page->mapping->host; 1993 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1994 u64 logical = (u64)bio->bi_iter.bi_sector << 9; 1995 u64 length = 0; 1996 u64 map_length; 1997 int ret; 1998 struct btrfs_io_geometry geom; 1999 2000 if (bio_flags & EXTENT_BIO_COMPRESSED) 2001 return 0; 2002 2003 length = bio->bi_iter.bi_size; 2004 map_length = length; 2005 ret = btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical, map_length, 2006 &geom); 2007 if (ret < 0) 2008 return ret; 2009 2010 if (geom.len < length + size) 2011 return 1; 2012 return 0; 2013 } 2014 2015 /* 2016 * in order to insert checksums into the metadata in large chunks, 2017 * we wait until bio submission time. All the pages in the bio are 2018 * checksummed and sums are attached onto the ordered extent record. 2019 * 2020 * At IO completion time the cums attached on the ordered extent record 2021 * are inserted into the btree 2022 */ 2023 static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio, 2024 u64 bio_offset) 2025 { 2026 struct inode *inode = private_data; 2027 blk_status_t ret = 0; 2028 2029 ret = btrfs_csum_one_bio(inode, bio, 0, 0); 2030 BUG_ON(ret); /* -ENOMEM */ 2031 return 0; 2032 } 2033 2034 /* 2035 * extent_io.c submission hook. This does the right thing for csum calculation 2036 * on write, or reading the csums from the tree before a read. 
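 * For writes the checksum calculation itself is either deferred to
 * btrfs_submit_bio_start() above (async case) or done inline via
 * btrfs_csum_one_bio() (sync cases).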
2037 * 2038 * Rules about async/sync submit, 2039 * a) read: sync submit 2040 * 2041 * b) write without checksum: sync submit 2042 * 2043 * c) write with checksum: 2044 * c-1) if bio is issued by fsync: sync submit 2045 * (sync_writers != 0) 2046 * 2047 * c-2) if root is reloc root: sync submit 2048 * (only in case of buffered IO) 2049 * 2050 * c-3) otherwise: async submit 2051 */ 2052 static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, 2053 int mirror_num, 2054 unsigned long bio_flags) 2055 2056 { 2057 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2058 struct btrfs_root *root = BTRFS_I(inode)->root; 2059 enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; 2060 blk_status_t ret = 0; 2061 int skip_sum; 2062 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); 2063 2064 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 2065 2066 if (btrfs_is_free_space_inode(BTRFS_I(inode))) 2067 metadata = BTRFS_WQ_ENDIO_FREE_SPACE; 2068 2069 if (bio_op(bio) != REQ_OP_WRITE) { 2070 ret = btrfs_bio_wq_end_io(fs_info, bio, metadata); 2071 if (ret) 2072 goto out; 2073 2074 if (bio_flags & EXTENT_BIO_COMPRESSED) { 2075 ret = btrfs_submit_compressed_read(inode, bio, 2076 mirror_num, 2077 bio_flags); 2078 goto out; 2079 } else if (!skip_sum) { 2080 ret = btrfs_lookup_bio_sums(inode, bio, NULL); 2081 if (ret) 2082 goto out; 2083 } 2084 goto mapit; 2085 } else if (async && !skip_sum) { 2086 /* csum items have already been cloned */ 2087 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) 2088 goto mapit; 2089 /* we're doing a write, do the async checksumming */ 2090 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags, 2091 0, inode, btrfs_submit_bio_start); 2092 goto out; 2093 } else if (!skip_sum) { 2094 ret = btrfs_csum_one_bio(inode, bio, 0, 0); 2095 if (ret) 2096 goto out; 2097 } 2098 2099 mapit: 2100 ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); 2101 2102 out: 2103 if (ret) { 2104 bio->bi_status = ret; 2105 bio_endio(bio); 2106 } 2107 return ret; 2108 } 2109 2110 /* 2111 * given a list of ordered sums record them in the inode. This happens 2112 * at IO completion time based on sums calculated at bio submission time. 
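 * Called from btrfs_finish_ordered_io() with a transaction handle; if this
 * fails the caller aborts the transaction.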
2113 */ 2114 static noinline int add_pending_csums(struct btrfs_trans_handle *trans, 2115 struct inode *inode, struct list_head *list) 2116 { 2117 struct btrfs_ordered_sum *sum; 2118 int ret; 2119 2120 list_for_each_entry(sum, list, list) { 2121 trans->adding_csums = true; 2122 ret = btrfs_csum_file_blocks(trans, 2123 BTRFS_I(inode)->root->fs_info->csum_root, sum); 2124 trans->adding_csums = false; 2125 if (ret) 2126 return ret; 2127 } 2128 return 0; 2129 } 2130 2131 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 2132 unsigned int extra_bits, 2133 struct extent_state **cached_state) 2134 { 2135 WARN_ON(PAGE_ALIGNED(end)); 2136 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 2137 extra_bits, cached_state); 2138 } 2139 2140 /* see btrfs_writepage_start_hook for details on why this is required */ 2141 struct btrfs_writepage_fixup { 2142 struct page *page; 2143 struct btrfs_work work; 2144 }; 2145 2146 static void btrfs_writepage_fixup_worker(struct btrfs_work *work) 2147 { 2148 struct btrfs_writepage_fixup *fixup; 2149 struct btrfs_ordered_extent *ordered; 2150 struct extent_state *cached_state = NULL; 2151 struct extent_changeset *data_reserved = NULL; 2152 struct page *page; 2153 struct inode *inode; 2154 u64 page_start; 2155 u64 page_end; 2156 int ret; 2157 2158 fixup = container_of(work, struct btrfs_writepage_fixup, work); 2159 page = fixup->page; 2160 again: 2161 lock_page(page); 2162 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { 2163 ClearPageChecked(page); 2164 goto out_page; 2165 } 2166 2167 inode = page->mapping->host; 2168 page_start = page_offset(page); 2169 page_end = page_offset(page) + PAGE_SIZE - 1; 2170 2171 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 2172 &cached_state); 2173 2174 /* already ordered? We're done */ 2175 if (PagePrivate2(page)) 2176 goto out; 2177 2178 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, 2179 PAGE_SIZE); 2180 if (ordered) { 2181 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, 2182 page_end, &cached_state); 2183 unlock_page(page); 2184 btrfs_start_ordered_extent(inode, ordered, 1); 2185 btrfs_put_ordered_extent(ordered); 2186 goto again; 2187 } 2188 2189 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 2190 PAGE_SIZE); 2191 if (ret) { 2192 mapping_set_error(page->mapping, ret); 2193 end_extent_writepage(page, ret, page_start, page_end); 2194 ClearPageChecked(page); 2195 goto out; 2196 } 2197 2198 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, 2199 &cached_state); 2200 if (ret) { 2201 mapping_set_error(page->mapping, ret); 2202 end_extent_writepage(page, ret, page_start, page_end); 2203 ClearPageChecked(page); 2204 goto out; 2205 } 2206 2207 ClearPageChecked(page); 2208 set_page_dirty(page); 2209 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false); 2210 out: 2211 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, 2212 &cached_state); 2213 out_page: 2214 unlock_page(page); 2215 put_page(page); 2216 kfree(fixup); 2217 extent_changeset_free(data_reserved); 2218 } 2219 2220 /* 2221 * There are a few paths in the higher layers of the kernel that directly 2222 * set the page dirty bit without asking the filesystem if it is a 2223 * good idea. This causes problems because we want to make sure COW 2224 * properly happens and the data=ordered rules are followed. 2225 * 2226 * In our case any range that doesn't have the ORDERED bit set 2227 * hasn't been properly setup for IO. 
We kick off an async process 2228 * to fix it up. The async helper will wait for ordered extents, set 2229 * the delalloc bit and make it safe to write the page. 2230 */ 2231 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end) 2232 { 2233 struct inode *inode = page->mapping->host; 2234 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2235 struct btrfs_writepage_fixup *fixup; 2236 2237 /* this page is properly in the ordered list */ 2238 if (TestClearPagePrivate2(page)) 2239 return 0; 2240 2241 if (PageChecked(page)) 2242 return -EAGAIN; 2243 2244 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2245 if (!fixup) 2246 return -EAGAIN; 2247 2248 SetPageChecked(page); 2249 get_page(page); 2250 btrfs_init_work(&fixup->work, btrfs_fixup_helper, 2251 btrfs_writepage_fixup_worker, NULL, NULL); 2252 fixup->page = page; 2253 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); 2254 return -EBUSY; 2255 } 2256 2257 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2258 struct inode *inode, u64 file_pos, 2259 u64 disk_bytenr, u64 disk_num_bytes, 2260 u64 num_bytes, u64 ram_bytes, 2261 u8 compression, u8 encryption, 2262 u16 other_encoding, int extent_type) 2263 { 2264 struct btrfs_root *root = BTRFS_I(inode)->root; 2265 struct btrfs_file_extent_item *fi; 2266 struct btrfs_path *path; 2267 struct extent_buffer *leaf; 2268 struct btrfs_key ins; 2269 u64 qg_released; 2270 int extent_inserted = 0; 2271 int ret; 2272 2273 path = btrfs_alloc_path(); 2274 if (!path) 2275 return -ENOMEM; 2276 2277 /* 2278 * we may be replacing one extent in the tree with another. 2279 * The new extent is pinned in the extent map, and we don't want 2280 * to drop it from the cache until it is completely in the btree. 2281 * 2282 * So, tell btrfs_drop_extents to leave this extent in the cache. 2283 * the caller is expected to unpin it and allow it to be merged 2284 * with the others. 
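 * (btrfs_finish_ordered_io() does that via unpin_extent_cache() after this
 * function returns)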
2285 */ 2286 ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, 2287 file_pos + num_bytes, NULL, 0, 2288 1, sizeof(*fi), &extent_inserted); 2289 if (ret) 2290 goto out; 2291 2292 if (!extent_inserted) { 2293 ins.objectid = btrfs_ino(BTRFS_I(inode)); 2294 ins.offset = file_pos; 2295 ins.type = BTRFS_EXTENT_DATA_KEY; 2296 2297 path->leave_spinning = 1; 2298 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2299 sizeof(*fi)); 2300 if (ret) 2301 goto out; 2302 } 2303 leaf = path->nodes[0]; 2304 fi = btrfs_item_ptr(leaf, path->slots[0], 2305 struct btrfs_file_extent_item); 2306 btrfs_set_file_extent_generation(leaf, fi, trans->transid); 2307 btrfs_set_file_extent_type(leaf, fi, extent_type); 2308 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); 2309 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); 2310 btrfs_set_file_extent_offset(leaf, fi, 0); 2311 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2312 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); 2313 btrfs_set_file_extent_compression(leaf, fi, compression); 2314 btrfs_set_file_extent_encryption(leaf, fi, encryption); 2315 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); 2316 2317 btrfs_mark_buffer_dirty(leaf); 2318 btrfs_release_path(path); 2319 2320 inode_add_bytes(inode, num_bytes); 2321 2322 ins.objectid = disk_bytenr; 2323 ins.offset = disk_num_bytes; 2324 ins.type = BTRFS_EXTENT_ITEM_KEY; 2325 2326 /* 2327 * Release the reserved range from inode dirty range map, as it is 2328 * already moved into delayed_ref_head 2329 */ 2330 ret = btrfs_qgroup_release_data(inode, file_pos, ram_bytes); 2331 if (ret < 0) 2332 goto out; 2333 qg_released = ret; 2334 ret = btrfs_alloc_reserved_file_extent(trans, root, 2335 btrfs_ino(BTRFS_I(inode)), 2336 file_pos, qg_released, &ins); 2337 out: 2338 btrfs_free_path(path); 2339 2340 return ret; 2341 } 2342 2343 /* snapshot-aware defrag */ 2344 struct sa_defrag_extent_backref { 2345 struct rb_node node; 2346 struct old_sa_defrag_extent *old; 2347 u64 root_id; 2348 u64 inum; 2349 u64 file_pos; 2350 u64 extent_offset; 2351 u64 num_bytes; 2352 u64 generation; 2353 }; 2354 2355 struct old_sa_defrag_extent { 2356 struct list_head list; 2357 struct new_sa_defrag_extent *new; 2358 2359 u64 extent_offset; 2360 u64 bytenr; 2361 u64 offset; 2362 u64 len; 2363 int count; 2364 }; 2365 2366 struct new_sa_defrag_extent { 2367 struct rb_root root; 2368 struct list_head head; 2369 struct btrfs_path *path; 2370 struct inode *inode; 2371 u64 file_pos; 2372 u64 len; 2373 u64 bytenr; 2374 u64 disk_len; 2375 u8 compress_type; 2376 }; 2377 2378 static int backref_comp(struct sa_defrag_extent_backref *b1, 2379 struct sa_defrag_extent_backref *b2) 2380 { 2381 if (b1->root_id < b2->root_id) 2382 return -1; 2383 else if (b1->root_id > b2->root_id) 2384 return 1; 2385 2386 if (b1->inum < b2->inum) 2387 return -1; 2388 else if (b1->inum > b2->inum) 2389 return 1; 2390 2391 if (b1->file_pos < b2->file_pos) 2392 return -1; 2393 else if (b1->file_pos > b2->file_pos) 2394 return 1; 2395 2396 /* 2397 * [------------------------------] ===> (a range of space) 2398 * |<--->| |<---->| =============> (fs/file tree A) 2399 * |<---------------------------->| ===> (fs/file tree B) 2400 * 2401 * A range of space can refer to two file extents in one tree while 2402 * refer to only one file extent in another tree. 
2403 * 2404 * So we may process a disk offset more than one time(two extents in A) 2405 * and locate at the same extent(one extent in B), then insert two same 2406 * backrefs(both refer to the extent in B). 2407 */ 2408 return 0; 2409 } 2410 2411 static void backref_insert(struct rb_root *root, 2412 struct sa_defrag_extent_backref *backref) 2413 { 2414 struct rb_node **p = &root->rb_node; 2415 struct rb_node *parent = NULL; 2416 struct sa_defrag_extent_backref *entry; 2417 int ret; 2418 2419 while (*p) { 2420 parent = *p; 2421 entry = rb_entry(parent, struct sa_defrag_extent_backref, node); 2422 2423 ret = backref_comp(backref, entry); 2424 if (ret < 0) 2425 p = &(*p)->rb_left; 2426 else 2427 p = &(*p)->rb_right; 2428 } 2429 2430 rb_link_node(&backref->node, parent, p); 2431 rb_insert_color(&backref->node, root); 2432 } 2433 2434 /* 2435 * Note the backref might has changed, and in this case we just return 0. 2436 */ 2437 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, 2438 void *ctx) 2439 { 2440 struct btrfs_file_extent_item *extent; 2441 struct old_sa_defrag_extent *old = ctx; 2442 struct new_sa_defrag_extent *new = old->new; 2443 struct btrfs_path *path = new->path; 2444 struct btrfs_key key; 2445 struct btrfs_root *root; 2446 struct sa_defrag_extent_backref *backref; 2447 struct extent_buffer *leaf; 2448 struct inode *inode = new->inode; 2449 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2450 int slot; 2451 int ret; 2452 u64 extent_offset; 2453 u64 num_bytes; 2454 2455 if (BTRFS_I(inode)->root->root_key.objectid == root_id && 2456 inum == btrfs_ino(BTRFS_I(inode))) 2457 return 0; 2458 2459 key.objectid = root_id; 2460 key.type = BTRFS_ROOT_ITEM_KEY; 2461 key.offset = (u64)-1; 2462 2463 root = btrfs_read_fs_root_no_name(fs_info, &key); 2464 if (IS_ERR(root)) { 2465 if (PTR_ERR(root) == -ENOENT) 2466 return 0; 2467 WARN_ON(1); 2468 btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu", 2469 inum, offset, root_id); 2470 return PTR_ERR(root); 2471 } 2472 2473 key.objectid = inum; 2474 key.type = BTRFS_EXTENT_DATA_KEY; 2475 if (offset > (u64)-1 << 32) 2476 key.offset = 0; 2477 else 2478 key.offset = offset; 2479 2480 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2481 if (WARN_ON(ret < 0)) 2482 return ret; 2483 ret = 0; 2484 2485 while (1) { 2486 cond_resched(); 2487 2488 leaf = path->nodes[0]; 2489 slot = path->slots[0]; 2490 2491 if (slot >= btrfs_header_nritems(leaf)) { 2492 ret = btrfs_next_leaf(root, path); 2493 if (ret < 0) { 2494 goto out; 2495 } else if (ret > 0) { 2496 ret = 0; 2497 goto out; 2498 } 2499 continue; 2500 } 2501 2502 path->slots[0]++; 2503 2504 btrfs_item_key_to_cpu(leaf, &key, slot); 2505 2506 if (key.objectid > inum) 2507 goto out; 2508 2509 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) 2510 continue; 2511 2512 extent = btrfs_item_ptr(leaf, slot, 2513 struct btrfs_file_extent_item); 2514 2515 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) 2516 continue; 2517 2518 /* 2519 * 'offset' refers to the exact key.offset, 2520 * NOT the 'offset' field in btrfs_extent_data_ref, ie. 2521 * (key.offset - extent_offset). 
2522 */ 2523 if (key.offset != offset) 2524 continue; 2525 2526 extent_offset = btrfs_file_extent_offset(leaf, extent); 2527 num_bytes = btrfs_file_extent_num_bytes(leaf, extent); 2528 2529 if (extent_offset >= old->extent_offset + old->offset + 2530 old->len || extent_offset + num_bytes <= 2531 old->extent_offset + old->offset) 2532 continue; 2533 break; 2534 } 2535 2536 backref = kmalloc(sizeof(*backref), GFP_NOFS); 2537 if (!backref) { 2538 ret = -ENOENT; 2539 goto out; 2540 } 2541 2542 backref->root_id = root_id; 2543 backref->inum = inum; 2544 backref->file_pos = offset; 2545 backref->num_bytes = num_bytes; 2546 backref->extent_offset = extent_offset; 2547 backref->generation = btrfs_file_extent_generation(leaf, extent); 2548 backref->old = old; 2549 backref_insert(&new->root, backref); 2550 old->count++; 2551 out: 2552 btrfs_release_path(path); 2553 WARN_ON(ret); 2554 return ret; 2555 } 2556 2557 static noinline bool record_extent_backrefs(struct btrfs_path *path, 2558 struct new_sa_defrag_extent *new) 2559 { 2560 struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); 2561 struct old_sa_defrag_extent *old, *tmp; 2562 int ret; 2563 2564 new->path = path; 2565 2566 list_for_each_entry_safe(old, tmp, &new->head, list) { 2567 ret = iterate_inodes_from_logical(old->bytenr + 2568 old->extent_offset, fs_info, 2569 path, record_one_backref, 2570 old, false); 2571 if (ret < 0 && ret != -ENOENT) 2572 return false; 2573 2574 /* no backref to be processed for this extent */ 2575 if (!old->count) { 2576 list_del(&old->list); 2577 kfree(old); 2578 } 2579 } 2580 2581 if (list_empty(&new->head)) 2582 return false; 2583 2584 return true; 2585 } 2586 2587 static int relink_is_mergable(struct extent_buffer *leaf, 2588 struct btrfs_file_extent_item *fi, 2589 struct new_sa_defrag_extent *new) 2590 { 2591 if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) 2592 return 0; 2593 2594 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) 2595 return 0; 2596 2597 if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) 2598 return 0; 2599 2600 if (btrfs_file_extent_encryption(leaf, fi) || 2601 btrfs_file_extent_other_encoding(leaf, fi)) 2602 return 0; 2603 2604 return 1; 2605 } 2606 2607 /* 2608 * Note the backref might has changed, and in this case we just return 0. 
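 * Returns 1 if the backref was relinked, 0 if it was skipped and a negative
 * errno on failure.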
2609 */ 2610 static noinline int relink_extent_backref(struct btrfs_path *path, 2611 struct sa_defrag_extent_backref *prev, 2612 struct sa_defrag_extent_backref *backref) 2613 { 2614 struct btrfs_file_extent_item *extent; 2615 struct btrfs_file_extent_item *item; 2616 struct btrfs_ordered_extent *ordered; 2617 struct btrfs_trans_handle *trans; 2618 struct btrfs_ref ref = { 0 }; 2619 struct btrfs_root *root; 2620 struct btrfs_key key; 2621 struct extent_buffer *leaf; 2622 struct old_sa_defrag_extent *old = backref->old; 2623 struct new_sa_defrag_extent *new = old->new; 2624 struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); 2625 struct inode *inode; 2626 struct extent_state *cached = NULL; 2627 int ret = 0; 2628 u64 start; 2629 u64 len; 2630 u64 lock_start; 2631 u64 lock_end; 2632 bool merge = false; 2633 int index; 2634 2635 if (prev && prev->root_id == backref->root_id && 2636 prev->inum == backref->inum && 2637 prev->file_pos + prev->num_bytes == backref->file_pos) 2638 merge = true; 2639 2640 /* step 1: get root */ 2641 key.objectid = backref->root_id; 2642 key.type = BTRFS_ROOT_ITEM_KEY; 2643 key.offset = (u64)-1; 2644 2645 index = srcu_read_lock(&fs_info->subvol_srcu); 2646 2647 root = btrfs_read_fs_root_no_name(fs_info, &key); 2648 if (IS_ERR(root)) { 2649 srcu_read_unlock(&fs_info->subvol_srcu, index); 2650 if (PTR_ERR(root) == -ENOENT) 2651 return 0; 2652 return PTR_ERR(root); 2653 } 2654 2655 if (btrfs_root_readonly(root)) { 2656 srcu_read_unlock(&fs_info->subvol_srcu, index); 2657 return 0; 2658 } 2659 2660 /* step 2: get inode */ 2661 key.objectid = backref->inum; 2662 key.type = BTRFS_INODE_ITEM_KEY; 2663 key.offset = 0; 2664 2665 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 2666 if (IS_ERR(inode)) { 2667 srcu_read_unlock(&fs_info->subvol_srcu, index); 2668 return 0; 2669 } 2670 2671 srcu_read_unlock(&fs_info->subvol_srcu, index); 2672 2673 /* step 3: relink backref */ 2674 lock_start = backref->file_pos; 2675 lock_end = backref->file_pos + backref->num_bytes - 1; 2676 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2677 &cached); 2678 2679 ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); 2680 if (ordered) { 2681 btrfs_put_ordered_extent(ordered); 2682 goto out_unlock; 2683 } 2684 2685 trans = btrfs_join_transaction(root); 2686 if (IS_ERR(trans)) { 2687 ret = PTR_ERR(trans); 2688 goto out_unlock; 2689 } 2690 2691 key.objectid = backref->inum; 2692 key.type = BTRFS_EXTENT_DATA_KEY; 2693 key.offset = backref->file_pos; 2694 2695 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2696 if (ret < 0) { 2697 goto out_free_path; 2698 } else if (ret > 0) { 2699 ret = 0; 2700 goto out_free_path; 2701 } 2702 2703 extent = btrfs_item_ptr(path->nodes[0], path->slots[0], 2704 struct btrfs_file_extent_item); 2705 2706 if (btrfs_file_extent_generation(path->nodes[0], extent) != 2707 backref->generation) 2708 goto out_free_path; 2709 2710 btrfs_release_path(path); 2711 2712 start = backref->file_pos; 2713 if (backref->extent_offset < old->extent_offset + old->offset) 2714 start += old->extent_offset + old->offset - 2715 backref->extent_offset; 2716 2717 len = min(backref->extent_offset + backref->num_bytes, 2718 old->extent_offset + old->offset + old->len); 2719 len -= max(backref->extent_offset, old->extent_offset + old->offset); 2720 2721 ret = btrfs_drop_extents(trans, root, inode, start, 2722 start + len, 1); 2723 if (ret) 2724 goto out_free_path; 2725 again: 2726 key.objectid = btrfs_ino(BTRFS_I(inode)); 2727 key.type = 
BTRFS_EXTENT_DATA_KEY; 2728 key.offset = start; 2729 2730 path->leave_spinning = 1; 2731 if (merge) { 2732 struct btrfs_file_extent_item *fi; 2733 u64 extent_len; 2734 struct btrfs_key found_key; 2735 2736 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2737 if (ret < 0) 2738 goto out_free_path; 2739 2740 path->slots[0]--; 2741 leaf = path->nodes[0]; 2742 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2743 2744 fi = btrfs_item_ptr(leaf, path->slots[0], 2745 struct btrfs_file_extent_item); 2746 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 2747 2748 if (extent_len + found_key.offset == start && 2749 relink_is_mergable(leaf, fi, new)) { 2750 btrfs_set_file_extent_num_bytes(leaf, fi, 2751 extent_len + len); 2752 btrfs_mark_buffer_dirty(leaf); 2753 inode_add_bytes(inode, len); 2754 2755 ret = 1; 2756 goto out_free_path; 2757 } else { 2758 merge = false; 2759 btrfs_release_path(path); 2760 goto again; 2761 } 2762 } 2763 2764 ret = btrfs_insert_empty_item(trans, root, path, &key, 2765 sizeof(*extent)); 2766 if (ret) { 2767 btrfs_abort_transaction(trans, ret); 2768 goto out_free_path; 2769 } 2770 2771 leaf = path->nodes[0]; 2772 item = btrfs_item_ptr(leaf, path->slots[0], 2773 struct btrfs_file_extent_item); 2774 btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); 2775 btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); 2776 btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); 2777 btrfs_set_file_extent_num_bytes(leaf, item, len); 2778 btrfs_set_file_extent_ram_bytes(leaf, item, new->len); 2779 btrfs_set_file_extent_generation(leaf, item, trans->transid); 2780 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); 2781 btrfs_set_file_extent_compression(leaf, item, new->compress_type); 2782 btrfs_set_file_extent_encryption(leaf, item, 0); 2783 btrfs_set_file_extent_other_encoding(leaf, item, 0); 2784 2785 btrfs_mark_buffer_dirty(leaf); 2786 inode_add_bytes(inode, len); 2787 btrfs_release_path(path); 2788 2789 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new->bytenr, 2790 new->disk_len, 0); 2791 btrfs_init_data_ref(&ref, backref->root_id, backref->inum, 2792 new->file_pos); /* start - extent_offset */ 2793 ret = btrfs_inc_extent_ref(trans, &ref); 2794 if (ret) { 2795 btrfs_abort_transaction(trans, ret); 2796 goto out_free_path; 2797 } 2798 2799 ret = 1; 2800 out_free_path: 2801 btrfs_release_path(path); 2802 path->leave_spinning = 0; 2803 btrfs_end_transaction(trans); 2804 out_unlock: 2805 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2806 &cached); 2807 iput(inode); 2808 return ret; 2809 } 2810 2811 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) 2812 { 2813 struct old_sa_defrag_extent *old, *tmp; 2814 2815 if (!new) 2816 return; 2817 2818 list_for_each_entry_safe(old, tmp, &new->head, list) { 2819 kfree(old); 2820 } 2821 kfree(new); 2822 } 2823 2824 static void relink_file_extents(struct new_sa_defrag_extent *new) 2825 { 2826 struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); 2827 struct btrfs_path *path; 2828 struct sa_defrag_extent_backref *backref; 2829 struct sa_defrag_extent_backref *prev = NULL; 2830 struct rb_node *node; 2831 int ret; 2832 2833 path = btrfs_alloc_path(); 2834 if (!path) 2835 return; 2836 2837 if (!record_extent_backrefs(path, new)) { 2838 btrfs_free_path(path); 2839 goto out; 2840 } 2841 btrfs_release_path(path); 2842 2843 while (1) { 2844 node = rb_first(&new->root); 2845 if (!node) 2846 break; 2847 rb_erase(node, &new->root); 2848 2849 backref = 
rb_entry(node, struct sa_defrag_extent_backref, node); 2850 2851 ret = relink_extent_backref(path, prev, backref); 2852 WARN_ON(ret < 0); 2853 2854 kfree(prev); 2855 2856 if (ret == 1) 2857 prev = backref; 2858 else 2859 prev = NULL; 2860 cond_resched(); 2861 } 2862 kfree(prev); 2863 2864 btrfs_free_path(path); 2865 out: 2866 free_sa_defrag_extent(new); 2867 2868 atomic_dec(&fs_info->defrag_running); 2869 wake_up(&fs_info->transaction_wait); 2870 } 2871 2872 static struct new_sa_defrag_extent * 2873 record_old_file_extents(struct inode *inode, 2874 struct btrfs_ordered_extent *ordered) 2875 { 2876 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 2877 struct btrfs_root *root = BTRFS_I(inode)->root; 2878 struct btrfs_path *path; 2879 struct btrfs_key key; 2880 struct old_sa_defrag_extent *old; 2881 struct new_sa_defrag_extent *new; 2882 int ret; 2883 2884 new = kmalloc(sizeof(*new), GFP_NOFS); 2885 if (!new) 2886 return NULL; 2887 2888 new->inode = inode; 2889 new->file_pos = ordered->file_offset; 2890 new->len = ordered->len; 2891 new->bytenr = ordered->start; 2892 new->disk_len = ordered->disk_len; 2893 new->compress_type = ordered->compress_type; 2894 new->root = RB_ROOT; 2895 INIT_LIST_HEAD(&new->head); 2896 2897 path = btrfs_alloc_path(); 2898 if (!path) 2899 goto out_kfree; 2900 2901 key.objectid = btrfs_ino(BTRFS_I(inode)); 2902 key.type = BTRFS_EXTENT_DATA_KEY; 2903 key.offset = new->file_pos; 2904 2905 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2906 if (ret < 0) 2907 goto out_free_path; 2908 if (ret > 0 && path->slots[0] > 0) 2909 path->slots[0]--; 2910 2911 /* find out all the old extents for the file range */ 2912 while (1) { 2913 struct btrfs_file_extent_item *extent; 2914 struct extent_buffer *l; 2915 int slot; 2916 u64 num_bytes; 2917 u64 offset; 2918 u64 end; 2919 u64 disk_bytenr; 2920 u64 extent_offset; 2921 2922 l = path->nodes[0]; 2923 slot = path->slots[0]; 2924 2925 if (slot >= btrfs_header_nritems(l)) { 2926 ret = btrfs_next_leaf(root, path); 2927 if (ret < 0) 2928 goto out_free_path; 2929 else if (ret > 0) 2930 break; 2931 continue; 2932 } 2933 2934 btrfs_item_key_to_cpu(l, &key, slot); 2935 2936 if (key.objectid != btrfs_ino(BTRFS_I(inode))) 2937 break; 2938 if (key.type != BTRFS_EXTENT_DATA_KEY) 2939 break; 2940 if (key.offset >= new->file_pos + new->len) 2941 break; 2942 2943 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); 2944 2945 num_bytes = btrfs_file_extent_num_bytes(l, extent); 2946 if (key.offset + num_bytes < new->file_pos) 2947 goto next; 2948 2949 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); 2950 if (!disk_bytenr) 2951 goto next; 2952 2953 extent_offset = btrfs_file_extent_offset(l, extent); 2954 2955 old = kmalloc(sizeof(*old), GFP_NOFS); 2956 if (!old) 2957 goto out_free_path; 2958 2959 offset = max(new->file_pos, key.offset); 2960 end = min(new->file_pos + new->len, key.offset + num_bytes); 2961 2962 old->bytenr = disk_bytenr; 2963 old->extent_offset = extent_offset; 2964 old->offset = offset - key.offset; 2965 old->len = end - offset; 2966 old->new = new; 2967 old->count = 0; 2968 list_add_tail(&old->list, &new->head); 2969 next: 2970 path->slots[0]++; 2971 cond_resched(); 2972 } 2973 2974 btrfs_free_path(path); 2975 atomic_inc(&fs_info->defrag_running); 2976 2977 return new; 2978 2979 out_free_path: 2980 btrfs_free_path(path); 2981 out_kfree: 2982 free_sa_defrag_extent(new); 2983 return NULL; 2984 } 2985 2986 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, 2987 u64 start, u64 len) 
2988 { 2989 struct btrfs_block_group_cache *cache; 2990 2991 cache = btrfs_lookup_block_group(fs_info, start); 2992 ASSERT(cache); 2993 2994 spin_lock(&cache->lock); 2995 cache->delalloc_bytes -= len; 2996 spin_unlock(&cache->lock); 2997 2998 btrfs_put_block_group(cache); 2999 } 3000 3001 /* as ordered data IO finishes, this gets called so we can finish 3002 * an ordered extent if the range of bytes in the file it covers are 3003 * fully written. 3004 */ 3005 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 3006 { 3007 struct inode *inode = ordered_extent->inode; 3008 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3009 struct btrfs_root *root = BTRFS_I(inode)->root; 3010 struct btrfs_trans_handle *trans = NULL; 3011 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3012 struct extent_state *cached_state = NULL; 3013 struct new_sa_defrag_extent *new = NULL; 3014 int compress_type = 0; 3015 int ret = 0; 3016 u64 logical_len = ordered_extent->len; 3017 bool nolock; 3018 bool truncated = false; 3019 bool range_locked = false; 3020 bool clear_new_delalloc_bytes = false; 3021 bool clear_reserved_extent = true; 3022 3023 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3024 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && 3025 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags)) 3026 clear_new_delalloc_bytes = true; 3027 3028 nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); 3029 3030 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 3031 ret = -EIO; 3032 goto out; 3033 } 3034 3035 btrfs_free_io_failure_record(BTRFS_I(inode), 3036 ordered_extent->file_offset, 3037 ordered_extent->file_offset + 3038 ordered_extent->len - 1); 3039 3040 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 3041 truncated = true; 3042 logical_len = ordered_extent->truncated_len; 3043 /* Truncated the entire extent, don't bother adding */ 3044 if (!logical_len) 3045 goto out; 3046 } 3047 3048 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 3049 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 3050 3051 /* 3052 * For mwrite(mmap + memset to write) case, we still reserve 3053 * space for NOCOW range. 
3054 * As NOCOW won't cause a new delayed ref, just free the space 3055 */ 3056 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset, 3057 ordered_extent->len); 3058 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 3059 if (nolock) 3060 trans = btrfs_join_transaction_nolock(root); 3061 else 3062 trans = btrfs_join_transaction(root); 3063 if (IS_ERR(trans)) { 3064 ret = PTR_ERR(trans); 3065 trans = NULL; 3066 goto out; 3067 } 3068 trans->block_rsv = &BTRFS_I(inode)->block_rsv; 3069 ret = btrfs_update_inode_fallback(trans, root, inode); 3070 if (ret) /* -ENOMEM or corruption */ 3071 btrfs_abort_transaction(trans, ret); 3072 goto out; 3073 } 3074 3075 range_locked = true; 3076 lock_extent_bits(io_tree, ordered_extent->file_offset, 3077 ordered_extent->file_offset + ordered_extent->len - 1, 3078 &cached_state); 3079 3080 ret = test_range_bit(io_tree, ordered_extent->file_offset, 3081 ordered_extent->file_offset + ordered_extent->len - 1, 3082 EXTENT_DEFRAG, 0, cached_state); 3083 if (ret) { 3084 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 3085 if (0 && last_snapshot >= BTRFS_I(inode)->generation) 3086 /* the inode is shared */ 3087 new = record_old_file_extents(inode, ordered_extent); 3088 3089 clear_extent_bit(io_tree, ordered_extent->file_offset, 3090 ordered_extent->file_offset + ordered_extent->len - 1, 3091 EXTENT_DEFRAG, 0, 0, &cached_state); 3092 } 3093 3094 if (nolock) 3095 trans = btrfs_join_transaction_nolock(root); 3096 else 3097 trans = btrfs_join_transaction(root); 3098 if (IS_ERR(trans)) { 3099 ret = PTR_ERR(trans); 3100 trans = NULL; 3101 goto out; 3102 } 3103 3104 trans->block_rsv = &BTRFS_I(inode)->block_rsv; 3105 3106 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 3107 compress_type = ordered_extent->compress_type; 3108 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3109 BUG_ON(compress_type); 3110 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset, 3111 ordered_extent->len); 3112 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), 3113 ordered_extent->file_offset, 3114 ordered_extent->file_offset + 3115 logical_len); 3116 } else { 3117 BUG_ON(root == fs_info->tree_root); 3118 ret = insert_reserved_file_extent(trans, inode, 3119 ordered_extent->file_offset, 3120 ordered_extent->start, 3121 ordered_extent->disk_len, 3122 logical_len, logical_len, 3123 compress_type, 0, 0, 3124 BTRFS_FILE_EXTENT_REG); 3125 if (!ret) { 3126 clear_reserved_extent = false; 3127 btrfs_release_delalloc_bytes(fs_info, 3128 ordered_extent->start, 3129 ordered_extent->disk_len); 3130 } 3131 } 3132 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 3133 ordered_extent->file_offset, ordered_extent->len, 3134 trans->transid); 3135 if (ret < 0) { 3136 btrfs_abort_transaction(trans, ret); 3137 goto out; 3138 } 3139 3140 ret = add_pending_csums(trans, inode, &ordered_extent->list); 3141 if (ret) { 3142 btrfs_abort_transaction(trans, ret); 3143 goto out; 3144 } 3145 3146 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 3147 ret = btrfs_update_inode_fallback(trans, root, inode); 3148 if (ret) { /* -ENOMEM or corruption */ 3149 btrfs_abort_transaction(trans, ret); 3150 goto out; 3151 } 3152 ret = 0; 3153 out: 3154 if (range_locked || clear_new_delalloc_bytes) { 3155 unsigned int clear_bits = 0; 3156 3157 if (range_locked) 3158 clear_bits |= EXTENT_LOCKED; 3159 if (clear_new_delalloc_bytes) 3160 clear_bits |= EXTENT_DELALLOC_NEW; 3161 clear_extent_bit(&BTRFS_I(inode)->io_tree, 3162 ordered_extent->file_offset, 3163 
ordered_extent->file_offset + 3164 ordered_extent->len - 1, 3165 clear_bits, 3166 (clear_bits & EXTENT_LOCKED) ? 1 : 0, 3167 0, &cached_state); 3168 } 3169 3170 if (trans) 3171 btrfs_end_transaction(trans); 3172 3173 if (ret || truncated) { 3174 u64 start, end; 3175 3176 if (truncated) 3177 start = ordered_extent->file_offset + logical_len; 3178 else 3179 start = ordered_extent->file_offset; 3180 end = ordered_extent->file_offset + ordered_extent->len - 1; 3181 clear_extent_uptodate(io_tree, start, end, NULL); 3182 3183 /* Drop the cache for the part of the extent we didn't write. */ 3184 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); 3185 3186 /* 3187 * If the ordered extent had an IOERR or something else went 3188 * wrong we need to return the space for this ordered extent 3189 * back to the allocator. We only free the extent in the 3190 * truncated case if we didn't write out the extent at all. 3191 * 3192 * If we made it past insert_reserved_file_extent before we 3193 * errored out then we don't need to do this as the accounting 3194 * has already been done. 3195 */ 3196 if ((ret || !logical_len) && 3197 clear_reserved_extent && 3198 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 3199 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) 3200 btrfs_free_reserved_extent(fs_info, 3201 ordered_extent->start, 3202 ordered_extent->disk_len, 1); 3203 } 3204 3205 3206 /* 3207 * This needs to be done to make sure anybody waiting knows we are done 3208 * updating everything for this ordered extent. 3209 */ 3210 btrfs_remove_ordered_extent(inode, ordered_extent); 3211 3212 /* for snapshot-aware defrag */ 3213 if (new) { 3214 if (ret) { 3215 free_sa_defrag_extent(new); 3216 atomic_dec(&fs_info->defrag_running); 3217 } else { 3218 relink_file_extents(new); 3219 } 3220 } 3221 3222 /* once for us */ 3223 btrfs_put_ordered_extent(ordered_extent); 3224 /* once for the tree */ 3225 btrfs_put_ordered_extent(ordered_extent); 3226 3227 return ret; 3228 } 3229 3230 static void finish_ordered_fn(struct btrfs_work *work) 3231 { 3232 struct btrfs_ordered_extent *ordered_extent; 3233 ordered_extent = container_of(work, struct btrfs_ordered_extent, work); 3234 btrfs_finish_ordered_io(ordered_extent); 3235 } 3236 3237 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, 3238 u64 end, int uptodate) 3239 { 3240 struct inode *inode = page->mapping->host; 3241 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3242 struct btrfs_ordered_extent *ordered_extent = NULL; 3243 struct btrfs_workqueue *wq; 3244 btrfs_work_func_t func; 3245 3246 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 3247 3248 ClearPagePrivate2(page); 3249 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 3250 end - start + 1, uptodate)) 3251 return; 3252 3253 if (btrfs_is_free_space_inode(BTRFS_I(inode))) { 3254 wq = fs_info->endio_freespace_worker; 3255 func = btrfs_freespace_write_helper; 3256 } else { 3257 wq = fs_info->endio_write_workers; 3258 func = btrfs_endio_write_helper; 3259 } 3260 3261 btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, 3262 NULL); 3263 btrfs_queue_work(wq, &ordered_extent->work); 3264 } 3265 3266 static int __readpage_endio_check(struct inode *inode, 3267 struct btrfs_io_bio *io_bio, 3268 int icsum, struct page *page, 3269 int pgoff, u64 start, size_t len) 3270 { 3271 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3272 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); 3273 char *kaddr; 3274 u16 csum_size = 
btrfs_super_csum_size(fs_info->super_copy); 3275 u8 *csum_expected; 3276 u8 csum[BTRFS_CSUM_SIZE]; 3277 3278 csum_expected = ((u8 *)io_bio->csum) + icsum * csum_size; 3279 3280 kaddr = kmap_atomic(page); 3281 shash->tfm = fs_info->csum_shash; 3282 3283 crypto_shash_init(shash); 3284 crypto_shash_update(shash, kaddr + pgoff, len); 3285 crypto_shash_final(shash, csum); 3286 3287 if (memcmp(csum, csum_expected, csum_size)) 3288 goto zeroit; 3289 3290 kunmap_atomic(kaddr); 3291 return 0; 3292 zeroit: 3293 btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, 3294 io_bio->mirror_num); 3295 memset(kaddr + pgoff, 1, len); 3296 flush_dcache_page(page); 3297 kunmap_atomic(kaddr); 3298 return -EIO; 3299 } 3300 3301 /* 3302 * when reads are done, we need to check csums to verify the data is correct 3303 * if there's a match, we allow the bio to finish. If not, the code in 3304 * extent_io.c will try to find good copies for us. 3305 */ 3306 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, 3307 u64 phy_offset, struct page *page, 3308 u64 start, u64 end, int mirror) 3309 { 3310 size_t offset = start - page_offset(page); 3311 struct inode *inode = page->mapping->host; 3312 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3313 struct btrfs_root *root = BTRFS_I(inode)->root; 3314 3315 if (PageChecked(page)) { 3316 ClearPageChecked(page); 3317 return 0; 3318 } 3319 3320 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 3321 return 0; 3322 3323 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 3324 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 3325 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM); 3326 return 0; 3327 } 3328 3329 phy_offset >>= inode->i_sb->s_blocksize_bits; 3330 return __readpage_endio_check(inode, io_bio, phy_offset, page, offset, 3331 start, (size_t)(end - start + 1)); 3332 } 3333 3334 /* 3335 * btrfs_add_delayed_iput - perform a delayed iput on @inode 3336 * 3337 * @inode: The inode we want to perform iput on 3338 * 3339 * This function uses the generic vfs_inode::i_count to track whether we should 3340 * just decrement it (in case it's > 1) or if this is the last iput then link 3341 * the inode to the delayed iput machinery. Delayed iputs are processed at 3342 * transaction commit time/superblock commit/cleaner kthread. 
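 * See btrfs_run_delayed_iputs() below, which drains the list and drops the
 * deferred references.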
3343 */ 3344 void btrfs_add_delayed_iput(struct inode *inode) 3345 { 3346 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3347 struct btrfs_inode *binode = BTRFS_I(inode); 3348 3349 if (atomic_add_unless(&inode->i_count, -1, 1)) 3350 return; 3351 3352 atomic_inc(&fs_info->nr_delayed_iputs); 3353 spin_lock(&fs_info->delayed_iput_lock); 3354 ASSERT(list_empty(&binode->delayed_iput)); 3355 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); 3356 spin_unlock(&fs_info->delayed_iput_lock); 3357 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) 3358 wake_up_process(fs_info->cleaner_kthread); 3359 } 3360 3361 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info, 3362 struct btrfs_inode *inode) 3363 { 3364 list_del_init(&inode->delayed_iput); 3365 spin_unlock(&fs_info->delayed_iput_lock); 3366 iput(&inode->vfs_inode); 3367 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) 3368 wake_up(&fs_info->delayed_iputs_wait); 3369 spin_lock(&fs_info->delayed_iput_lock); 3370 } 3371 3372 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info, 3373 struct btrfs_inode *inode) 3374 { 3375 if (!list_empty(&inode->delayed_iput)) { 3376 spin_lock(&fs_info->delayed_iput_lock); 3377 if (!list_empty(&inode->delayed_iput)) 3378 run_delayed_iput_locked(fs_info, inode); 3379 spin_unlock(&fs_info->delayed_iput_lock); 3380 } 3381 } 3382 3383 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3384 { 3385 3386 spin_lock(&fs_info->delayed_iput_lock); 3387 while (!list_empty(&fs_info->delayed_iputs)) { 3388 struct btrfs_inode *inode; 3389 3390 inode = list_first_entry(&fs_info->delayed_iputs, 3391 struct btrfs_inode, delayed_iput); 3392 run_delayed_iput_locked(fs_info, inode); 3393 } 3394 spin_unlock(&fs_info->delayed_iput_lock); 3395 } 3396 3397 /** 3398 * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running 3399 * @fs_info - the fs_info for this fs 3400 * @return - EINTR if we were killed, 0 if nothing's pending 3401 * 3402 * This will wait on any delayed iputs that are currently running with KILLABLE 3403 * set. Once they are all done running we will return, unless we are killed in 3404 * which case we return EINTR. This helps in user operations like fallocate etc 3405 * that might get blocked on the iputs. 3406 */ 3407 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info) 3408 { 3409 int ret = wait_event_killable(fs_info->delayed_iputs_wait, 3410 atomic_read(&fs_info->nr_delayed_iputs) == 0); 3411 if (ret) 3412 return -EINTR; 3413 return 0; 3414 } 3415 3416 /* 3417 * This creates an orphan entry for the given inode in case something goes wrong 3418 * in the middle of an unlink. 3419 */ 3420 int btrfs_orphan_add(struct btrfs_trans_handle *trans, 3421 struct btrfs_inode *inode) 3422 { 3423 int ret; 3424 3425 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); 3426 if (ret && ret != -EEXIST) { 3427 btrfs_abort_transaction(trans, ret); 3428 return ret; 3429 } 3430 3431 return 0; 3432 } 3433 3434 /* 3435 * We have done the delete so we can go ahead and remove the orphan item for 3436 * this particular inode. 3437 */ 3438 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3439 struct btrfs_inode *inode) 3440 { 3441 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); 3442 } 3443 3444 /* 3445 * this cleans up any orphans that may be left on the list from the last use 3446 * of this root. 
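 * Orphan items whose inode is missing or still has a positive link count are
 * simply deleted; for the rest we iput() the inode and let normal eviction
 * remove the inode and its items.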
3447 */ 3448 int btrfs_orphan_cleanup(struct btrfs_root *root) 3449 { 3450 struct btrfs_fs_info *fs_info = root->fs_info; 3451 struct btrfs_path *path; 3452 struct extent_buffer *leaf; 3453 struct btrfs_key key, found_key; 3454 struct btrfs_trans_handle *trans; 3455 struct inode *inode; 3456 u64 last_objectid = 0; 3457 int ret = 0, nr_unlink = 0; 3458 3459 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 3460 return 0; 3461 3462 path = btrfs_alloc_path(); 3463 if (!path) { 3464 ret = -ENOMEM; 3465 goto out; 3466 } 3467 path->reada = READA_BACK; 3468 3469 key.objectid = BTRFS_ORPHAN_OBJECTID; 3470 key.type = BTRFS_ORPHAN_ITEM_KEY; 3471 key.offset = (u64)-1; 3472 3473 while (1) { 3474 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3475 if (ret < 0) 3476 goto out; 3477 3478 /* 3479 * if ret == 0 means we found what we were searching for, which 3480 * is weird, but possible, so only screw with path if we didn't 3481 * find the key and see if we have stuff that matches 3482 */ 3483 if (ret > 0) { 3484 ret = 0; 3485 if (path->slots[0] == 0) 3486 break; 3487 path->slots[0]--; 3488 } 3489 3490 /* pull out the item */ 3491 leaf = path->nodes[0]; 3492 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3493 3494 /* make sure the item matches what we want */ 3495 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3496 break; 3497 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3498 break; 3499 3500 /* release the path since we're done with it */ 3501 btrfs_release_path(path); 3502 3503 /* 3504 * this is where we are basically btrfs_lookup, without the 3505 * crossing root thing. we store the inode number in the 3506 * offset of the orphan item. 3507 */ 3508 3509 if (found_key.offset == last_objectid) { 3510 btrfs_err(fs_info, 3511 "Error removing orphan entry, stopping orphan cleanup"); 3512 ret = -EINVAL; 3513 goto out; 3514 } 3515 3516 last_objectid = found_key.offset; 3517 3518 found_key.objectid = found_key.offset; 3519 found_key.type = BTRFS_INODE_ITEM_KEY; 3520 found_key.offset = 0; 3521 inode = btrfs_iget(fs_info->sb, &found_key, root, NULL); 3522 ret = PTR_ERR_OR_ZERO(inode); 3523 if (ret && ret != -ENOENT) 3524 goto out; 3525 3526 if (ret == -ENOENT && root == fs_info->tree_root) { 3527 struct btrfs_root *dead_root; 3528 struct btrfs_fs_info *fs_info = root->fs_info; 3529 int is_dead_root = 0; 3530 3531 /* 3532 * this is an orphan in the tree root. Currently these 3533 * could come from 2 sources: 3534 * a) a snapshot deletion in progress 3535 * b) a free space cache inode 3536 * We need to distinguish those two, as the snapshot 3537 * orphan must not get deleted. 3538 * find_dead_roots already ran before us, so if this 3539 * is a snapshot deletion, we should find the root 3540 * in the dead_roots list 3541 */ 3542 spin_lock(&fs_info->trans_lock); 3543 list_for_each_entry(dead_root, &fs_info->dead_roots, 3544 root_list) { 3545 if (dead_root->root_key.objectid == 3546 found_key.objectid) { 3547 is_dead_root = 1; 3548 break; 3549 } 3550 } 3551 spin_unlock(&fs_info->trans_lock); 3552 if (is_dead_root) { 3553 /* prevent this orphan from being found again */ 3554 key.offset = found_key.objectid - 1; 3555 continue; 3556 } 3557 3558 } 3559 3560 /* 3561 * If we have an inode with links, there are a couple of 3562 * possibilities. Old kernels (before v3.12) used to create an 3563 * orphan item for truncate indicating that there were possibly 3564 * extent items past i_size that needed to be deleted. 
In v3.12, 3565 * truncate was changed to update i_size in sync with the extent 3566 * items, but the (useless) orphan item was still created. Since 3567 * v4.18, we don't create the orphan item for truncate at all. 3568 * 3569 * So, this item could mean that we need to do a truncate, but 3570 * only if this filesystem was last used on a pre-v3.12 kernel 3571 * and was not cleanly unmounted. The odds of that are quite 3572 * slim, and it's a pain to do the truncate now, so just delete 3573 * the orphan item. 3574 * 3575 * It's also possible that this orphan item was supposed to be 3576 * deleted but wasn't. The inode number may have been reused, 3577 * but either way, we can delete the orphan item. 3578 */ 3579 if (ret == -ENOENT || inode->i_nlink) { 3580 if (!ret) 3581 iput(inode); 3582 trans = btrfs_start_transaction(root, 1); 3583 if (IS_ERR(trans)) { 3584 ret = PTR_ERR(trans); 3585 goto out; 3586 } 3587 btrfs_debug(fs_info, "auto deleting %Lu", 3588 found_key.objectid); 3589 ret = btrfs_del_orphan_item(trans, root, 3590 found_key.objectid); 3591 btrfs_end_transaction(trans); 3592 if (ret) 3593 goto out; 3594 continue; 3595 } 3596 3597 nr_unlink++; 3598 3599 /* this will do delete_inode and everything for us */ 3600 iput(inode); 3601 } 3602 /* release the path since we're done with it */ 3603 btrfs_release_path(path); 3604 3605 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 3606 3607 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3608 trans = btrfs_join_transaction(root); 3609 if (!IS_ERR(trans)) 3610 btrfs_end_transaction(trans); 3611 } 3612 3613 if (nr_unlink) 3614 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); 3615 3616 out: 3617 if (ret) 3618 btrfs_err(fs_info, "could not do orphan cleanup %d", ret); 3619 btrfs_free_path(path); 3620 return ret; 3621 } 3622 3623 /* 3624 * very simple check to peek ahead in the leaf looking for xattrs. If we 3625 * don't find any xattrs, we know there can't be any acls. 3626 * 3627 * slot is the slot the inode is in, objectid is the objectid of the inode 3628 */ 3629 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3630 int slot, u64 objectid, 3631 int *first_xattr_slot) 3632 { 3633 u32 nritems = btrfs_header_nritems(leaf); 3634 struct btrfs_key found_key; 3635 static u64 xattr_access = 0; 3636 static u64 xattr_default = 0; 3637 int scanned = 0; 3638 3639 if (!xattr_access) { 3640 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, 3641 strlen(XATTR_NAME_POSIX_ACL_ACCESS)); 3642 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, 3643 strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); 3644 } 3645 3646 slot++; 3647 *first_xattr_slot = -1; 3648 while (slot < nritems) { 3649 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3650 3651 /* we found a different objectid, there must not be acls */ 3652 if (found_key.objectid != objectid) 3653 return 0; 3654 3655 /* we found an xattr, assume we've got an acl */ 3656 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3657 if (*first_xattr_slot == -1) 3658 *first_xattr_slot = slot; 3659 if (found_key.offset == xattr_access || 3660 found_key.offset == xattr_default) 3661 return 1; 3662 } 3663 3664 /* 3665 * we found a key greater than an xattr key, there can't 3666 * be any acls later on 3667 */ 3668 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3669 return 0; 3670 3671 slot++; 3672 scanned++; 3673 3674 /* 3675 * it goes inode, inode backrefs, xattrs, extents, 3676 * so if there are a ton of hard links to an inode there can 3677 * be a lot of backrefs. 
Don't waste time searching too hard, 3678 * this is just an optimization 3679 */ 3680 if (scanned >= 8) 3681 break; 3682 } 3683 /* we hit the end of the leaf before we found an xattr or 3684 * something larger than an xattr. We have to assume the inode 3685 * has acls 3686 */ 3687 if (*first_xattr_slot == -1) 3688 *first_xattr_slot = slot; 3689 return 1; 3690 } 3691 3692 /* 3693 * read an inode from the btree into the in-memory inode 3694 */ 3695 static int btrfs_read_locked_inode(struct inode *inode, 3696 struct btrfs_path *in_path) 3697 { 3698 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3699 struct btrfs_path *path = in_path; 3700 struct extent_buffer *leaf; 3701 struct btrfs_inode_item *inode_item; 3702 struct btrfs_root *root = BTRFS_I(inode)->root; 3703 struct btrfs_key location; 3704 unsigned long ptr; 3705 int maybe_acls; 3706 u32 rdev; 3707 int ret; 3708 bool filled = false; 3709 int first_xattr_slot; 3710 3711 ret = btrfs_fill_inode(inode, &rdev); 3712 if (!ret) 3713 filled = true; 3714 3715 if (!path) { 3716 path = btrfs_alloc_path(); 3717 if (!path) 3718 return -ENOMEM; 3719 } 3720 3721 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3722 3723 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3724 if (ret) { 3725 if (path != in_path) 3726 btrfs_free_path(path); 3727 return ret; 3728 } 3729 3730 leaf = path->nodes[0]; 3731 3732 if (filled) 3733 goto cache_index; 3734 3735 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3736 struct btrfs_inode_item); 3737 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3738 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3739 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3740 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3741 btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); 3742 3743 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3744 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3745 3746 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3747 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3748 3749 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); 3750 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); 3751 3752 BTRFS_I(inode)->i_otime.tv_sec = 3753 btrfs_timespec_sec(leaf, &inode_item->otime); 3754 BTRFS_I(inode)->i_otime.tv_nsec = 3755 btrfs_timespec_nsec(leaf, &inode_item->otime); 3756 3757 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 3758 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 3759 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); 3760 3761 inode_set_iversion_queried(inode, 3762 btrfs_inode_sequence(leaf, inode_item)); 3763 inode->i_generation = BTRFS_I(inode)->generation; 3764 inode->i_rdev = 0; 3765 rdev = btrfs_inode_rdev(leaf, inode_item); 3766 3767 BTRFS_I(inode)->index_cnt = (u64)-1; 3768 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 3769 3770 cache_index: 3771 /* 3772 * If we were modified in the current generation and evicted from memory 3773 * and then re-read we need to do a full sync since we don't have any 3774 * idea about which extents were modified before we were evicted from 3775 * cache. 3776 * 3777 * This is required for both inode re-read from disk and delayed inode 3778 * in delayed_nodes_tree. 
3779 */ 3780 if (BTRFS_I(inode)->last_trans == fs_info->generation) 3781 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3782 &BTRFS_I(inode)->runtime_flags); 3783 3784 /* 3785 * We don't persist the id of the transaction where an unlink operation 3786 * against the inode was last made. So here we assume the inode might 3787 * have been evicted, and therefore the exact value of last_unlink_trans 3788 * lost, and set it to last_trans to avoid metadata inconsistencies 3789 * between the inode and its parent if the inode is fsync'ed and the log 3790 * replayed. For example, in the scenario: 3791 * 3792 * touch mydir/foo 3793 * ln mydir/foo mydir/bar 3794 * sync 3795 * unlink mydir/bar 3796 * echo 2 > /proc/sys/vm/drop_caches # evicts inode 3797 * xfs_io -c fsync mydir/foo 3798 * <power failure> 3799 * mount fs, triggers fsync log replay 3800 * 3801 * We must make sure that when we fsync our inode foo we also log its 3802 * parent inode, otherwise after log replay the parent still has the 3803 * dentry with the "bar" name but our inode foo has a link count of 1 3804 * and doesn't have an inode ref with the name "bar" anymore. 3805 * 3806 * Setting last_unlink_trans to last_trans is a pessimistic approach, 3807 * but it guarantees correctness at the expense of occasional full 3808 * transaction commits on fsync if our inode is a directory, or if our 3809 * inode is not a directory, logging its parent unnecessarily. 3810 */ 3811 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3812 3813 path->slots[0]++; 3814 if (inode->i_nlink != 1 || 3815 path->slots[0] >= btrfs_header_nritems(leaf)) 3816 goto cache_acl; 3817 3818 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3819 if (location.objectid != btrfs_ino(BTRFS_I(inode))) 3820 goto cache_acl; 3821 3822 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3823 if (location.type == BTRFS_INODE_REF_KEY) { 3824 struct btrfs_inode_ref *ref; 3825 3826 ref = (struct btrfs_inode_ref *)ptr; 3827 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3828 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3829 struct btrfs_inode_extref *extref; 3830 3831 extref = (struct btrfs_inode_extref *)ptr; 3832 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3833 extref); 3834 } 3835 cache_acl: 3836 /* 3837 * try to precache a NULL acl entry for files that don't have 3838 * any xattrs or acls 3839 */ 3840 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3841 btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); 3842 if (first_xattr_slot != -1) { 3843 path->slots[0] = first_xattr_slot; 3844 ret = btrfs_load_inode_props(inode, path); 3845 if (ret) 3846 btrfs_err(fs_info, 3847 "error loading props for ino %llu (root %llu): %d", 3848 btrfs_ino(BTRFS_I(inode)), 3849 root->root_key.objectid, ret); 3850 } 3851 if (path != in_path) 3852 btrfs_free_path(path); 3853 3854 if (!maybe_acls) 3855 cache_no_acl(inode); 3856 3857 switch (inode->i_mode & S_IFMT) { 3858 case S_IFREG: 3859 inode->i_mapping->a_ops = &btrfs_aops; 3860 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 3861 inode->i_fop = &btrfs_file_operations; 3862 inode->i_op = &btrfs_file_inode_operations; 3863 break; 3864 case S_IFDIR: 3865 inode->i_fop = &btrfs_dir_file_operations; 3866 inode->i_op = &btrfs_dir_inode_operations; 3867 break; 3868 case S_IFLNK: 3869 inode->i_op = &btrfs_symlink_inode_operations; 3870 inode_nohighmem(inode); 3871 inode->i_mapping->a_ops = &btrfs_aops; 3872 break; 3873 default: 3874 inode->i_op = &btrfs_special_inode_operations; 3875 
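		/* device nodes, fifos and sockets: let init_special_inode()
		 * set up i_fop and i_rdev as appropriate */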
init_special_inode(inode, inode->i_mode, rdev); 3876 break; 3877 } 3878 3879 btrfs_sync_inode_flags_to_i_flags(inode); 3880 return 0; 3881 } 3882 3883 /* 3884 * given a leaf and an inode, copy the inode fields into the leaf 3885 */ 3886 static void fill_inode_item(struct btrfs_trans_handle *trans, 3887 struct extent_buffer *leaf, 3888 struct btrfs_inode_item *item, 3889 struct inode *inode) 3890 { 3891 struct btrfs_map_token token; 3892 3893 btrfs_init_map_token(&token, leaf); 3894 3895 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3896 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3897 btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, 3898 &token); 3899 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3900 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3901 3902 btrfs_set_token_timespec_sec(leaf, &item->atime, 3903 inode->i_atime.tv_sec, &token); 3904 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3905 inode->i_atime.tv_nsec, &token); 3906 3907 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3908 inode->i_mtime.tv_sec, &token); 3909 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3910 inode->i_mtime.tv_nsec, &token); 3911 3912 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3913 inode->i_ctime.tv_sec, &token); 3914 btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3915 inode->i_ctime.tv_nsec, &token); 3916 3917 btrfs_set_token_timespec_sec(leaf, &item->otime, 3918 BTRFS_I(inode)->i_otime.tv_sec, &token); 3919 btrfs_set_token_timespec_nsec(leaf, &item->otime, 3920 BTRFS_I(inode)->i_otime.tv_nsec, &token); 3921 3922 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3923 &token); 3924 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, 3925 &token); 3926 btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode), 3927 &token); 3928 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3929 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3930 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3931 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3932 } 3933 3934 /* 3935 * copy everything in the in-memory inode into the btree. 3936 */ 3937 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 3938 struct btrfs_root *root, struct inode *inode) 3939 { 3940 struct btrfs_inode_item *inode_item; 3941 struct btrfs_path *path; 3942 struct extent_buffer *leaf; 3943 int ret; 3944 3945 path = btrfs_alloc_path(); 3946 if (!path) 3947 return -ENOMEM; 3948 3949 path->leave_spinning = 1; 3950 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 3951 1); 3952 if (ret) { 3953 if (ret > 0) 3954 ret = -ENOENT; 3955 goto failed; 3956 } 3957 3958 leaf = path->nodes[0]; 3959 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3960 struct btrfs_inode_item); 3961 3962 fill_inode_item(trans, leaf, inode_item, inode); 3963 btrfs_mark_buffer_dirty(leaf); 3964 btrfs_set_inode_last_trans(trans, inode); 3965 ret = 0; 3966 failed: 3967 btrfs_free_path(path); 3968 return ret; 3969 } 3970 3971 /* 3972 * copy everything in the in-memory inode into the btree. 3973 */ 3974 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 3975 struct btrfs_root *root, struct inode *inode) 3976 { 3977 struct btrfs_fs_info *fs_info = root->fs_info; 3978 int ret; 3979 3980 /* 3981 * If the inode is a free space inode, we can deadlock during commit 3982 * if we put it into the delayed code. 
3983 * 3984 * The data relocation inode should also be directly updated 3985 * without delay 3986 */ 3987 if (!btrfs_is_free_space_inode(BTRFS_I(inode)) 3988 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 3989 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { 3990 btrfs_update_root_times(trans, root); 3991 3992 ret = btrfs_delayed_update_inode(trans, root, inode); 3993 if (!ret) 3994 btrfs_set_inode_last_trans(trans, inode); 3995 return ret; 3996 } 3997 3998 return btrfs_update_inode_item(trans, root, inode); 3999 } 4000 4001 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 4002 struct btrfs_root *root, 4003 struct inode *inode) 4004 { 4005 int ret; 4006 4007 ret = btrfs_update_inode(trans, root, inode); 4008 if (ret == -ENOSPC) 4009 return btrfs_update_inode_item(trans, root, inode); 4010 return ret; 4011 } 4012 4013 /* 4014 * unlink helper that gets used here in inode.c and in the tree logging 4015 * recovery code. It remove a link in a directory with a given name, and 4016 * also drops the back refs in the inode to the directory 4017 */ 4018 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4019 struct btrfs_root *root, 4020 struct btrfs_inode *dir, 4021 struct btrfs_inode *inode, 4022 const char *name, int name_len) 4023 { 4024 struct btrfs_fs_info *fs_info = root->fs_info; 4025 struct btrfs_path *path; 4026 int ret = 0; 4027 struct btrfs_dir_item *di; 4028 u64 index; 4029 u64 ino = btrfs_ino(inode); 4030 u64 dir_ino = btrfs_ino(dir); 4031 4032 path = btrfs_alloc_path(); 4033 if (!path) { 4034 ret = -ENOMEM; 4035 goto out; 4036 } 4037 4038 path->leave_spinning = 1; 4039 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4040 name, name_len, -1); 4041 if (IS_ERR_OR_NULL(di)) { 4042 ret = di ? PTR_ERR(di) : -ENOENT; 4043 goto err; 4044 } 4045 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4046 if (ret) 4047 goto err; 4048 btrfs_release_path(path); 4049 4050 /* 4051 * If we don't have dir index, we have to get it by looking up 4052 * the inode ref, since we get the inode ref, remove it directly, 4053 * it is unnecessary to do delayed deletion. 4054 * 4055 * But if we have dir index, needn't search inode ref to get it. 4056 * Since the inode ref is close to the inode item, it is better 4057 * that we delay to delete it, and just do this deletion when 4058 * we update the inode item. 
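 *
 * Put differently: when inode->dir_index is cached we can queue the
 * inode ref removal as a delayed item via btrfs_delayed_delete_inode_ref()
 * and skip the btrfs_del_inode_ref() lookup below; without the cached
 * index we must do that lookup anyway, and since it hands us the ref we
 * simply delete it on the spot.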
4059 */ 4060 if (inode->dir_index) { 4061 ret = btrfs_delayed_delete_inode_ref(inode); 4062 if (!ret) { 4063 index = inode->dir_index; 4064 goto skip_backref; 4065 } 4066 } 4067 4068 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, 4069 dir_ino, &index); 4070 if (ret) { 4071 btrfs_info(fs_info, 4072 "failed to delete reference to %.*s, inode %llu parent %llu", 4073 name_len, name, ino, dir_ino); 4074 btrfs_abort_transaction(trans, ret); 4075 goto err; 4076 } 4077 skip_backref: 4078 ret = btrfs_delete_delayed_dir_index(trans, dir, index); 4079 if (ret) { 4080 btrfs_abort_transaction(trans, ret); 4081 goto err; 4082 } 4083 4084 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, 4085 dir_ino); 4086 if (ret != 0 && ret != -ENOENT) { 4087 btrfs_abort_transaction(trans, ret); 4088 goto err; 4089 } 4090 4091 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, 4092 index); 4093 if (ret == -ENOENT) 4094 ret = 0; 4095 else if (ret) 4096 btrfs_abort_transaction(trans, ret); 4097 4098 /* 4099 * If we have a pending delayed iput we could end up with the final iput 4100 * being run in btrfs-cleaner context. If we have enough of these built 4101 * up we can end up burning a lot of time in btrfs-cleaner without any 4102 * way to throttle the unlinks. Since we're currently holding a ref on 4103 * the inode we can run the delayed iput here without any issues as the 4104 * final iput won't be done until after we drop the ref we're currently 4105 * holding. 4106 */ 4107 btrfs_run_delayed_iput(fs_info, inode); 4108 err: 4109 btrfs_free_path(path); 4110 if (ret) 4111 goto out; 4112 4113 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); 4114 inode_inc_iversion(&inode->vfs_inode); 4115 inode_inc_iversion(&dir->vfs_inode); 4116 inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = 4117 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode); 4118 ret = btrfs_update_inode(trans, root, &dir->vfs_inode); 4119 out: 4120 return ret; 4121 } 4122 4123 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 4124 struct btrfs_root *root, 4125 struct btrfs_inode *dir, struct btrfs_inode *inode, 4126 const char *name, int name_len) 4127 { 4128 int ret; 4129 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 4130 if (!ret) { 4131 drop_nlink(&inode->vfs_inode); 4132 ret = btrfs_update_inode(trans, root, &inode->vfs_inode); 4133 } 4134 return ret; 4135 } 4136 4137 /* 4138 * helper to start transaction for unlink and rmdir. 4139 * 4140 * unlink and rmdir are special in btrfs, they do not always free space, so 4141 * if we cannot make our reservations the normal way try and see if there is 4142 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4143 * allow the unlink to occur. 
4144 */ 4145 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 4146 { 4147 struct btrfs_root *root = BTRFS_I(dir)->root; 4148 4149 /* 4150 * 1 for the possible orphan item 4151 * 1 for the dir item 4152 * 1 for the dir index 4153 * 1 for the inode ref 4154 * 1 for the inode 4155 */ 4156 return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); 4157 } 4158 4159 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4160 { 4161 struct btrfs_root *root = BTRFS_I(dir)->root; 4162 struct btrfs_trans_handle *trans; 4163 struct inode *inode = d_inode(dentry); 4164 int ret; 4165 4166 trans = __unlink_start_trans(dir); 4167 if (IS_ERR(trans)) 4168 return PTR_ERR(trans); 4169 4170 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), 4171 0); 4172 4173 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 4174 BTRFS_I(d_inode(dentry)), dentry->d_name.name, 4175 dentry->d_name.len); 4176 if (ret) 4177 goto out; 4178 4179 if (inode->i_nlink == 0) { 4180 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 4181 if (ret) 4182 goto out; 4183 } 4184 4185 out: 4186 btrfs_end_transaction(trans); 4187 btrfs_btree_balance_dirty(root->fs_info); 4188 return ret; 4189 } 4190 4191 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4192 struct inode *dir, u64 objectid, 4193 const char *name, int name_len) 4194 { 4195 struct btrfs_root *root = BTRFS_I(dir)->root; 4196 struct btrfs_path *path; 4197 struct extent_buffer *leaf; 4198 struct btrfs_dir_item *di; 4199 struct btrfs_key key; 4200 u64 index; 4201 int ret; 4202 u64 dir_ino = btrfs_ino(BTRFS_I(dir)); 4203 4204 path = btrfs_alloc_path(); 4205 if (!path) 4206 return -ENOMEM; 4207 4208 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4209 name, name_len, -1); 4210 if (IS_ERR_OR_NULL(di)) { 4211 ret = di ? PTR_ERR(di) : -ENOENT; 4212 goto out; 4213 } 4214 4215 leaf = path->nodes[0]; 4216 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4217 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4218 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4219 if (ret) { 4220 btrfs_abort_transaction(trans, ret); 4221 goto out; 4222 } 4223 btrfs_release_path(path); 4224 4225 ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid, 4226 dir_ino, &index, name, name_len); 4227 if (ret < 0) { 4228 if (ret != -ENOENT) { 4229 btrfs_abort_transaction(trans, ret); 4230 goto out; 4231 } 4232 di = btrfs_search_dir_index_item(root, path, dir_ino, 4233 name, name_len); 4234 if (IS_ERR_OR_NULL(di)) { 4235 if (!di) 4236 ret = -ENOENT; 4237 else 4238 ret = PTR_ERR(di); 4239 btrfs_abort_transaction(trans, ret); 4240 goto out; 4241 } 4242 4243 leaf = path->nodes[0]; 4244 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4245 index = key.offset; 4246 } 4247 btrfs_release_path(path); 4248 4249 ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index); 4250 if (ret) { 4251 btrfs_abort_transaction(trans, ret); 4252 goto out; 4253 } 4254 4255 btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); 4256 inode_inc_iversion(dir); 4257 dir->i_mtime = dir->i_ctime = current_time(dir); 4258 ret = btrfs_update_inode_fallback(trans, root, dir); 4259 if (ret) 4260 btrfs_abort_transaction(trans, ret); 4261 out: 4262 btrfs_free_path(path); 4263 return ret; 4264 } 4265 4266 /* 4267 * Helper to check if the subvolume references other subvolumes or if it's 4268 * default. 
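 *
 * Concretely, matching the checks below: the "default" dir item in the
 * tree root must not point at this root, and there must be no
 * (this root, BTRFS_ROOT_REF_KEY, child) items left, i.e. no snapshots
 * or subvolumes still nested underneath it.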
4269 */ 4270 static noinline int may_destroy_subvol(struct btrfs_root *root) 4271 { 4272 struct btrfs_fs_info *fs_info = root->fs_info; 4273 struct btrfs_path *path; 4274 struct btrfs_dir_item *di; 4275 struct btrfs_key key; 4276 u64 dir_id; 4277 int ret; 4278 4279 path = btrfs_alloc_path(); 4280 if (!path) 4281 return -ENOMEM; 4282 4283 /* Make sure this root isn't set as the default subvol */ 4284 dir_id = btrfs_super_root_dir(fs_info->super_copy); 4285 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, 4286 dir_id, "default", 7, 0); 4287 if (di && !IS_ERR(di)) { 4288 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 4289 if (key.objectid == root->root_key.objectid) { 4290 ret = -EPERM; 4291 btrfs_err(fs_info, 4292 "deleting default subvolume %llu is not allowed", 4293 key.objectid); 4294 goto out; 4295 } 4296 btrfs_release_path(path); 4297 } 4298 4299 key.objectid = root->root_key.objectid; 4300 key.type = BTRFS_ROOT_REF_KEY; 4301 key.offset = (u64)-1; 4302 4303 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 4304 if (ret < 0) 4305 goto out; 4306 BUG_ON(ret == 0); 4307 4308 ret = 0; 4309 if (path->slots[0] > 0) { 4310 path->slots[0]--; 4311 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4312 if (key.objectid == root->root_key.objectid && 4313 key.type == BTRFS_ROOT_REF_KEY) 4314 ret = -ENOTEMPTY; 4315 } 4316 out: 4317 btrfs_free_path(path); 4318 return ret; 4319 } 4320 4321 /* Delete all dentries for inodes belonging to the root */ 4322 static void btrfs_prune_dentries(struct btrfs_root *root) 4323 { 4324 struct btrfs_fs_info *fs_info = root->fs_info; 4325 struct rb_node *node; 4326 struct rb_node *prev; 4327 struct btrfs_inode *entry; 4328 struct inode *inode; 4329 u64 objectid = 0; 4330 4331 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 4332 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 4333 4334 spin_lock(&root->inode_lock); 4335 again: 4336 node = root->inode_tree.rb_node; 4337 prev = NULL; 4338 while (node) { 4339 prev = node; 4340 entry = rb_entry(node, struct btrfs_inode, rb_node); 4341 4342 if (objectid < btrfs_ino(entry)) 4343 node = node->rb_left; 4344 else if (objectid > btrfs_ino(entry)) 4345 node = node->rb_right; 4346 else 4347 break; 4348 } 4349 if (!node) { 4350 while (prev) { 4351 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4352 if (objectid <= btrfs_ino(entry)) { 4353 node = prev; 4354 break; 4355 } 4356 prev = rb_next(prev); 4357 } 4358 } 4359 while (node) { 4360 entry = rb_entry(node, struct btrfs_inode, rb_node); 4361 objectid = btrfs_ino(entry) + 1; 4362 inode = igrab(&entry->vfs_inode); 4363 if (inode) { 4364 spin_unlock(&root->inode_lock); 4365 if (atomic_read(&inode->i_count) > 1) 4366 d_prune_aliases(inode); 4367 /* 4368 * btrfs_drop_inode will have it removed from the inode 4369 * cache when its usage count hits zero. 
4370 */ 4371 iput(inode); 4372 cond_resched(); 4373 spin_lock(&root->inode_lock); 4374 goto again; 4375 } 4376 4377 if (cond_resched_lock(&root->inode_lock)) 4378 goto again; 4379 4380 node = rb_next(node); 4381 } 4382 spin_unlock(&root->inode_lock); 4383 } 4384 4385 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) 4386 { 4387 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); 4388 struct btrfs_root *root = BTRFS_I(dir)->root; 4389 struct inode *inode = d_inode(dentry); 4390 struct btrfs_root *dest = BTRFS_I(inode)->root; 4391 struct btrfs_trans_handle *trans; 4392 struct btrfs_block_rsv block_rsv; 4393 u64 root_flags; 4394 int ret; 4395 int err; 4396 4397 /* 4398 * Don't allow to delete a subvolume with send in progress. This is 4399 * inside the inode lock so the error handling that has to drop the bit 4400 * again is not run concurrently. 4401 */ 4402 spin_lock(&dest->root_item_lock); 4403 if (dest->send_in_progress) { 4404 spin_unlock(&dest->root_item_lock); 4405 btrfs_warn(fs_info, 4406 "attempt to delete subvolume %llu during send", 4407 dest->root_key.objectid); 4408 return -EPERM; 4409 } 4410 root_flags = btrfs_root_flags(&dest->root_item); 4411 btrfs_set_root_flags(&dest->root_item, 4412 root_flags | BTRFS_ROOT_SUBVOL_DEAD); 4413 spin_unlock(&dest->root_item_lock); 4414 4415 down_write(&fs_info->subvol_sem); 4416 4417 err = may_destroy_subvol(dest); 4418 if (err) 4419 goto out_up_write; 4420 4421 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); 4422 /* 4423 * One for dir inode, 4424 * two for dir entries, 4425 * two for root ref/backref. 4426 */ 4427 err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); 4428 if (err) 4429 goto out_up_write; 4430 4431 trans = btrfs_start_transaction(root, 0); 4432 if (IS_ERR(trans)) { 4433 err = PTR_ERR(trans); 4434 goto out_release; 4435 } 4436 trans->block_rsv = &block_rsv; 4437 trans->bytes_reserved = block_rsv.size; 4438 4439 btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); 4440 4441 ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid, 4442 dentry->d_name.name, dentry->d_name.len); 4443 if (ret) { 4444 err = ret; 4445 btrfs_abort_transaction(trans, ret); 4446 goto out_end_trans; 4447 } 4448 4449 btrfs_record_root_in_trans(trans, dest); 4450 4451 memset(&dest->root_item.drop_progress, 0, 4452 sizeof(dest->root_item.drop_progress)); 4453 dest->root_item.drop_level = 0; 4454 btrfs_set_root_refs(&dest->root_item, 0); 4455 4456 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { 4457 ret = btrfs_insert_orphan_item(trans, 4458 fs_info->tree_root, 4459 dest->root_key.objectid); 4460 if (ret) { 4461 btrfs_abort_transaction(trans, ret); 4462 err = ret; 4463 goto out_end_trans; 4464 } 4465 } 4466 4467 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, 4468 BTRFS_UUID_KEY_SUBVOL, 4469 dest->root_key.objectid); 4470 if (ret && ret != -ENOENT) { 4471 btrfs_abort_transaction(trans, ret); 4472 err = ret; 4473 goto out_end_trans; 4474 } 4475 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { 4476 ret = btrfs_uuid_tree_remove(trans, 4477 dest->root_item.received_uuid, 4478 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 4479 dest->root_key.objectid); 4480 if (ret && ret != -ENOENT) { 4481 btrfs_abort_transaction(trans, ret); 4482 err = ret; 4483 goto out_end_trans; 4484 } 4485 } 4486 4487 out_end_trans: 4488 trans->block_rsv = NULL; 4489 trans->bytes_reserved = 0; 4490 ret = btrfs_end_transaction(trans); 4491 if (ret && !err) 4492 err = ret; 4493 inode->i_flags |= S_DEAD; 4494 
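	/*
	 * The subvolume's root directory is gone; S_DEAD makes the VFS
	 * refuse to create new entries in it (IS_DEADDIR()).
	 */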
out_release: 4495 btrfs_subvolume_release_metadata(fs_info, &block_rsv); 4496 out_up_write: 4497 up_write(&fs_info->subvol_sem); 4498 if (err) { 4499 spin_lock(&dest->root_item_lock); 4500 root_flags = btrfs_root_flags(&dest->root_item); 4501 btrfs_set_root_flags(&dest->root_item, 4502 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); 4503 spin_unlock(&dest->root_item_lock); 4504 } else { 4505 d_invalidate(dentry); 4506 btrfs_prune_dentries(dest); 4507 ASSERT(dest->send_in_progress == 0); 4508 4509 /* the last ref */ 4510 if (dest->ino_cache_inode) { 4511 iput(dest->ino_cache_inode); 4512 dest->ino_cache_inode = NULL; 4513 } 4514 } 4515 4516 return err; 4517 } 4518 4519 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 4520 { 4521 struct inode *inode = d_inode(dentry); 4522 int err = 0; 4523 struct btrfs_root *root = BTRFS_I(dir)->root; 4524 struct btrfs_trans_handle *trans; 4525 u64 last_unlink_trans; 4526 4527 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4528 return -ENOTEMPTY; 4529 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) 4530 return btrfs_delete_subvolume(dir, dentry); 4531 4532 trans = __unlink_start_trans(dir); 4533 if (IS_ERR(trans)) 4534 return PTR_ERR(trans); 4535 4536 if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 4537 err = btrfs_unlink_subvol(trans, dir, 4538 BTRFS_I(inode)->location.objectid, 4539 dentry->d_name.name, 4540 dentry->d_name.len); 4541 goto out; 4542 } 4543 4544 err = btrfs_orphan_add(trans, BTRFS_I(inode)); 4545 if (err) 4546 goto out; 4547 4548 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4549 4550 /* now the directory is empty */ 4551 err = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 4552 BTRFS_I(d_inode(dentry)), dentry->d_name.name, 4553 dentry->d_name.len); 4554 if (!err) { 4555 btrfs_i_size_write(BTRFS_I(inode), 0); 4556 /* 4557 * Propagate the last_unlink_trans value of the deleted dir to 4558 * its parent directory. This is to prevent an unrecoverable 4559 * log tree in the case we do something like this: 4560 * 1) create dir foo 4561 * 2) create snapshot under dir foo 4562 * 3) delete the snapshot 4563 * 4) rmdir foo 4564 * 5) mkdir foo 4565 * 6) fsync foo or some file inside foo 4566 */ 4567 if (last_unlink_trans >= trans->transid) 4568 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4569 } 4570 out: 4571 btrfs_end_transaction(trans); 4572 btrfs_btree_balance_dirty(root->fs_info); 4573 4574 return err; 4575 } 4576 4577 /* 4578 * Return this if we need to call truncate_block for the last bit of the 4579 * truncate. 4580 */ 4581 #define NEED_TRUNCATE_BLOCK 1 4582 4583 /* 4584 * this can truncate away extent items, csum items and directory items. 4585 * It starts at a high offset and removes keys until it can't find 4586 * any higher than new_size 4587 * 4588 * csum items that cross the new i_size are truncated to the new size 4589 * as well. 4590 * 4591 * min_type is the minimum key type to truncate down to. If set to 0, this 4592 * will kill all the items on this inode, including the INODE_ITEM_KEY. 
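 *
 * Typical calls, for illustration (they match callers elsewhere in this
 * file): inode eviction drops everything with
 *
 *	btrfs_truncate_inode_items(trans, root, inode, 0, 0);
 *
 * while a regular truncate keeps the inode item and only trims data:
 *
 *	btrfs_truncate_inode_items(trans, root, inode, new_size,
 *				   BTRFS_EXTENT_DATA_KEY);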
4593 */ 4594 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 4595 struct btrfs_root *root, 4596 struct inode *inode, 4597 u64 new_size, u32 min_type) 4598 { 4599 struct btrfs_fs_info *fs_info = root->fs_info; 4600 struct btrfs_path *path; 4601 struct extent_buffer *leaf; 4602 struct btrfs_file_extent_item *fi; 4603 struct btrfs_key key; 4604 struct btrfs_key found_key; 4605 u64 extent_start = 0; 4606 u64 extent_num_bytes = 0; 4607 u64 extent_offset = 0; 4608 u64 item_end = 0; 4609 u64 last_size = new_size; 4610 u32 found_type = (u8)-1; 4611 int found_extent; 4612 int del_item; 4613 int pending_del_nr = 0; 4614 int pending_del_slot = 0; 4615 int extent_type = -1; 4616 int ret; 4617 u64 ino = btrfs_ino(BTRFS_I(inode)); 4618 u64 bytes_deleted = 0; 4619 bool be_nice = false; 4620 bool should_throttle = false; 4621 4622 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 4623 4624 /* 4625 * for non-free space inodes and ref cows, we want to back off from 4626 * time to time 4627 */ 4628 if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && 4629 test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4630 be_nice = true; 4631 4632 path = btrfs_alloc_path(); 4633 if (!path) 4634 return -ENOMEM; 4635 path->reada = READA_BACK; 4636 4637 /* 4638 * We want to drop from the next block forward in case this new size is 4639 * not block aligned since we will be keeping the last block of the 4640 * extent just the way it is. 4641 */ 4642 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4643 root == fs_info->tree_root) 4644 btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, 4645 fs_info->sectorsize), 4646 (u64)-1, 0); 4647 4648 /* 4649 * This function is also used to drop the items in the log tree before 4650 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 4651 * it is used to drop the logged items. So we shouldn't kill the delayed 4652 * items. 4653 */ 4654 if (min_type == 0 && root == BTRFS_I(inode)->root) 4655 btrfs_kill_delayed_inode_items(BTRFS_I(inode)); 4656 4657 key.objectid = ino; 4658 key.offset = (u64)-1; 4659 key.type = (u8)-1; 4660 4661 search_again: 4662 /* 4663 * with a 16K leaf size and 128MB extents, you can actually queue 4664 * up a huge file in a single leaf. 
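 * (Rough numbers, for illustration only: a file extent item plus its key
 * and item header is on the order of 80 bytes, so a 16K leaf holds
 * roughly 200 of them, and at 128MB per extent that is on the order of
 * 25GB of data referenced from one leaf.)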
Most of the time that 4665 * bytes_deleted is > 0, it will be huge by the time we get here 4666 */ 4667 if (be_nice && bytes_deleted > SZ_32M && 4668 btrfs_should_end_transaction(trans)) { 4669 ret = -EAGAIN; 4670 goto out; 4671 } 4672 4673 path->leave_spinning = 1; 4674 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 4675 if (ret < 0) 4676 goto out; 4677 4678 if (ret > 0) { 4679 ret = 0; 4680 /* there are no items in the tree for us to truncate, we're 4681 * done 4682 */ 4683 if (path->slots[0] == 0) 4684 goto out; 4685 path->slots[0]--; 4686 } 4687 4688 while (1) { 4689 fi = NULL; 4690 leaf = path->nodes[0]; 4691 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4692 found_type = found_key.type; 4693 4694 if (found_key.objectid != ino) 4695 break; 4696 4697 if (found_type < min_type) 4698 break; 4699 4700 item_end = found_key.offset; 4701 if (found_type == BTRFS_EXTENT_DATA_KEY) { 4702 fi = btrfs_item_ptr(leaf, path->slots[0], 4703 struct btrfs_file_extent_item); 4704 extent_type = btrfs_file_extent_type(leaf, fi); 4705 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4706 item_end += 4707 btrfs_file_extent_num_bytes(leaf, fi); 4708 4709 trace_btrfs_truncate_show_fi_regular( 4710 BTRFS_I(inode), leaf, fi, 4711 found_key.offset); 4712 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4713 item_end += btrfs_file_extent_ram_bytes(leaf, 4714 fi); 4715 4716 trace_btrfs_truncate_show_fi_inline( 4717 BTRFS_I(inode), leaf, fi, path->slots[0], 4718 found_key.offset); 4719 } 4720 item_end--; 4721 } 4722 if (found_type > min_type) { 4723 del_item = 1; 4724 } else { 4725 if (item_end < new_size) 4726 break; 4727 if (found_key.offset >= new_size) 4728 del_item = 1; 4729 else 4730 del_item = 0; 4731 } 4732 found_extent = 0; 4733 /* FIXME, shrink the extent if the ref count is only 1 */ 4734 if (found_type != BTRFS_EXTENT_DATA_KEY) 4735 goto delete; 4736 4737 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4738 u64 num_dec; 4739 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 4740 if (!del_item) { 4741 u64 orig_num_bytes = 4742 btrfs_file_extent_num_bytes(leaf, fi); 4743 extent_num_bytes = ALIGN(new_size - 4744 found_key.offset, 4745 fs_info->sectorsize); 4746 btrfs_set_file_extent_num_bytes(leaf, fi, 4747 extent_num_bytes); 4748 num_dec = (orig_num_bytes - 4749 extent_num_bytes); 4750 if (test_bit(BTRFS_ROOT_REF_COWS, 4751 &root->state) && 4752 extent_start != 0) 4753 inode_sub_bytes(inode, num_dec); 4754 btrfs_mark_buffer_dirty(leaf); 4755 } else { 4756 extent_num_bytes = 4757 btrfs_file_extent_disk_num_bytes(leaf, 4758 fi); 4759 extent_offset = found_key.offset - 4760 btrfs_file_extent_offset(leaf, fi); 4761 4762 /* FIXME blocksize != 4096 */ 4763 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 4764 if (extent_start != 0) { 4765 found_extent = 1; 4766 if (test_bit(BTRFS_ROOT_REF_COWS, 4767 &root->state)) 4768 inode_sub_bytes(inode, num_dec); 4769 } 4770 } 4771 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4772 /* 4773 * we can't truncate inline items that have had 4774 * special encodings 4775 */ 4776 if (!del_item && 4777 btrfs_file_extent_encryption(leaf, fi) == 0 && 4778 btrfs_file_extent_other_encoding(leaf, fi) == 0 && 4779 btrfs_file_extent_compression(leaf, fi) == 0) { 4780 u32 size = (u32)(new_size - found_key.offset); 4781 4782 btrfs_set_file_extent_ram_bytes(leaf, fi, size); 4783 size = btrfs_file_extent_calc_inline_size(size); 4784 btrfs_truncate_item(path, size, 1); 4785 } else if (!del_item) { 4786 /* 4787 * We have to bail so the last_size is set to 
4788 * just before this extent. 4789 */ 4790 ret = NEED_TRUNCATE_BLOCK; 4791 break; 4792 } 4793 4794 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 4795 inode_sub_bytes(inode, item_end + 1 - new_size); 4796 } 4797 delete: 4798 if (del_item) 4799 last_size = found_key.offset; 4800 else 4801 last_size = new_size; 4802 if (del_item) { 4803 if (!pending_del_nr) { 4804 /* no pending yet, add ourselves */ 4805 pending_del_slot = path->slots[0]; 4806 pending_del_nr = 1; 4807 } else if (pending_del_nr && 4808 path->slots[0] + 1 == pending_del_slot) { 4809 /* hop on the pending chunk */ 4810 pending_del_nr++; 4811 pending_del_slot = path->slots[0]; 4812 } else { 4813 BUG(); 4814 } 4815 } else { 4816 break; 4817 } 4818 should_throttle = false; 4819 4820 if (found_extent && 4821 (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4822 root == fs_info->tree_root)) { 4823 struct btrfs_ref ref = { 0 }; 4824 4825 btrfs_set_path_blocking(path); 4826 bytes_deleted += extent_num_bytes; 4827 4828 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, 4829 extent_start, extent_num_bytes, 0); 4830 ref.real_root = root->root_key.objectid; 4831 btrfs_init_data_ref(&ref, btrfs_header_owner(leaf), 4832 ino, extent_offset); 4833 ret = btrfs_free_extent(trans, &ref); 4834 if (ret) { 4835 btrfs_abort_transaction(trans, ret); 4836 break; 4837 } 4838 if (be_nice) { 4839 if (btrfs_should_throttle_delayed_refs(trans)) 4840 should_throttle = true; 4841 } 4842 } 4843 4844 if (found_type == BTRFS_INODE_ITEM_KEY) 4845 break; 4846 4847 if (path->slots[0] == 0 || 4848 path->slots[0] != pending_del_slot || 4849 should_throttle) { 4850 if (pending_del_nr) { 4851 ret = btrfs_del_items(trans, root, path, 4852 pending_del_slot, 4853 pending_del_nr); 4854 if (ret) { 4855 btrfs_abort_transaction(trans, ret); 4856 break; 4857 } 4858 pending_del_nr = 0; 4859 } 4860 btrfs_release_path(path); 4861 4862 /* 4863 * We can generate a lot of delayed refs, so we need to 4864 * throttle every once and a while and make sure we're 4865 * adding enough space to keep up with the work we are 4866 * generating. Since we hold a transaction here we 4867 * can't flush, and we don't want to FLUSH_LIMIT because 4868 * we could have generated too many delayed refs to 4869 * actually allocate, so just bail if we're short and 4870 * let the normal reservation dance happen higher up. 
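 *
 * (BTRFS_RESERVE_NO_FLUSH below means "only take space that is already
 * free, never flush to make room"; if even that fails we return -EAGAIN
 * and the caller restarts the truncate with a fresh transaction.)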
4871 */ 4872 if (should_throttle) { 4873 ret = btrfs_delayed_refs_rsv_refill(fs_info, 4874 BTRFS_RESERVE_NO_FLUSH); 4875 if (ret) { 4876 ret = -EAGAIN; 4877 break; 4878 } 4879 } 4880 goto search_again; 4881 } else { 4882 path->slots[0]--; 4883 } 4884 } 4885 out: 4886 if (ret >= 0 && pending_del_nr) { 4887 int err; 4888 4889 err = btrfs_del_items(trans, root, path, pending_del_slot, 4890 pending_del_nr); 4891 if (err) { 4892 btrfs_abort_transaction(trans, err); 4893 ret = err; 4894 } 4895 } 4896 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { 4897 ASSERT(last_size >= new_size); 4898 if (!ret && last_size > new_size) 4899 last_size = new_size; 4900 btrfs_ordered_update_i_size(inode, last_size, NULL); 4901 } 4902 4903 btrfs_free_path(path); 4904 return ret; 4905 } 4906 4907 /* 4908 * btrfs_truncate_block - read, zero a chunk and write a block 4909 * @inode - inode that we're zeroing 4910 * @from - the offset to start zeroing 4911 * @len - the length to zero, 0 to zero the entire range respective to the 4912 * offset 4913 * @front - zero up to the offset instead of from the offset on 4914 * 4915 * This will find the block for the "from" offset and cow the block and zero the 4916 * part we want to zero. This is used with truncate and hole punching. 4917 */ 4918 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, 4919 int front) 4920 { 4921 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 4922 struct address_space *mapping = inode->i_mapping; 4923 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 4924 struct btrfs_ordered_extent *ordered; 4925 struct extent_state *cached_state = NULL; 4926 struct extent_changeset *data_reserved = NULL; 4927 char *kaddr; 4928 u32 blocksize = fs_info->sectorsize; 4929 pgoff_t index = from >> PAGE_SHIFT; 4930 unsigned offset = from & (blocksize - 1); 4931 struct page *page; 4932 gfp_t mask = btrfs_alloc_write_mask(mapping); 4933 int ret = 0; 4934 u64 block_start; 4935 u64 block_end; 4936 4937 if (IS_ALIGNED(offset, blocksize) && 4938 (!len || IS_ALIGNED(len, blocksize))) 4939 goto out; 4940 4941 block_start = round_down(from, blocksize); 4942 block_end = block_start + blocksize - 1; 4943 4944 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 4945 block_start, blocksize); 4946 if (ret) 4947 goto out; 4948 4949 again: 4950 page = find_or_create_page(mapping, index, mask); 4951 if (!page) { 4952 btrfs_delalloc_release_space(inode, data_reserved, 4953 block_start, blocksize, true); 4954 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true); 4955 ret = -ENOMEM; 4956 goto out; 4957 } 4958 4959 if (!PageUptodate(page)) { 4960 ret = btrfs_readpage(NULL, page); 4961 lock_page(page); 4962 if (page->mapping != mapping) { 4963 unlock_page(page); 4964 put_page(page); 4965 goto again; 4966 } 4967 if (!PageUptodate(page)) { 4968 ret = -EIO; 4969 goto out_unlock; 4970 } 4971 } 4972 wait_on_page_writeback(page); 4973 4974 lock_extent_bits(io_tree, block_start, block_end, &cached_state); 4975 set_page_extent_mapped(page); 4976 4977 ordered = btrfs_lookup_ordered_extent(inode, block_start); 4978 if (ordered) { 4979 unlock_extent_cached(io_tree, block_start, block_end, 4980 &cached_state); 4981 unlock_page(page); 4982 put_page(page); 4983 btrfs_start_ordered_extent(inode, ordered, 1); 4984 btrfs_put_ordered_extent(ordered); 4985 goto again; 4986 } 4987 4988 clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end, 4989 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 4990 0, 0, &cached_state); 4991 4992 ret 
= btrfs_set_extent_delalloc(inode, block_start, block_end, 0, 4993 &cached_state); 4994 if (ret) { 4995 unlock_extent_cached(io_tree, block_start, block_end, 4996 &cached_state); 4997 goto out_unlock; 4998 } 4999 5000 if (offset != blocksize) { 5001 if (!len) 5002 len = blocksize - offset; 5003 kaddr = kmap(page); 5004 if (front) 5005 memset(kaddr + (block_start - page_offset(page)), 5006 0, offset); 5007 else 5008 memset(kaddr + (block_start - page_offset(page)) + offset, 5009 0, len); 5010 flush_dcache_page(page); 5011 kunmap(page); 5012 } 5013 ClearPageChecked(page); 5014 set_page_dirty(page); 5015 unlock_extent_cached(io_tree, block_start, block_end, &cached_state); 5016 5017 out_unlock: 5018 if (ret) 5019 btrfs_delalloc_release_space(inode, data_reserved, block_start, 5020 blocksize, true); 5021 btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0)); 5022 unlock_page(page); 5023 put_page(page); 5024 out: 5025 extent_changeset_free(data_reserved); 5026 return ret; 5027 } 5028 5029 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode, 5030 u64 offset, u64 len) 5031 { 5032 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5033 struct btrfs_trans_handle *trans; 5034 int ret; 5035 5036 /* 5037 * Still need to make sure the inode looks like it's been updated so 5038 * that any holes get logged if we fsync. 5039 */ 5040 if (btrfs_fs_incompat(fs_info, NO_HOLES)) { 5041 BTRFS_I(inode)->last_trans = fs_info->generation; 5042 BTRFS_I(inode)->last_sub_trans = root->log_transid; 5043 BTRFS_I(inode)->last_log_commit = root->last_log_commit; 5044 return 0; 5045 } 5046 5047 /* 5048 * 1 - for the one we're dropping 5049 * 1 - for the one we're adding 5050 * 1 - for updating the inode. 5051 */ 5052 trans = btrfs_start_transaction(root, 3); 5053 if (IS_ERR(trans)) 5054 return PTR_ERR(trans); 5055 5056 ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1); 5057 if (ret) { 5058 btrfs_abort_transaction(trans, ret); 5059 btrfs_end_transaction(trans); 5060 return ret; 5061 } 5062 5063 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)), 5064 offset, 0, 0, len, 0, len, 0, 0, 0); 5065 if (ret) 5066 btrfs_abort_transaction(trans, ret); 5067 else 5068 btrfs_update_inode(trans, root, inode); 5069 btrfs_end_transaction(trans); 5070 return ret; 5071 } 5072 5073 /* 5074 * This function puts in dummy file extents for the area we're creating a hole 5075 * for. So if we are truncating this file to a larger size we need to insert 5076 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 5077 * the range between oldsize and size 5078 */ 5079 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) 5080 { 5081 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5082 struct btrfs_root *root = BTRFS_I(inode)->root; 5083 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5084 struct extent_map *em = NULL; 5085 struct extent_state *cached_state = NULL; 5086 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5087 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); 5088 u64 block_end = ALIGN(size, fs_info->sectorsize); 5089 u64 last_byte; 5090 u64 cur_offset; 5091 u64 hole_size; 5092 int err = 0; 5093 5094 /* 5095 * If our size started in the middle of a block we need to zero out the 5096 * rest of the block before we expand the i_size, otherwise we could 5097 * expose stale data. 
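 *
 * For example, assuming a 4K sector size: growing a file whose i_size is
 * currently 0x1200 makes btrfs_truncate_block() below CoW the block at
 * 0x1000 and zero the range 0x1200..0x1fff before any hole extents are
 * inserted.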
5098 */ 5099 err = btrfs_truncate_block(inode, oldsize, 0, 0); 5100 if (err) 5101 return err; 5102 5103 if (size <= hole_start) 5104 return 0; 5105 5106 btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start, 5107 block_end - 1, &cached_state); 5108 cur_offset = hole_start; 5109 while (1) { 5110 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, 5111 block_end - cur_offset, 0); 5112 if (IS_ERR(em)) { 5113 err = PTR_ERR(em); 5114 em = NULL; 5115 break; 5116 } 5117 last_byte = min(extent_map_end(em), block_end); 5118 last_byte = ALIGN(last_byte, fs_info->sectorsize); 5119 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 5120 struct extent_map *hole_em; 5121 hole_size = last_byte - cur_offset; 5122 5123 err = maybe_insert_hole(root, inode, cur_offset, 5124 hole_size); 5125 if (err) 5126 break; 5127 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, 5128 cur_offset + hole_size - 1, 0); 5129 hole_em = alloc_extent_map(); 5130 if (!hole_em) { 5131 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5132 &BTRFS_I(inode)->runtime_flags); 5133 goto next; 5134 } 5135 hole_em->start = cur_offset; 5136 hole_em->len = hole_size; 5137 hole_em->orig_start = cur_offset; 5138 5139 hole_em->block_start = EXTENT_MAP_HOLE; 5140 hole_em->block_len = 0; 5141 hole_em->orig_block_len = 0; 5142 hole_em->ram_bytes = hole_size; 5143 hole_em->bdev = fs_info->fs_devices->latest_bdev; 5144 hole_em->compress_type = BTRFS_COMPRESS_NONE; 5145 hole_em->generation = fs_info->generation; 5146 5147 while (1) { 5148 write_lock(&em_tree->lock); 5149 err = add_extent_mapping(em_tree, hole_em, 1); 5150 write_unlock(&em_tree->lock); 5151 if (err != -EEXIST) 5152 break; 5153 btrfs_drop_extent_cache(BTRFS_I(inode), 5154 cur_offset, 5155 cur_offset + 5156 hole_size - 1, 0); 5157 } 5158 free_extent_map(hole_em); 5159 } 5160 next: 5161 free_extent_map(em); 5162 em = NULL; 5163 cur_offset = last_byte; 5164 if (cur_offset >= block_end) 5165 break; 5166 } 5167 free_extent_map(em); 5168 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state); 5169 return err; 5170 } 5171 5172 static int btrfs_setsize(struct inode *inode, struct iattr *attr) 5173 { 5174 struct btrfs_root *root = BTRFS_I(inode)->root; 5175 struct btrfs_trans_handle *trans; 5176 loff_t oldsize = i_size_read(inode); 5177 loff_t newsize = attr->ia_size; 5178 int mask = attr->ia_valid; 5179 int ret; 5180 5181 /* 5182 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 5183 * special case where we need to update the times despite not having 5184 * these flags set. For all other operations the VFS set these flags 5185 * explicitly if it wants a timestamp update. 5186 */ 5187 if (newsize != oldsize) { 5188 inode_inc_iversion(inode); 5189 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) 5190 inode->i_ctime = inode->i_mtime = 5191 current_time(inode); 5192 } 5193 5194 if (newsize > oldsize) { 5195 /* 5196 * Don't do an expanding truncate while snapshotting is ongoing. 5197 * This is to ensure the snapshot captures a fully consistent 5198 * state of this file - if the snapshot captures this expanding 5199 * truncation, it must capture all writes that happened before 5200 * this truncation. 
5201 */ 5202 btrfs_wait_for_snapshot_creation(root); 5203 ret = btrfs_cont_expand(inode, oldsize, newsize); 5204 if (ret) { 5205 btrfs_end_write_no_snapshotting(root); 5206 return ret; 5207 } 5208 5209 trans = btrfs_start_transaction(root, 1); 5210 if (IS_ERR(trans)) { 5211 btrfs_end_write_no_snapshotting(root); 5212 return PTR_ERR(trans); 5213 } 5214 5215 i_size_write(inode, newsize); 5216 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); 5217 pagecache_isize_extended(inode, oldsize, newsize); 5218 ret = btrfs_update_inode(trans, root, inode); 5219 btrfs_end_write_no_snapshotting(root); 5220 btrfs_end_transaction(trans); 5221 } else { 5222 5223 /* 5224 * We're truncating a file that used to have good data down to 5225 * zero. Make sure it gets into the ordered flush list so that 5226 * any new writes get down to disk quickly. 5227 */ 5228 if (newsize == 0) 5229 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 5230 &BTRFS_I(inode)->runtime_flags); 5231 5232 truncate_setsize(inode, newsize); 5233 5234 /* Disable nonlocked read DIO to avoid the endless truncate */ 5235 btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); 5236 inode_dio_wait(inode); 5237 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); 5238 5239 ret = btrfs_truncate(inode, newsize == oldsize); 5240 if (ret && inode->i_nlink) { 5241 int err; 5242 5243 /* 5244 * Truncate failed, so fix up the in-memory size. We 5245 * adjusted disk_i_size down as we removed extents, so 5246 * wait for disk_i_size to be stable and then update the 5247 * in-memory size to match. 5248 */ 5249 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); 5250 if (err) 5251 return err; 5252 i_size_write(inode, BTRFS_I(inode)->disk_i_size); 5253 } 5254 } 5255 5256 return ret; 5257 } 5258 5259 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 5260 { 5261 struct inode *inode = d_inode(dentry); 5262 struct btrfs_root *root = BTRFS_I(inode)->root; 5263 int err; 5264 5265 if (btrfs_root_readonly(root)) 5266 return -EROFS; 5267 5268 err = setattr_prepare(dentry, attr); 5269 if (err) 5270 return err; 5271 5272 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 5273 err = btrfs_setsize(inode, attr); 5274 if (err) 5275 return err; 5276 } 5277 5278 if (attr->ia_valid) { 5279 setattr_copy(inode, attr); 5280 inode_inc_iversion(inode); 5281 err = btrfs_dirty_inode(inode); 5282 5283 if (!err && attr->ia_valid & ATTR_MODE) 5284 err = posix_acl_chmod(inode, inode->i_mode); 5285 } 5286 5287 return err; 5288 } 5289 5290 /* 5291 * While truncating the inode pages during eviction, we get the VFS calling 5292 * btrfs_invalidatepage() against each page of the inode. This is slow because 5293 * the calls to btrfs_invalidatepage() result in a huge amount of calls to 5294 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting 5295 * extent_state structures over and over, wasting lots of time. 5296 * 5297 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all 5298 * those expensive operations on a per page basis and do only the ordered io 5299 * finishing, while we release here the extent_map and extent_state structures, 5300 * without the excessive merging and splitting. 
5301 */ 5302 static void evict_inode_truncate_pages(struct inode *inode) 5303 { 5304 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5305 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree; 5306 struct rb_node *node; 5307 5308 ASSERT(inode->i_state & I_FREEING); 5309 truncate_inode_pages_final(&inode->i_data); 5310 5311 write_lock(&map_tree->lock); 5312 while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) { 5313 struct extent_map *em; 5314 5315 node = rb_first_cached(&map_tree->map); 5316 em = rb_entry(node, struct extent_map, rb_node); 5317 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 5318 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 5319 remove_extent_mapping(map_tree, em); 5320 free_extent_map(em); 5321 if (need_resched()) { 5322 write_unlock(&map_tree->lock); 5323 cond_resched(); 5324 write_lock(&map_tree->lock); 5325 } 5326 } 5327 write_unlock(&map_tree->lock); 5328 5329 /* 5330 * Keep looping until we have no more ranges in the io tree. 5331 * We can have ongoing bios started by readpages (called from readahead) 5332 * that have their endio callback (extent_io.c:end_bio_extent_readpage) 5333 * still in progress (unlocked the pages in the bio but did not yet 5334 * unlocked the ranges in the io tree). Therefore this means some 5335 * ranges can still be locked and eviction started because before 5336 * submitting those bios, which are executed by a separate task (work 5337 * queue kthread), inode references (inode->i_count) were not taken 5338 * (which would be dropped in the end io callback of each bio). 5339 * Therefore here we effectively end up waiting for those bios and 5340 * anyone else holding locked ranges without having bumped the inode's 5341 * reference count - if we don't do it, when they access the inode's 5342 * io_tree to unlock a range it may be too late, leading to an 5343 * use-after-free issue. 5344 */ 5345 spin_lock(&io_tree->lock); 5346 while (!RB_EMPTY_ROOT(&io_tree->state)) { 5347 struct extent_state *state; 5348 struct extent_state *cached_state = NULL; 5349 u64 start; 5350 u64 end; 5351 unsigned state_flags; 5352 5353 node = rb_first(&io_tree->state); 5354 state = rb_entry(node, struct extent_state, rb_node); 5355 start = state->start; 5356 end = state->end; 5357 state_flags = state->state; 5358 spin_unlock(&io_tree->lock); 5359 5360 lock_extent_bits(io_tree, start, end, &cached_state); 5361 5362 /* 5363 * If still has DELALLOC flag, the extent didn't reach disk, 5364 * and its reserved space won't be freed by delayed_ref. 5365 * So we need to free its reserved space here. 5366 * (Refer to comment in btrfs_invalidatepage, case 2) 5367 * 5368 * Note, end is the bytenr of last byte, so we need + 1 here. 
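 *
 * (e.g. a single 4K block still carrying EXTENT_DELALLOC has start == 0
 * and end == 4095, so the length handed to btrfs_qgroup_free_data() is
 * end - start + 1 == 4096)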
5369 */ 5370 if (state_flags & EXTENT_DELALLOC) 5371 btrfs_qgroup_free_data(inode, NULL, start, end - start + 1); 5372 5373 clear_extent_bit(io_tree, start, end, 5374 EXTENT_LOCKED | EXTENT_DELALLOC | 5375 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, 5376 &cached_state); 5377 5378 cond_resched(); 5379 spin_lock(&io_tree->lock); 5380 } 5381 spin_unlock(&io_tree->lock); 5382 } 5383 5384 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, 5385 struct btrfs_block_rsv *rsv) 5386 { 5387 struct btrfs_fs_info *fs_info = root->fs_info; 5388 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 5389 struct btrfs_trans_handle *trans; 5390 u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); 5391 int ret; 5392 5393 /* 5394 * Eviction should be taking place at some place safe because of our 5395 * delayed iputs. However the normal flushing code will run delayed 5396 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. 5397 * 5398 * We reserve the delayed_refs_extra here again because we can't use 5399 * btrfs_start_transaction(root, 0) for the same deadlocky reason as 5400 * above. We reserve our extra bit here because we generate a ton of 5401 * delayed refs activity by truncating. 5402 * 5403 * If we cannot make our reservation we'll attempt to steal from the 5404 * global reserve, because we really want to be able to free up space. 5405 */ 5406 ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra, 5407 BTRFS_RESERVE_FLUSH_EVICT); 5408 if (ret) { 5409 /* 5410 * Try to steal from the global reserve if there is space for 5411 * it. 5412 */ 5413 if (btrfs_check_space_for_delayed_refs(fs_info) || 5414 btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) { 5415 btrfs_warn(fs_info, 5416 "could not allocate space for delete; will truncate on mount"); 5417 return ERR_PTR(-ENOSPC); 5418 } 5419 delayed_refs_extra = 0; 5420 } 5421 5422 trans = btrfs_join_transaction(root); 5423 if (IS_ERR(trans)) 5424 return trans; 5425 5426 if (delayed_refs_extra) { 5427 trans->block_rsv = &fs_info->trans_block_rsv; 5428 trans->bytes_reserved = delayed_refs_extra; 5429 btrfs_block_rsv_migrate(rsv, trans->block_rsv, 5430 delayed_refs_extra, 1); 5431 } 5432 return trans; 5433 } 5434 5435 void btrfs_evict_inode(struct inode *inode) 5436 { 5437 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5438 struct btrfs_trans_handle *trans; 5439 struct btrfs_root *root = BTRFS_I(inode)->root; 5440 struct btrfs_block_rsv *rsv; 5441 int ret; 5442 5443 trace_btrfs_inode_evict(inode); 5444 5445 if (!root) { 5446 clear_inode(inode); 5447 return; 5448 } 5449 5450 evict_inode_truncate_pages(inode); 5451 5452 if (inode->i_nlink && 5453 ((btrfs_root_refs(&root->root_item) != 0 && 5454 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || 5455 btrfs_is_free_space_inode(BTRFS_I(inode)))) 5456 goto no_delete; 5457 5458 if (is_bad_inode(inode)) 5459 goto no_delete; 5460 5461 btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); 5462 5463 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) 5464 goto no_delete; 5465 5466 if (inode->i_nlink > 0) { 5467 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && 5468 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); 5469 goto no_delete; 5470 } 5471 5472 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); 5473 if (ret) 5474 goto no_delete; 5475 5476 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 5477 if (!rsv) 5478 goto no_delete; 5479 rsv->size = btrfs_calc_metadata_size(fs_info, 1); 5480 
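	/*
	 * failfast: if this rsv runs dry mid-truncate, fail the allocation
	 * right away instead of trying harder, so the loop below can end
	 * the transaction and refill via evict_refill_and_join().
	 */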
rsv->failfast = 1; 5481 5482 btrfs_i_size_write(BTRFS_I(inode), 0); 5483 5484 while (1) { 5485 trans = evict_refill_and_join(root, rsv); 5486 if (IS_ERR(trans)) 5487 goto free_rsv; 5488 5489 trans->block_rsv = rsv; 5490 5491 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 5492 trans->block_rsv = &fs_info->trans_block_rsv; 5493 btrfs_end_transaction(trans); 5494 btrfs_btree_balance_dirty(fs_info); 5495 if (ret && ret != -ENOSPC && ret != -EAGAIN) 5496 goto free_rsv; 5497 else if (!ret) 5498 break; 5499 } 5500 5501 /* 5502 * Errors here aren't a big deal, it just means we leave orphan items in 5503 * the tree. They will be cleaned up on the next mount. If the inode 5504 * number gets reused, cleanup deletes the orphan item without doing 5505 * anything, and unlink reuses the existing orphan item. 5506 * 5507 * If it turns out that we are dropping too many of these, we might want 5508 * to add a mechanism for retrying these after a commit. 5509 */ 5510 trans = evict_refill_and_join(root, rsv); 5511 if (!IS_ERR(trans)) { 5512 trans->block_rsv = rsv; 5513 btrfs_orphan_del(trans, BTRFS_I(inode)); 5514 trans->block_rsv = &fs_info->trans_block_rsv; 5515 btrfs_end_transaction(trans); 5516 } 5517 5518 if (!(root == fs_info->tree_root || 5519 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) 5520 btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode))); 5521 5522 free_rsv: 5523 btrfs_free_block_rsv(fs_info, rsv); 5524 no_delete: 5525 /* 5526 * If we didn't successfully delete, the orphan item will still be in 5527 * the tree and we'll retry on the next mount. Again, we might also want 5528 * to retry these periodically in the future. 5529 */ 5530 btrfs_remove_delayed_node(BTRFS_I(inode)); 5531 clear_inode(inode); 5532 } 5533 5534 /* 5535 * Return the key found in the dir entry in the location pointer, fill @type 5536 * with BTRFS_FT_*, and return 0. 5537 * 5538 * If no dir entries were found, returns -ENOENT. 5539 * If found a corrupted location in dir entry, returns -EUCLEAN. 5540 */ 5541 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 5542 struct btrfs_key *location, u8 *type) 5543 { 5544 const char *name = dentry->d_name.name; 5545 int namelen = dentry->d_name.len; 5546 struct btrfs_dir_item *di; 5547 struct btrfs_path *path; 5548 struct btrfs_root *root = BTRFS_I(dir)->root; 5549 int ret = 0; 5550 5551 path = btrfs_alloc_path(); 5552 if (!path) 5553 return -ENOMEM; 5554 5555 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)), 5556 name, namelen, 0); 5557 if (IS_ERR_OR_NULL(di)) { 5558 ret = di ? PTR_ERR(di) : -ENOENT; 5559 goto out; 5560 } 5561 5562 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5563 if (location->type != BTRFS_INODE_ITEM_KEY && 5564 location->type != BTRFS_ROOT_ITEM_KEY) { 5565 ret = -EUCLEAN; 5566 btrfs_warn(root->fs_info, 5567 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))", 5568 __func__, name, btrfs_ino(BTRFS_I(dir)), 5569 location->objectid, location->type, location->offset); 5570 } 5571 if (!ret) 5572 *type = btrfs_dir_type(path->nodes[0], di); 5573 out: 5574 btrfs_free_path(path); 5575 return ret; 5576 } 5577 5578 /* 5579 * when we hit a tree root in a directory, the btrfs part of the inode 5580 * needs to be changed to reflect the root directory of the tree root. This 5581 * is kind of like crossing a mount point. 
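 *
 * Roughly what happens below: the dir item handed us a BTRFS_ROOT_ITEM_KEY
 * location, so we look up the (parent root, BTRFS_ROOT_REF_KEY, child root)
 * item in the tree root, verify it really names this dentry in this
 * directory, and then rewrite the location to the child root's own root
 * directory (btrfs_root_dirid()) as a plain INODE_ITEM key.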
5582 */ 5583 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, 5584 struct inode *dir, 5585 struct dentry *dentry, 5586 struct btrfs_key *location, 5587 struct btrfs_root **sub_root) 5588 { 5589 struct btrfs_path *path; 5590 struct btrfs_root *new_root; 5591 struct btrfs_root_ref *ref; 5592 struct extent_buffer *leaf; 5593 struct btrfs_key key; 5594 int ret; 5595 int err = 0; 5596 5597 path = btrfs_alloc_path(); 5598 if (!path) { 5599 err = -ENOMEM; 5600 goto out; 5601 } 5602 5603 err = -ENOENT; 5604 key.objectid = BTRFS_I(dir)->root->root_key.objectid; 5605 key.type = BTRFS_ROOT_REF_KEY; 5606 key.offset = location->objectid; 5607 5608 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); 5609 if (ret) { 5610 if (ret < 0) 5611 err = ret; 5612 goto out; 5613 } 5614 5615 leaf = path->nodes[0]; 5616 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5617 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) || 5618 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 5619 goto out; 5620 5621 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 5622 (unsigned long)(ref + 1), 5623 dentry->d_name.len); 5624 if (ret) 5625 goto out; 5626 5627 btrfs_release_path(path); 5628 5629 new_root = btrfs_read_fs_root_no_name(fs_info, location); 5630 if (IS_ERR(new_root)) { 5631 err = PTR_ERR(new_root); 5632 goto out; 5633 } 5634 5635 *sub_root = new_root; 5636 location->objectid = btrfs_root_dirid(&new_root->root_item); 5637 location->type = BTRFS_INODE_ITEM_KEY; 5638 location->offset = 0; 5639 err = 0; 5640 out: 5641 btrfs_free_path(path); 5642 return err; 5643 } 5644 5645 static void inode_tree_add(struct inode *inode) 5646 { 5647 struct btrfs_root *root = BTRFS_I(inode)->root; 5648 struct btrfs_inode *entry; 5649 struct rb_node **p; 5650 struct rb_node *parent; 5651 struct rb_node *new = &BTRFS_I(inode)->rb_node; 5652 u64 ino = btrfs_ino(BTRFS_I(inode)); 5653 5654 if (inode_unhashed(inode)) 5655 return; 5656 parent = NULL; 5657 spin_lock(&root->inode_lock); 5658 p = &root->inode_tree.rb_node; 5659 while (*p) { 5660 parent = *p; 5661 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5662 5663 if (ino < btrfs_ino(entry)) 5664 p = &parent->rb_left; 5665 else if (ino > btrfs_ino(entry)) 5666 p = &parent->rb_right; 5667 else { 5668 WARN_ON(!(entry->vfs_inode.i_state & 5669 (I_WILL_FREE | I_FREEING))); 5670 rb_replace_node(parent, new, &root->inode_tree); 5671 RB_CLEAR_NODE(parent); 5672 spin_unlock(&root->inode_lock); 5673 return; 5674 } 5675 } 5676 rb_link_node(new, parent, p); 5677 rb_insert_color(new, &root->inode_tree); 5678 spin_unlock(&root->inode_lock); 5679 } 5680 5681 static void inode_tree_del(struct inode *inode) 5682 { 5683 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 5684 struct btrfs_root *root = BTRFS_I(inode)->root; 5685 int empty = 0; 5686 5687 spin_lock(&root->inode_lock); 5688 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 5689 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 5690 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 5691 empty = RB_EMPTY_ROOT(&root->inode_tree); 5692 } 5693 spin_unlock(&root->inode_lock); 5694 5695 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5696 synchronize_srcu(&fs_info->subvol_srcu); 5697 spin_lock(&root->inode_lock); 5698 empty = RB_EMPTY_ROOT(&root->inode_tree); 5699 spin_unlock(&root->inode_lock); 5700 if (empty) 5701 btrfs_add_dead_root(root); 5702 } 5703 } 5704 5705 5706 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5707 { 5708 struct 
btrfs_iget_args *args = p; 5709 inode->i_ino = args->location->objectid; 5710 memcpy(&BTRFS_I(inode)->location, args->location, 5711 sizeof(*args->location)); 5712 BTRFS_I(inode)->root = args->root; 5713 return 0; 5714 } 5715 5716 static int btrfs_find_actor(struct inode *inode, void *opaque) 5717 { 5718 struct btrfs_iget_args *args = opaque; 5719 return args->location->objectid == BTRFS_I(inode)->location.objectid && 5720 args->root == BTRFS_I(inode)->root; 5721 } 5722 5723 static struct inode *btrfs_iget_locked(struct super_block *s, 5724 struct btrfs_key *location, 5725 struct btrfs_root *root) 5726 { 5727 struct inode *inode; 5728 struct btrfs_iget_args args; 5729 unsigned long hashval = btrfs_inode_hash(location->objectid, root); 5730 5731 args.location = location; 5732 args.root = root; 5733 5734 inode = iget5_locked(s, hashval, btrfs_find_actor, 5735 btrfs_init_locked_inode, 5736 (void *)&args); 5737 return inode; 5738 } 5739 5740 /* Get an inode object given its location and corresponding root. 5741 * Returns in *is_new if the inode was read from disk 5742 */ 5743 struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, 5744 struct btrfs_root *root, int *new, 5745 struct btrfs_path *path) 5746 { 5747 struct inode *inode; 5748 5749 inode = btrfs_iget_locked(s, location, root); 5750 if (!inode) 5751 return ERR_PTR(-ENOMEM); 5752 5753 if (inode->i_state & I_NEW) { 5754 int ret; 5755 5756 ret = btrfs_read_locked_inode(inode, path); 5757 if (!ret) { 5758 inode_tree_add(inode); 5759 unlock_new_inode(inode); 5760 if (new) 5761 *new = 1; 5762 } else { 5763 iget_failed(inode); 5764 /* 5765 * ret > 0 can come from btrfs_search_slot called by 5766 * btrfs_read_locked_inode, this means the inode item 5767 * was not found. 5768 */ 5769 if (ret > 0) 5770 ret = -ENOENT; 5771 inode = ERR_PTR(ret); 5772 } 5773 } 5774 5775 return inode; 5776 } 5777 5778 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 5779 struct btrfs_root *root, int *new) 5780 { 5781 return btrfs_iget_path(s, location, root, new, NULL); 5782 } 5783 5784 static struct inode *new_simple_dir(struct super_block *s, 5785 struct btrfs_key *key, 5786 struct btrfs_root *root) 5787 { 5788 struct inode *inode = new_inode(s); 5789 5790 if (!inode) 5791 return ERR_PTR(-ENOMEM); 5792 5793 BTRFS_I(inode)->root = root; 5794 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5795 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5796 5797 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5798 inode->i_op = &btrfs_dir_ro_inode_operations; 5799 inode->i_opflags &= ~IOP_XATTR; 5800 inode->i_fop = &simple_dir_operations; 5801 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5802 inode->i_mtime = current_time(inode); 5803 inode->i_atime = inode->i_mtime; 5804 inode->i_ctime = inode->i_mtime; 5805 BTRFS_I(inode)->i_otime = inode->i_mtime; 5806 5807 return inode; 5808 } 5809 5810 static inline u8 btrfs_inode_type(struct inode *inode) 5811 { 5812 /* 5813 * Compile-time asserts that generic FT_* types still match 5814 * BTRFS_FT_* types 5815 */ 5816 BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN); 5817 BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE); 5818 BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR); 5819 BUILD_BUG_ON(BTRFS_FT_CHRDEV != FT_CHRDEV); 5820 BUILD_BUG_ON(BTRFS_FT_BLKDEV != FT_BLKDEV); 5821 BUILD_BUG_ON(BTRFS_FT_FIFO != FT_FIFO); 5822 BUILD_BUG_ON(BTRFS_FT_SOCK != FT_SOCK); 5823 BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK); 5824 5825 return fs_umode_to_ftype(inode->i_mode); 5826 } 5827 
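/*
 * Resolve @dentry in @dir to an inode.  The directory entry either
 * references a plain inode item in the same root, in which case we iget
 * it and cross-check its mode against the dir item type, or it references
 * a subvolume root, in which case the location is fixed up to the root
 * directory of that subvolume (and a dummy read-only dir is returned if
 * the root reference is gone).
 */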
5828 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5829 { 5830 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 5831 struct inode *inode; 5832 struct btrfs_root *root = BTRFS_I(dir)->root; 5833 struct btrfs_root *sub_root = root; 5834 struct btrfs_key location; 5835 u8 di_type = 0; 5836 int index; 5837 int ret = 0; 5838 5839 if (dentry->d_name.len > BTRFS_NAME_LEN) 5840 return ERR_PTR(-ENAMETOOLONG); 5841 5842 ret = btrfs_inode_by_name(dir, dentry, &location, &di_type); 5843 if (ret < 0) 5844 return ERR_PTR(ret); 5845 5846 if (location.type == BTRFS_INODE_ITEM_KEY) { 5847 inode = btrfs_iget(dir->i_sb, &location, root, NULL); 5848 if (IS_ERR(inode)) 5849 return inode; 5850 5851 /* Do extra check against inode mode with di_type */ 5852 if (btrfs_inode_type(inode) != di_type) { 5853 btrfs_crit(fs_info, 5854 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", 5855 inode->i_mode, btrfs_inode_type(inode), 5856 di_type); 5857 iput(inode); 5858 return ERR_PTR(-EUCLEAN); 5859 } 5860 return inode; 5861 } 5862 5863 index = srcu_read_lock(&fs_info->subvol_srcu); 5864 ret = fixup_tree_root_location(fs_info, dir, dentry, 5865 &location, &sub_root); 5866 if (ret < 0) { 5867 if (ret != -ENOENT) 5868 inode = ERR_PTR(ret); 5869 else 5870 inode = new_simple_dir(dir->i_sb, &location, sub_root); 5871 } else { 5872 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); 5873 } 5874 srcu_read_unlock(&fs_info->subvol_srcu, index); 5875 5876 if (!IS_ERR(inode) && root != sub_root) { 5877 down_read(&fs_info->cleanup_work_sem); 5878 if (!sb_rdonly(inode->i_sb)) 5879 ret = btrfs_orphan_cleanup(sub_root); 5880 up_read(&fs_info->cleanup_work_sem); 5881 if (ret) { 5882 iput(inode); 5883 inode = ERR_PTR(ret); 5884 } 5885 } 5886 5887 return inode; 5888 } 5889 5890 static int btrfs_dentry_delete(const struct dentry *dentry) 5891 { 5892 struct btrfs_root *root; 5893 struct inode *inode = d_inode(dentry); 5894 5895 if (!inode && !IS_ROOT(dentry)) 5896 inode = d_inode(dentry->d_parent); 5897 5898 if (inode) { 5899 root = BTRFS_I(inode)->root; 5900 if (btrfs_root_refs(&root->root_item) == 0) 5901 return 1; 5902 5903 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5904 return 1; 5905 } 5906 return 0; 5907 } 5908 5909 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5910 unsigned int flags) 5911 { 5912 struct inode *inode = btrfs_lookup_dentry(dir, dentry); 5913 5914 if (inode == ERR_PTR(-ENOENT)) 5915 inode = NULL; 5916 return d_splice_alias(inode, dentry); 5917 } 5918 5919 /* 5920 * All this infrastructure exists because dir_emit can fault, and we are holding 5921 * the tree lock when doing readdir. For now just allocate a buffer and copy 5922 * our information into that, and then dir_emit from the buffer. This is 5923 * similar to what NFS does, only we don't keep the buffer around in pagecache 5924 * because I'm afraid I'll mess that up. Long term we need to make filldir do 5925 * copy_to_user_inatomic so we don't have to worry about page faulting under the 5926 * tree lock. 
5927 */ 5928 static int btrfs_opendir(struct inode *inode, struct file *file) 5929 { 5930 struct btrfs_file_private *private; 5931 5932 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5933 if (!private) 5934 return -ENOMEM; 5935 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5936 if (!private->filldir_buf) { 5937 kfree(private); 5938 return -ENOMEM; 5939 } 5940 file->private_data = private; 5941 return 0; 5942 } 5943 5944 struct dir_entry { 5945 u64 ino; 5946 u64 offset; 5947 unsigned type; 5948 int name_len; 5949 }; 5950 5951 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) 5952 { 5953 while (entries--) { 5954 struct dir_entry *entry = addr; 5955 char *name = (char *)(entry + 1); 5956 5957 ctx->pos = get_unaligned(&entry->offset); 5958 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5959 get_unaligned(&entry->ino), 5960 get_unaligned(&entry->type))) 5961 return 1; 5962 addr += sizeof(struct dir_entry) + 5963 get_unaligned(&entry->name_len); 5964 ctx->pos++; 5965 } 5966 return 0; 5967 } 5968 5969 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5970 { 5971 struct inode *inode = file_inode(file); 5972 struct btrfs_root *root = BTRFS_I(inode)->root; 5973 struct btrfs_file_private *private = file->private_data; 5974 struct btrfs_dir_item *di; 5975 struct btrfs_key key; 5976 struct btrfs_key found_key; 5977 struct btrfs_path *path; 5978 void *addr; 5979 struct list_head ins_list; 5980 struct list_head del_list; 5981 int ret; 5982 struct extent_buffer *leaf; 5983 int slot; 5984 char *name_ptr; 5985 int name_len; 5986 int entries = 0; 5987 int total_len = 0; 5988 bool put = false; 5989 struct btrfs_key location; 5990 5991 if (!dir_emit_dots(file, ctx)) 5992 return 0; 5993 5994 path = btrfs_alloc_path(); 5995 if (!path) 5996 return -ENOMEM; 5997 5998 addr = private->filldir_buf; 5999 path->reada = READA_FORWARD; 6000 6001 INIT_LIST_HEAD(&ins_list); 6002 INIT_LIST_HEAD(&del_list); 6003 put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); 6004 6005 again: 6006 key.type = BTRFS_DIR_INDEX_KEY; 6007 key.offset = ctx->pos; 6008 key.objectid = btrfs_ino(BTRFS_I(inode)); 6009 6010 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6011 if (ret < 0) 6012 goto err; 6013 6014 while (1) { 6015 struct dir_entry *entry; 6016 6017 leaf = path->nodes[0]; 6018 slot = path->slots[0]; 6019 if (slot >= btrfs_header_nritems(leaf)) { 6020 ret = btrfs_next_leaf(root, path); 6021 if (ret < 0) 6022 goto err; 6023 else if (ret > 0) 6024 break; 6025 continue; 6026 } 6027 6028 btrfs_item_key_to_cpu(leaf, &found_key, slot); 6029 6030 if (found_key.objectid != key.objectid) 6031 break; 6032 if (found_key.type != BTRFS_DIR_INDEX_KEY) 6033 break; 6034 if (found_key.offset < ctx->pos) 6035 goto next; 6036 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 6037 goto next; 6038 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 6039 name_len = btrfs_dir_name_len(leaf, di); 6040 if ((total_len + sizeof(struct dir_entry) + name_len) >= 6041 PAGE_SIZE) { 6042 btrfs_release_path(path); 6043 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6044 if (ret) 6045 goto nopos; 6046 addr = private->filldir_buf; 6047 entries = 0; 6048 total_len = 0; 6049 goto again; 6050 } 6051 6052 entry = addr; 6053 put_unaligned(name_len, &entry->name_len); 6054 name_ptr = (char *)(entry + 1); 6055 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), 6056 name_len); 6057 
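/*
 * The name is now in the buffer; stash the d_type, inode number and
 * directory index alongside it so btrfs_filldir() can emit this entry
 * after we have dropped the tree lock.
 */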
put_unaligned(fs_ftype_to_dtype(btrfs_dir_type(leaf, di)), 6058 &entry->type); 6059 btrfs_dir_item_key_to_cpu(leaf, di, &location); 6060 put_unaligned(location.objectid, &entry->ino); 6061 put_unaligned(found_key.offset, &entry->offset); 6062 entries++; 6063 addr += sizeof(struct dir_entry) + name_len; 6064 total_len += sizeof(struct dir_entry) + name_len; 6065 next: 6066 path->slots[0]++; 6067 } 6068 btrfs_release_path(path); 6069 6070 ret = btrfs_filldir(private->filldir_buf, entries, ctx); 6071 if (ret) 6072 goto nopos; 6073 6074 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 6075 if (ret) 6076 goto nopos; 6077 6078 /* 6079 * Stop new entries from being returned after we return the last 6080 * entry. 6081 * 6082 * New directory entries are assigned a strictly increasing 6083 * offset. This means that new entries created during readdir 6084 * are *guaranteed* to be seen in the future by that readdir. 6085 * This has broken buggy programs which operate on names as 6086 * they're returned by readdir. Until we re-use freed offsets 6087 * we have this hack to stop new entries from being returned 6088 * under the assumption that they'll never reach this huge 6089 * offset. 6090 * 6091 * This is being careful not to overflow 32bit loff_t unless the 6092 * last entry requires it because doing so has broken 32bit apps 6093 * in the past. 6094 */ 6095 if (ctx->pos >= INT_MAX) 6096 ctx->pos = LLONG_MAX; 6097 else 6098 ctx->pos = INT_MAX; 6099 nopos: 6100 ret = 0; 6101 err: 6102 if (put) 6103 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); 6104 btrfs_free_path(path); 6105 return ret; 6106 } 6107 6108 /* 6109 * This is somewhat expensive, updating the tree every time the 6110 * inode changes. But, it is most likely to find the inode in cache. 6111 * FIXME, needs more benchmarking...there are no reasons other than performance 6112 * to keep or drop this code. 6113 */ 6114 static int btrfs_dirty_inode(struct inode *inode) 6115 { 6116 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6117 struct btrfs_root *root = BTRFS_I(inode)->root; 6118 struct btrfs_trans_handle *trans; 6119 int ret; 6120 6121 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 6122 return 0; 6123 6124 trans = btrfs_join_transaction(root); 6125 if (IS_ERR(trans)) 6126 return PTR_ERR(trans); 6127 6128 ret = btrfs_update_inode(trans, root, inode); 6129 if (ret && ret == -ENOSPC) { 6130 /* whoops, lets try again with the full transaction */ 6131 btrfs_end_transaction(trans); 6132 trans = btrfs_start_transaction(root, 1); 6133 if (IS_ERR(trans)) 6134 return PTR_ERR(trans); 6135 6136 ret = btrfs_update_inode(trans, root, inode); 6137 } 6138 btrfs_end_transaction(trans); 6139 if (BTRFS_I(inode)->delayed_node) 6140 btrfs_balance_delayed_items(fs_info); 6141 6142 return ret; 6143 } 6144 6145 /* 6146 * This is a copy of file_update_time. We need this so we can return error on 6147 * ENOSPC for updating the inode in the case of file write and mmap writes. 6148 */ 6149 static int btrfs_update_time(struct inode *inode, struct timespec64 *now, 6150 int flags) 6151 { 6152 struct btrfs_root *root = BTRFS_I(inode)->root; 6153 bool dirty = flags & ~S_VERSION; 6154 6155 if (btrfs_root_readonly(root)) 6156 return -EROFS; 6157 6158 if (flags & S_VERSION) 6159 dirty |= inode_maybe_inc_iversion(inode, dirty); 6160 if (flags & S_CTIME) 6161 inode->i_ctime = *now; 6162 if (flags & S_MTIME) 6163 inode->i_mtime = *now; 6164 if (flags & S_ATIME) 6165 inode->i_atime = *now; 6166 return dirty ? 
btrfs_dirty_inode(inode) : 0; 6167 } 6168 6169 /* 6170 * find the highest existing sequence number in a directory 6171 * and then set the in-memory index_cnt variable to reflect 6172 * free sequence numbers 6173 */ 6174 static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 6175 { 6176 struct btrfs_root *root = inode->root; 6177 struct btrfs_key key, found_key; 6178 struct btrfs_path *path; 6179 struct extent_buffer *leaf; 6180 int ret; 6181 6182 key.objectid = btrfs_ino(inode); 6183 key.type = BTRFS_DIR_INDEX_KEY; 6184 key.offset = (u64)-1; 6185 6186 path = btrfs_alloc_path(); 6187 if (!path) 6188 return -ENOMEM; 6189 6190 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6191 if (ret < 0) 6192 goto out; 6193 /* FIXME: we should be able to handle this */ 6194 if (ret == 0) 6195 goto out; 6196 ret = 0; 6197 6198 /* 6199 * MAGIC NUMBER EXPLANATION: 6200 * since we search a directory based on f_pos we have to start at 2 6201 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody 6202 * else has to start at 2 6203 */ 6204 if (path->slots[0] == 0) { 6205 inode->index_cnt = 2; 6206 goto out; 6207 } 6208 6209 path->slots[0]--; 6210 6211 leaf = path->nodes[0]; 6212 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6213 6214 if (found_key.objectid != btrfs_ino(inode) || 6215 found_key.type != BTRFS_DIR_INDEX_KEY) { 6216 inode->index_cnt = 2; 6217 goto out; 6218 } 6219 6220 inode->index_cnt = found_key.offset + 1; 6221 out: 6222 btrfs_free_path(path); 6223 return ret; 6224 } 6225 6226 /* 6227 * helper to find a free sequence number in a given directory. This current 6228 * code is very simple, later versions will do smarter things in the btree 6229 */ 6230 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) 6231 { 6232 int ret = 0; 6233 6234 if (dir->index_cnt == (u64)-1) { 6235 ret = btrfs_inode_delayed_dir_index_count(dir); 6236 if (ret) { 6237 ret = btrfs_set_inode_index_count(dir); 6238 if (ret) 6239 return ret; 6240 } 6241 } 6242 6243 *index = dir->index_cnt; 6244 dir->index_cnt++; 6245 6246 return ret; 6247 } 6248 6249 static int btrfs_insert_inode_locked(struct inode *inode) 6250 { 6251 struct btrfs_iget_args args; 6252 args.location = &BTRFS_I(inode)->location; 6253 args.root = BTRFS_I(inode)->root; 6254 6255 return insert_inode_locked4(inode, 6256 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6257 btrfs_find_actor, &args); 6258 } 6259 6260 /* 6261 * Inherit flags from the parent inode. 6262 * 6263 * Currently only the compression flags and the cow flags are inherited. 
6264 */ 6265 static void btrfs_inherit_iflags(struct inode *inode, struct inode *dir) 6266 { 6267 unsigned int flags; 6268 6269 if (!dir) 6270 return; 6271 6272 flags = BTRFS_I(dir)->flags; 6273 6274 if (flags & BTRFS_INODE_NOCOMPRESS) { 6275 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; 6276 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 6277 } else if (flags & BTRFS_INODE_COMPRESS) { 6278 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; 6279 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; 6280 } 6281 6282 if (flags & BTRFS_INODE_NODATACOW) { 6283 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; 6284 if (S_ISREG(inode->i_mode)) 6285 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6286 } 6287 6288 btrfs_sync_inode_flags_to_i_flags(inode); 6289 } 6290 6291 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, 6292 struct btrfs_root *root, 6293 struct inode *dir, 6294 const char *name, int name_len, 6295 u64 ref_objectid, u64 objectid, 6296 umode_t mode, u64 *index) 6297 { 6298 struct btrfs_fs_info *fs_info = root->fs_info; 6299 struct inode *inode; 6300 struct btrfs_inode_item *inode_item; 6301 struct btrfs_key *location; 6302 struct btrfs_path *path; 6303 struct btrfs_inode_ref *ref; 6304 struct btrfs_key key[2]; 6305 u32 sizes[2]; 6306 int nitems = name ? 2 : 1; 6307 unsigned long ptr; 6308 int ret; 6309 6310 path = btrfs_alloc_path(); 6311 if (!path) 6312 return ERR_PTR(-ENOMEM); 6313 6314 inode = new_inode(fs_info->sb); 6315 if (!inode) { 6316 btrfs_free_path(path); 6317 return ERR_PTR(-ENOMEM); 6318 } 6319 6320 /* 6321 * O_TMPFILE, set link count to 0, so that after this point, 6322 * we fill in an inode item with the correct link count. 6323 */ 6324 if (!name) 6325 set_nlink(inode, 0); 6326 6327 /* 6328 * we have to initialize this early, so we can reclaim the inode 6329 * number if we fail afterwards in this function. 6330 */ 6331 inode->i_ino = objectid; 6332 6333 if (dir && name) { 6334 trace_btrfs_inode_request(dir); 6335 6336 ret = btrfs_set_inode_index(BTRFS_I(dir), index); 6337 if (ret) { 6338 btrfs_free_path(path); 6339 iput(inode); 6340 return ERR_PTR(ret); 6341 } 6342 } else if (dir) { 6343 *index = 0; 6344 } 6345 /* 6346 * index_cnt is ignored for everything but a dir, 6347 * btrfs_set_inode_index_count has an explanation for the magic 6348 * number 6349 */ 6350 BTRFS_I(inode)->index_cnt = 2; 6351 BTRFS_I(inode)->dir_index = *index; 6352 BTRFS_I(inode)->root = root; 6353 BTRFS_I(inode)->generation = trans->transid; 6354 inode->i_generation = BTRFS_I(inode)->generation; 6355 6356 /* 6357 * We could have gotten an inode number from somebody who was fsynced 6358 * and then removed in this same transaction, so let's just set full 6359 * sync since it will be a full sync anyway and this will blow away the 6360 * old info in the log. 6361 */ 6362 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 6363 6364 key[0].objectid = objectid; 6365 key[0].type = BTRFS_INODE_ITEM_KEY; 6366 key[0].offset = 0; 6367 6368 sizes[0] = sizeof(struct btrfs_inode_item); 6369 6370 if (name) { 6371 /* 6372 * Start new inodes with an inode_ref. This is slightly more 6373 * efficient for small numbers of hard links since they will 6374 * be packed into one item. Extended refs will kick in if we 6375 * add more hard links than can fit in the ref item. 
6376 */ 6377 key[1].objectid = objectid; 6378 key[1].type = BTRFS_INODE_REF_KEY; 6379 key[1].offset = ref_objectid; 6380 6381 sizes[1] = name_len + sizeof(*ref); 6382 } 6383 6384 location = &BTRFS_I(inode)->location; 6385 location->objectid = objectid; 6386 location->offset = 0; 6387 location->type = BTRFS_INODE_ITEM_KEY; 6388 6389 ret = btrfs_insert_inode_locked(inode); 6390 if (ret < 0) { 6391 iput(inode); 6392 goto fail; 6393 } 6394 6395 path->leave_spinning = 1; 6396 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); 6397 if (ret != 0) 6398 goto fail_unlock; 6399 6400 inode_init_owner(inode, dir, mode); 6401 inode_set_bytes(inode, 0); 6402 6403 inode->i_mtime = current_time(inode); 6404 inode->i_atime = inode->i_mtime; 6405 inode->i_ctime = inode->i_mtime; 6406 BTRFS_I(inode)->i_otime = inode->i_mtime; 6407 6408 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6409 struct btrfs_inode_item); 6410 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, 6411 sizeof(*inode_item)); 6412 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6413 6414 if (name) { 6415 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6416 struct btrfs_inode_ref); 6417 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); 6418 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); 6419 ptr = (unsigned long)(ref + 1); 6420 write_extent_buffer(path->nodes[0], name, ptr, name_len); 6421 } 6422 6423 btrfs_mark_buffer_dirty(path->nodes[0]); 6424 btrfs_free_path(path); 6425 6426 btrfs_inherit_iflags(inode, dir); 6427 6428 if (S_ISREG(mode)) { 6429 if (btrfs_test_opt(fs_info, NODATASUM)) 6430 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6431 if (btrfs_test_opt(fs_info, NODATACOW)) 6432 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6433 BTRFS_INODE_NODATASUM; 6434 } 6435 6436 inode_tree_add(inode); 6437 6438 trace_btrfs_inode_new(inode); 6439 btrfs_set_inode_last_trans(trans, inode); 6440 6441 btrfs_update_root_times(trans, root); 6442 6443 ret = btrfs_inode_inherit_props(trans, inode, dir); 6444 if (ret) 6445 btrfs_err(fs_info, 6446 "error inheriting props for ino %llu (root %llu): %d", 6447 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); 6448 6449 return inode; 6450 6451 fail_unlock: 6452 discard_new_inode(inode); 6453 fail: 6454 if (dir && name) 6455 BTRFS_I(dir)->index_cnt--; 6456 btrfs_free_path(path); 6457 return ERR_PTR(ret); 6458 } 6459 6460 /* 6461 * utility function to add 'inode' into 'parent_inode' with 6462 * a given name and a given sequence number. 6463 * if 'add_backref' is true, also insert a backref from the 6464 * inode to the parent directory.
6465 */ 6466 int btrfs_add_link(struct btrfs_trans_handle *trans, 6467 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, 6468 const char *name, int name_len, int add_backref, u64 index) 6469 { 6470 int ret = 0; 6471 struct btrfs_key key; 6472 struct btrfs_root *root = parent_inode->root; 6473 u64 ino = btrfs_ino(inode); 6474 u64 parent_ino = btrfs_ino(parent_inode); 6475 6476 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6477 memcpy(&key, &inode->root->root_key, sizeof(key)); 6478 } else { 6479 key.objectid = ino; 6480 key.type = BTRFS_INODE_ITEM_KEY; 6481 key.offset = 0; 6482 } 6483 6484 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6485 ret = btrfs_add_root_ref(trans, key.objectid, 6486 root->root_key.objectid, parent_ino, 6487 index, name, name_len); 6488 } else if (add_backref) { 6489 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 6490 parent_ino, index); 6491 } 6492 6493 /* Nothing to clean up yet */ 6494 if (ret) 6495 return ret; 6496 6497 ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key, 6498 btrfs_inode_type(&inode->vfs_inode), index); 6499 if (ret == -EEXIST || ret == -EOVERFLOW) 6500 goto fail_dir_item; 6501 else if (ret) { 6502 btrfs_abort_transaction(trans, ret); 6503 return ret; 6504 } 6505 6506 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + 6507 name_len * 2); 6508 inode_inc_iversion(&parent_inode->vfs_inode); 6509 /* 6510 * If we are replaying a log tree, we do not want to update the mtime 6511 * and ctime of the parent directory with the current time, since the 6512 * log replay procedure is responsible for setting them to their correct 6513 * values (the ones it had when the fsync was done). 6514 */ 6515 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { 6516 struct timespec64 now = current_time(&parent_inode->vfs_inode); 6517 6518 parent_inode->vfs_inode.i_mtime = now; 6519 parent_inode->vfs_inode.i_ctime = now; 6520 } 6521 ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); 6522 if (ret) 6523 btrfs_abort_transaction(trans, ret); 6524 return ret; 6525 6526 fail_dir_item: 6527 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6528 u64 local_index; 6529 int err; 6530 err = btrfs_del_root_ref(trans, key.objectid, 6531 root->root_key.objectid, parent_ino, 6532 &local_index, name, name_len); 6533 if (err) 6534 btrfs_abort_transaction(trans, err); 6535 } else if (add_backref) { 6536 u64 local_index; 6537 int err; 6538 6539 err = btrfs_del_inode_ref(trans, root, name, name_len, 6540 ino, parent_ino, &local_index); 6541 if (err) 6542 btrfs_abort_transaction(trans, err); 6543 } 6544 6545 /* Return the original error code */ 6546 return ret; 6547 } 6548 6549 static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 6550 struct btrfs_inode *dir, struct dentry *dentry, 6551 struct btrfs_inode *inode, int backref, u64 index) 6552 { 6553 int err = btrfs_add_link(trans, dir, inode, 6554 dentry->d_name.name, dentry->d_name.len, 6555 backref, index); 6556 if (err > 0) 6557 err = -EEXIST; 6558 return err; 6559 } 6560 6561 static int btrfs_mknod(struct inode *dir, struct dentry *dentry, 6562 umode_t mode, dev_t rdev) 6563 { 6564 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6565 struct btrfs_trans_handle *trans; 6566 struct btrfs_root *root = BTRFS_I(dir)->root; 6567 struct inode *inode = NULL; 6568 int err; 6569 u64 objectid; 6570 u64 index = 0; 6571 6572 /* 6573 * 2 for inode item and ref 6574 * 2 for dir items 6575 * 1 for xattr if selinux is on 6576 */ 6577 trans = 
btrfs_start_transaction(root, 5); 6578 if (IS_ERR(trans)) 6579 return PTR_ERR(trans); 6580 6581 err = btrfs_find_free_ino(root, &objectid); 6582 if (err) 6583 goto out_unlock; 6584 6585 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6586 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, 6587 mode, &index); 6588 if (IS_ERR(inode)) { 6589 err = PTR_ERR(inode); 6590 inode = NULL; 6591 goto out_unlock; 6592 } 6593 6594 /* 6595 * If the active LSM wants to access the inode during 6596 * d_instantiate it needs these. Smack checks to see 6597 * if the filesystem supports xattrs by looking at the 6598 * ops vector. 6599 */ 6600 inode->i_op = &btrfs_special_inode_operations; 6601 init_special_inode(inode, inode->i_mode, rdev); 6602 6603 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6604 if (err) 6605 goto out_unlock; 6606 6607 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), 6608 0, index); 6609 if (err) 6610 goto out_unlock; 6611 6612 btrfs_update_inode(trans, root, inode); 6613 d_instantiate_new(dentry, inode); 6614 6615 out_unlock: 6616 btrfs_end_transaction(trans); 6617 btrfs_btree_balance_dirty(fs_info); 6618 if (err && inode) { 6619 inode_dec_link_count(inode); 6620 discard_new_inode(inode); 6621 } 6622 return err; 6623 } 6624 6625 static int btrfs_create(struct inode *dir, struct dentry *dentry, 6626 umode_t mode, bool excl) 6627 { 6628 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6629 struct btrfs_trans_handle *trans; 6630 struct btrfs_root *root = BTRFS_I(dir)->root; 6631 struct inode *inode = NULL; 6632 int err; 6633 u64 objectid; 6634 u64 index = 0; 6635 6636 /* 6637 * 2 for inode item and ref 6638 * 2 for dir items 6639 * 1 for xattr if selinux is on 6640 */ 6641 trans = btrfs_start_transaction(root, 5); 6642 if (IS_ERR(trans)) 6643 return PTR_ERR(trans); 6644 6645 err = btrfs_find_free_ino(root, &objectid); 6646 if (err) 6647 goto out_unlock; 6648 6649 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6650 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, 6651 mode, &index); 6652 if (IS_ERR(inode)) { 6653 err = PTR_ERR(inode); 6654 inode = NULL; 6655 goto out_unlock; 6656 } 6657 /* 6658 * If the active LSM wants to access the inode during 6659 * d_instantiate it needs these. Smack checks to see 6660 * if the filesystem supports xattrs by looking at the 6661 * ops vector. 
6662 */ 6663 inode->i_fop = &btrfs_file_operations; 6664 inode->i_op = &btrfs_file_inode_operations; 6665 inode->i_mapping->a_ops = &btrfs_aops; 6666 6667 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6668 if (err) 6669 goto out_unlock; 6670 6671 err = btrfs_update_inode(trans, root, inode); 6672 if (err) 6673 goto out_unlock; 6674 6675 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), 6676 0, index); 6677 if (err) 6678 goto out_unlock; 6679 6680 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 6681 d_instantiate_new(dentry, inode); 6682 6683 out_unlock: 6684 btrfs_end_transaction(trans); 6685 if (err && inode) { 6686 inode_dec_link_count(inode); 6687 discard_new_inode(inode); 6688 } 6689 btrfs_btree_balance_dirty(fs_info); 6690 return err; 6691 } 6692 6693 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6694 struct dentry *dentry) 6695 { 6696 struct btrfs_trans_handle *trans = NULL; 6697 struct btrfs_root *root = BTRFS_I(dir)->root; 6698 struct inode *inode = d_inode(old_dentry); 6699 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 6700 u64 index; 6701 int err; 6702 int drop_inode = 0; 6703 6704 /* do not allow sys_link's with other subvols of the same device */ 6705 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) 6706 return -EXDEV; 6707 6708 if (inode->i_nlink >= BTRFS_LINK_MAX) 6709 return -EMLINK; 6710 6711 err = btrfs_set_inode_index(BTRFS_I(dir), &index); 6712 if (err) 6713 goto fail; 6714 6715 /* 6716 * 2 items for inode and inode ref 6717 * 2 items for dir items 6718 * 1 item for parent inode 6719 * 1 item for orphan item deletion if O_TMPFILE 6720 */ 6721 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); 6722 if (IS_ERR(trans)) { 6723 err = PTR_ERR(trans); 6724 trans = NULL; 6725 goto fail; 6726 } 6727 6728 /* There are several dir indexes for this inode, clear the cache. */ 6729 BTRFS_I(inode)->dir_index = 0ULL; 6730 inc_nlink(inode); 6731 inode_inc_iversion(inode); 6732 inode->i_ctime = current_time(inode); 6733 ihold(inode); 6734 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6735 6736 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), 6737 1, index); 6738 6739 if (err) { 6740 drop_inode = 1; 6741 } else { 6742 struct dentry *parent = dentry->d_parent; 6743 int ret; 6744 6745 err = btrfs_update_inode(trans, root, inode); 6746 if (err) 6747 goto fail; 6748 if (inode->i_nlink == 1) { 6749 /* 6750 * If new hard link count is 1, it's a file created 6751 * with open(2) O_TMPFILE flag. 
6752 */ 6753 err = btrfs_orphan_del(trans, BTRFS_I(inode)); 6754 if (err) 6755 goto fail; 6756 } 6757 d_instantiate(dentry, inode); 6758 ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent, 6759 true, NULL); 6760 if (ret == BTRFS_NEED_TRANS_COMMIT) { 6761 err = btrfs_commit_transaction(trans); 6762 trans = NULL; 6763 } 6764 } 6765 6766 fail: 6767 if (trans) 6768 btrfs_end_transaction(trans); 6769 if (drop_inode) { 6770 inode_dec_link_count(inode); 6771 iput(inode); 6772 } 6773 btrfs_btree_balance_dirty(fs_info); 6774 return err; 6775 } 6776 6777 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 6778 { 6779 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 6780 struct inode *inode = NULL; 6781 struct btrfs_trans_handle *trans; 6782 struct btrfs_root *root = BTRFS_I(dir)->root; 6783 int err = 0; 6784 u64 objectid = 0; 6785 u64 index = 0; 6786 6787 /* 6788 * 2 items for inode and ref 6789 * 2 items for dir items 6790 * 1 for xattr if selinux is on 6791 */ 6792 trans = btrfs_start_transaction(root, 5); 6793 if (IS_ERR(trans)) 6794 return PTR_ERR(trans); 6795 6796 err = btrfs_find_free_ino(root, &objectid); 6797 if (err) 6798 goto out_fail; 6799 6800 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6801 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, 6802 S_IFDIR | mode, &index); 6803 if (IS_ERR(inode)) { 6804 err = PTR_ERR(inode); 6805 inode = NULL; 6806 goto out_fail; 6807 } 6808 6809 /* these must be set before we unlock the inode */ 6810 inode->i_op = &btrfs_dir_inode_operations; 6811 inode->i_fop = &btrfs_dir_file_operations; 6812 6813 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6814 if (err) 6815 goto out_fail; 6816 6817 btrfs_i_size_write(BTRFS_I(inode), 0); 6818 err = btrfs_update_inode(trans, root, inode); 6819 if (err) 6820 goto out_fail; 6821 6822 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 6823 dentry->d_name.name, 6824 dentry->d_name.len, 0, index); 6825 if (err) 6826 goto out_fail; 6827 6828 d_instantiate_new(dentry, inode); 6829 6830 out_fail: 6831 btrfs_end_transaction(trans); 6832 if (err && inode) { 6833 inode_dec_link_count(inode); 6834 discard_new_inode(inode); 6835 } 6836 btrfs_btree_balance_dirty(fs_info); 6837 return err; 6838 } 6839 6840 static noinline int uncompress_inline(struct btrfs_path *path, 6841 struct page *page, 6842 size_t pg_offset, u64 extent_offset, 6843 struct btrfs_file_extent_item *item) 6844 { 6845 int ret; 6846 struct extent_buffer *leaf = path->nodes[0]; 6847 char *tmp; 6848 size_t max_size; 6849 unsigned long inline_size; 6850 unsigned long ptr; 6851 int compress_type; 6852 6853 WARN_ON(pg_offset != 0); 6854 compress_type = btrfs_file_extent_compression(leaf, item); 6855 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6856 inline_size = btrfs_file_extent_inline_item_len(leaf, 6857 btrfs_item_nr(path->slots[0])); 6858 tmp = kmalloc(inline_size, GFP_NOFS); 6859 if (!tmp) 6860 return -ENOMEM; 6861 ptr = btrfs_file_extent_inline_start(item); 6862 6863 read_extent_buffer(leaf, tmp, ptr, inline_size); 6864 6865 max_size = min_t(unsigned long, PAGE_SIZE, max_size); 6866 ret = btrfs_decompress(compress_type, tmp, page, 6867 extent_offset, inline_size, max_size); 6868 6869 /* 6870 * decompression code contains a memset to fill in any space between the end 6871 * of the uncompressed data and the end of max_size in case the decompressed 6872 * data ends up shorter than ram_bytes. 
That doesn't cover the hole between 6873 * the end of an inline extent and the beginning of the next block, so we 6874 * cover that region here. 6875 */ 6876 6877 if (max_size + pg_offset < PAGE_SIZE) { 6878 char *map = kmap(page); 6879 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); 6880 kunmap(page); 6881 } 6882 kfree(tmp); 6883 return ret; 6884 } 6885 6886 /* 6887 * a bit scary, this does extent mapping from logical file offset to the disk. 6888 * the ugly parts come from merging extents from the disk with the in-ram 6889 * representation. This gets more complex because of the data=ordered code, 6890 * where the in-ram extents might be locked pending data=ordered completion. 6891 * 6892 * This also copies inline extents directly into the page. 6893 */ 6894 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, 6895 struct page *page, 6896 size_t pg_offset, u64 start, u64 len, 6897 int create) 6898 { 6899 struct btrfs_fs_info *fs_info = inode->root->fs_info; 6900 int ret; 6901 int err = 0; 6902 u64 extent_start = 0; 6903 u64 extent_end = 0; 6904 u64 objectid = btrfs_ino(inode); 6905 int extent_type = -1; 6906 struct btrfs_path *path = NULL; 6907 struct btrfs_root *root = inode->root; 6908 struct btrfs_file_extent_item *item; 6909 struct extent_buffer *leaf; 6910 struct btrfs_key found_key; 6911 struct extent_map *em = NULL; 6912 struct extent_map_tree *em_tree = &inode->extent_tree; 6913 struct extent_io_tree *io_tree = &inode->io_tree; 6914 const bool new_inline = !page || create; 6915 6916 read_lock(&em_tree->lock); 6917 em = lookup_extent_mapping(em_tree, start, len); 6918 if (em) 6919 em->bdev = fs_info->fs_devices->latest_bdev; 6920 read_unlock(&em_tree->lock); 6921 6922 if (em) { 6923 if (em->start > start || em->start + em->len <= start) 6924 free_extent_map(em); 6925 else if (em->block_start == EXTENT_MAP_INLINE && page) 6926 free_extent_map(em); 6927 else 6928 goto out; 6929 } 6930 em = alloc_extent_map(); 6931 if (!em) { 6932 err = -ENOMEM; 6933 goto out; 6934 } 6935 em->bdev = fs_info->fs_devices->latest_bdev; 6936 em->start = EXTENT_MAP_HOLE; 6937 em->orig_start = EXTENT_MAP_HOLE; 6938 em->len = (u64)-1; 6939 em->block_len = (u64)-1; 6940 6941 path = btrfs_alloc_path(); 6942 if (!path) { 6943 err = -ENOMEM; 6944 goto out; 6945 } 6946 6947 /* Chances are we'll be called again, so go ahead and do readahead */ 6948 path->reada = READA_FORWARD; 6949 6950 /* 6951 * Unless we're going to uncompress the inline extent, no sleep would 6952 * happen. 6953 */ 6954 path->leave_spinning = 1; 6955 6956 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0); 6957 if (ret < 0) { 6958 err = ret; 6959 goto out; 6960 } else if (ret > 0) { 6961 if (path->slots[0] == 0) 6962 goto not_found; 6963 path->slots[0]--; 6964 } 6965 6966 leaf = path->nodes[0]; 6967 item = btrfs_item_ptr(leaf, path->slots[0], 6968 struct btrfs_file_extent_item); 6969 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6970 if (found_key.objectid != objectid || 6971 found_key.type != BTRFS_EXTENT_DATA_KEY) { 6972 /* 6973 * If we backup past the first extent we want to move forward 6974 * and see if there is an extent in front of us, otherwise we'll 6975 * say there is a hole for our whole search range which can 6976 * cause problems. 
6977 */ 6978 extent_end = start; 6979 goto next; 6980 } 6981 6982 extent_type = btrfs_file_extent_type(leaf, item); 6983 extent_start = found_key.offset; 6984 if (extent_type == BTRFS_FILE_EXTENT_REG || 6985 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 6986 /* Only regular file could have regular/prealloc extent */ 6987 if (!S_ISREG(inode->vfs_inode.i_mode)) { 6988 ret = -EUCLEAN; 6989 btrfs_crit(fs_info, 6990 "regular/prealloc extent found for non-regular inode %llu", 6991 btrfs_ino(inode)); 6992 goto out; 6993 } 6994 extent_end = extent_start + 6995 btrfs_file_extent_num_bytes(leaf, item); 6996 6997 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, 6998 extent_start); 6999 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 7000 size_t size; 7001 7002 size = btrfs_file_extent_ram_bytes(leaf, item); 7003 extent_end = ALIGN(extent_start + size, 7004 fs_info->sectorsize); 7005 7006 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, 7007 path->slots[0], 7008 extent_start); 7009 } 7010 next: 7011 if (start >= extent_end) { 7012 path->slots[0]++; 7013 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 7014 ret = btrfs_next_leaf(root, path); 7015 if (ret < 0) { 7016 err = ret; 7017 goto out; 7018 } else if (ret > 0) { 7019 goto not_found; 7020 } 7021 leaf = path->nodes[0]; 7022 } 7023 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 7024 if (found_key.objectid != objectid || 7025 found_key.type != BTRFS_EXTENT_DATA_KEY) 7026 goto not_found; 7027 if (start + len <= found_key.offset) 7028 goto not_found; 7029 if (start > found_key.offset) 7030 goto next; 7031 7032 /* New extent overlaps with existing one */ 7033 em->start = start; 7034 em->orig_start = start; 7035 em->len = found_key.offset - start; 7036 em->block_start = EXTENT_MAP_HOLE; 7037 goto insert; 7038 } 7039 7040 btrfs_extent_item_to_extent_map(inode, path, item, 7041 new_inline, em); 7042 7043 if (extent_type == BTRFS_FILE_EXTENT_REG || 7044 extent_type == BTRFS_FILE_EXTENT_PREALLOC) { 7045 goto insert; 7046 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 7047 unsigned long ptr; 7048 char *map; 7049 size_t size; 7050 size_t extent_offset; 7051 size_t copy_size; 7052 7053 if (new_inline) 7054 goto out; 7055 7056 size = btrfs_file_extent_ram_bytes(leaf, item); 7057 extent_offset = page_offset(page) + pg_offset - extent_start; 7058 copy_size = min_t(u64, PAGE_SIZE - pg_offset, 7059 size - extent_offset); 7060 em->start = extent_start + extent_offset; 7061 em->len = ALIGN(copy_size, fs_info->sectorsize); 7062 em->orig_block_len = em->len; 7063 em->orig_start = em->start; 7064 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 7065 7066 btrfs_set_path_blocking(path); 7067 if (!PageUptodate(page)) { 7068 if (btrfs_file_extent_compression(leaf, item) != 7069 BTRFS_COMPRESS_NONE) { 7070 ret = uncompress_inline(path, page, pg_offset, 7071 extent_offset, item); 7072 if (ret) { 7073 err = ret; 7074 goto out; 7075 } 7076 } else { 7077 map = kmap(page); 7078 read_extent_buffer(leaf, map + pg_offset, ptr, 7079 copy_size); 7080 if (pg_offset + copy_size < PAGE_SIZE) { 7081 memset(map + pg_offset + copy_size, 0, 7082 PAGE_SIZE - pg_offset - 7083 copy_size); 7084 } 7085 kunmap(page); 7086 } 7087 flush_dcache_page(page); 7088 } 7089 set_extent_uptodate(io_tree, em->start, 7090 extent_map_end(em) - 1, NULL, GFP_NOFS); 7091 goto insert; 7092 } 7093 not_found: 7094 em->start = start; 7095 em->orig_start = start; 7096 em->len = len; 7097 em->block_start = EXTENT_MAP_HOLE; 7098 insert: 7099 btrfs_release_path(path); 
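/*
 * Before inserting the extent map, verify that it actually covers the
 * requested start offset; anything else indicates a logic error above
 * and is reported as -EIO.
 */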
7100 if (em->start > start || extent_map_end(em) <= start) { 7101 btrfs_err(fs_info, 7102 "bad extent! em: [%llu %llu] passed [%llu %llu]", 7103 em->start, em->len, start, len); 7104 err = -EIO; 7105 goto out; 7106 } 7107 7108 err = 0; 7109 write_lock(&em_tree->lock); 7110 err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len); 7111 write_unlock(&em_tree->lock); 7112 out: 7113 btrfs_free_path(path); 7114 7115 trace_btrfs_get_extent(root, inode, em); 7116 7117 if (err) { 7118 free_extent_map(em); 7119 return ERR_PTR(err); 7120 } 7121 BUG_ON(!em); /* Error is always set */ 7122 return em; 7123 } 7124 7125 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, 7126 u64 start, u64 len) 7127 { 7128 struct extent_map *em; 7129 struct extent_map *hole_em = NULL; 7130 u64 delalloc_start = start; 7131 u64 end; 7132 u64 delalloc_len; 7133 u64 delalloc_end; 7134 int err = 0; 7135 7136 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 7137 if (IS_ERR(em)) 7138 return em; 7139 /* 7140 * If our em maps to: 7141 * - a hole or 7142 * - a pre-alloc extent, 7143 * there might actually be delalloc bytes behind it. 7144 */ 7145 if (em->block_start != EXTENT_MAP_HOLE && 7146 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7147 return em; 7148 else 7149 hole_em = em; 7150 7151 /* check to see if we've wrapped (len == -1 or similar) */ 7152 end = start + len; 7153 if (end < start) 7154 end = (u64)-1; 7155 else 7156 end -= 1; 7157 7158 em = NULL; 7159 7160 /* ok, we didn't find anything, lets look for delalloc */ 7161 delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start, 7162 end, len, EXTENT_DELALLOC, 1); 7163 delalloc_end = delalloc_start + delalloc_len; 7164 if (delalloc_end < delalloc_start) 7165 delalloc_end = (u64)-1; 7166 7167 /* 7168 * We didn't find anything useful, return the original results from 7169 * get_extent() 7170 */ 7171 if (delalloc_start > end || delalloc_end <= start) { 7172 em = hole_em; 7173 hole_em = NULL; 7174 goto out; 7175 } 7176 7177 /* 7178 * Adjust the delalloc_start to make sure it doesn't go backwards from 7179 * the start they passed in 7180 */ 7181 delalloc_start = max(start, delalloc_start); 7182 delalloc_len = delalloc_end - delalloc_start; 7183 7184 if (delalloc_len > 0) { 7185 u64 hole_start; 7186 u64 hole_len; 7187 const u64 hole_end = extent_map_end(hole_em); 7188 7189 em = alloc_extent_map(); 7190 if (!em) { 7191 err = -ENOMEM; 7192 goto out; 7193 } 7194 em->bdev = NULL; 7195 7196 ASSERT(hole_em); 7197 /* 7198 * When btrfs_get_extent can't find anything it returns one 7199 * huge hole 7200 * 7201 * Make sure what it found really fits our range, and adjust to 7202 * make sure it is based on the start from the caller 7203 */ 7204 if (hole_end <= start || hole_em->start > end) { 7205 free_extent_map(hole_em); 7206 hole_em = NULL; 7207 } else { 7208 hole_start = max(hole_em->start, start); 7209 hole_len = hole_end - hole_start; 7210 } 7211 7212 if (hole_em && delalloc_start > hole_start) { 7213 /* 7214 * Our hole starts before our delalloc, so we have to 7215 * return just the parts of the hole that go until the 7216 * delalloc starts 7217 */ 7218 em->len = min(hole_len, delalloc_start - hole_start); 7219 em->start = hole_start; 7220 em->orig_start = hole_start; 7221 /* 7222 * Don't adjust block start at all, it is fixed at 7223 * EXTENT_MAP_HOLE 7224 */ 7225 em->block_start = hole_em->block_start; 7226 em->block_len = hole_len; 7227 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 7228 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 7229 
} else { 7230 /* 7231 * Hole is out of passed range or it starts after 7232 * delalloc range 7233 */ 7234 em->start = delalloc_start; 7235 em->len = delalloc_len; 7236 em->orig_start = delalloc_start; 7237 em->block_start = EXTENT_MAP_DELALLOC; 7238 em->block_len = delalloc_len; 7239 } 7240 } else { 7241 return hole_em; 7242 } 7243 out: 7244 7245 free_extent_map(hole_em); 7246 if (err) { 7247 free_extent_map(em); 7248 return ERR_PTR(err); 7249 } 7250 return em; 7251 } 7252 7253 static struct extent_map *btrfs_create_dio_extent(struct inode *inode, 7254 const u64 start, 7255 const u64 len, 7256 const u64 orig_start, 7257 const u64 block_start, 7258 const u64 block_len, 7259 const u64 orig_block_len, 7260 const u64 ram_bytes, 7261 const int type) 7262 { 7263 struct extent_map *em = NULL; 7264 int ret; 7265 7266 if (type != BTRFS_ORDERED_NOCOW) { 7267 em = create_io_em(inode, start, len, orig_start, 7268 block_start, block_len, orig_block_len, 7269 ram_bytes, 7270 BTRFS_COMPRESS_NONE, /* compress_type */ 7271 type); 7272 if (IS_ERR(em)) 7273 goto out; 7274 } 7275 ret = btrfs_add_ordered_extent_dio(inode, start, block_start, 7276 len, block_len, type); 7277 if (ret) { 7278 if (em) { 7279 free_extent_map(em); 7280 btrfs_drop_extent_cache(BTRFS_I(inode), start, 7281 start + len - 1, 0); 7282 } 7283 em = ERR_PTR(ret); 7284 } 7285 out: 7286 7287 return em; 7288 } 7289 7290 static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 7291 u64 start, u64 len) 7292 { 7293 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7294 struct btrfs_root *root = BTRFS_I(inode)->root; 7295 struct extent_map *em; 7296 struct btrfs_key ins; 7297 u64 alloc_hint; 7298 int ret; 7299 7300 alloc_hint = get_extent_allocation_hint(inode, start, len); 7301 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 7302 0, alloc_hint, &ins, 1, 1); 7303 if (ret) 7304 return ERR_PTR(ret); 7305 7306 em = btrfs_create_dio_extent(inode, start, ins.offset, start, 7307 ins.objectid, ins.offset, ins.offset, 7308 ins.offset, BTRFS_ORDERED_REGULAR); 7309 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 7310 if (IS_ERR(em)) 7311 btrfs_free_reserved_extent(fs_info, ins.objectid, 7312 ins.offset, 1); 7313 7314 return em; 7315 } 7316 7317 /* 7318 * returns 1 when the nocow is safe, < 1 on error, 0 if the 7319 * block must be cow'd 7320 */ 7321 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7322 u64 *orig_start, u64 *orig_block_len, 7323 u64 *ram_bytes) 7324 { 7325 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7326 struct btrfs_path *path; 7327 int ret; 7328 struct extent_buffer *leaf; 7329 struct btrfs_root *root = BTRFS_I(inode)->root; 7330 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7331 struct btrfs_file_extent_item *fi; 7332 struct btrfs_key key; 7333 u64 disk_bytenr; 7334 u64 backref_offset; 7335 u64 extent_end; 7336 u64 num_bytes; 7337 int slot; 7338 int found_type; 7339 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); 7340 7341 path = btrfs_alloc_path(); 7342 if (!path) 7343 return -ENOMEM; 7344 7345 ret = btrfs_lookup_file_extent(NULL, root, path, 7346 btrfs_ino(BTRFS_I(inode)), offset, 0); 7347 if (ret < 0) 7348 goto out; 7349 7350 slot = path->slots[0]; 7351 if (ret == 1) { 7352 if (slot == 0) { 7353 /* can't find the item, must cow */ 7354 ret = 0; 7355 goto out; 7356 } 7357 slot--; 7358 } 7359 ret = 0; 7360 leaf = path->nodes[0]; 7361 btrfs_item_key_to_cpu(leaf, &key, slot); 7362 if (key.objectid != btrfs_ino(BTRFS_I(inode)) || 7363 
key.type != BTRFS_EXTENT_DATA_KEY) { 7364 /* not our file or wrong item type, must cow */ 7365 goto out; 7366 } 7367 7368 if (key.offset > offset) { 7369 /* Wrong offset, must cow */ 7370 goto out; 7371 } 7372 7373 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 7374 found_type = btrfs_file_extent_type(leaf, fi); 7375 if (found_type != BTRFS_FILE_EXTENT_REG && 7376 found_type != BTRFS_FILE_EXTENT_PREALLOC) { 7377 /* not a regular extent, must cow */ 7378 goto out; 7379 } 7380 7381 if (!nocow && found_type == BTRFS_FILE_EXTENT_REG) 7382 goto out; 7383 7384 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 7385 if (extent_end <= offset) 7386 goto out; 7387 7388 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 7389 if (disk_bytenr == 0) 7390 goto out; 7391 7392 if (btrfs_file_extent_compression(leaf, fi) || 7393 btrfs_file_extent_encryption(leaf, fi) || 7394 btrfs_file_extent_other_encoding(leaf, fi)) 7395 goto out; 7396 7397 /* 7398 * Do the same check as in btrfs_cross_ref_exist but without the 7399 * unnecessary search. 7400 */ 7401 if (btrfs_file_extent_generation(leaf, fi) <= 7402 btrfs_root_last_snapshot(&root->root_item)) 7403 goto out; 7404 7405 backref_offset = btrfs_file_extent_offset(leaf, fi); 7406 7407 if (orig_start) { 7408 *orig_start = key.offset - backref_offset; 7409 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); 7410 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7411 } 7412 7413 if (btrfs_extent_readonly(fs_info, disk_bytenr)) 7414 goto out; 7415 7416 num_bytes = min(offset + *len, extent_end) - offset; 7417 if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7418 u64 range_end; 7419 7420 range_end = round_up(offset + num_bytes, 7421 root->fs_info->sectorsize) - 1; 7422 ret = test_range_bit(io_tree, offset, range_end, 7423 EXTENT_DELALLOC, 0, NULL); 7424 if (ret) { 7425 ret = -EAGAIN; 7426 goto out; 7427 } 7428 } 7429 7430 btrfs_release_path(path); 7431 7432 /* 7433 * look for other files referencing this extent, if we 7434 * find any we must cow 7435 */ 7436 7437 ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), 7438 key.offset - backref_offset, disk_bytenr); 7439 if (ret) { 7440 ret = 0; 7441 goto out; 7442 } 7443 7444 /* 7445 * adjust disk_bytenr and num_bytes to cover just the bytes 7446 * in this extent we are about to write. If there 7447 * are any csums in that range we have to cow in order 7448 * to keep the csums correct 7449 */ 7450 disk_bytenr += backref_offset; 7451 disk_bytenr += offset - key.offset; 7452 if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes)) 7453 goto out; 7454 /* 7455 * all of the above have passed, it is safe to overwrite this extent 7456 * without cow 7457 */ 7458 *len = num_bytes; 7459 ret = 1; 7460 out: 7461 btrfs_free_path(path); 7462 return ret; 7463 } 7464 7465 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7466 struct extent_state **cached_state, int writing) 7467 { 7468 struct btrfs_ordered_extent *ordered; 7469 int ret = 0; 7470 7471 while (1) { 7472 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7473 cached_state); 7474 /* 7475 * We're concerned with the entire range that we're going to be 7476 * doing DIO to, so we need to make sure there's no ordered 7477 * extents in this range. 
7478 */ 7479 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, 7480 lockend - lockstart + 1); 7481 7482 /* 7483 * We need to make sure there are no buffered pages in this 7484 * range either, we could have raced between the invalidate in 7485 * generic_file_direct_write and locking the extent. The 7486 * invalidate needs to happen so that reads after a write do not 7487 * get stale data. 7488 */ 7489 if (!ordered && 7490 (!writing || !filemap_range_has_page(inode->i_mapping, 7491 lockstart, lockend))) 7492 break; 7493 7494 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7495 cached_state); 7496 7497 if (ordered) { 7498 /* 7499 * If we are doing a DIO read and the ordered extent we 7500 * found is for a buffered write, we can not wait for it 7501 * to complete and retry, because if we do so we can 7502 * deadlock with concurrent buffered writes on page 7503 * locks. This happens only if our DIO read covers more 7504 * than one extent map, if at this point has already 7505 * created an ordered extent for a previous extent map 7506 * and locked its range in the inode's io tree, and a 7507 * concurrent write against that previous extent map's 7508 * range and this range started (we unlock the ranges 7509 * in the io tree only when the bios complete and 7510 * buffered writes always lock pages before attempting 7511 * to lock range in the io tree). 7512 */ 7513 if (writing || 7514 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) 7515 btrfs_start_ordered_extent(inode, ordered, 1); 7516 else 7517 ret = -ENOTBLK; 7518 btrfs_put_ordered_extent(ordered); 7519 } else { 7520 /* 7521 * We could trigger writeback for this range (and wait 7522 * for it to complete) and then invalidate the pages for 7523 * this range (through invalidate_inode_pages2_range()), 7524 * but that can lead us to a deadlock with a concurrent 7525 * call to readpages() (a buffered read or a defrag call 7526 * triggered a readahead) on a page lock due to an 7527 * ordered dio extent we created before but did not have 7528 * yet a corresponding bio submitted (whence it can not 7529 * complete), which makes readpages() wait for that 7530 * ordered extent to complete while holding a lock on 7531 * that page. 
7532 */ 7533 ret = -ENOTBLK; 7534 } 7535 7536 if (ret) 7537 break; 7538 7539 cond_resched(); 7540 } 7541 7542 return ret; 7543 } 7544 7545 /* The callers of this must take lock_extent() */ 7546 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, 7547 u64 orig_start, u64 block_start, 7548 u64 block_len, u64 orig_block_len, 7549 u64 ram_bytes, int compress_type, 7550 int type) 7551 { 7552 struct extent_map_tree *em_tree; 7553 struct extent_map *em; 7554 struct btrfs_root *root = BTRFS_I(inode)->root; 7555 int ret; 7556 7557 ASSERT(type == BTRFS_ORDERED_PREALLOC || 7558 type == BTRFS_ORDERED_COMPRESSED || 7559 type == BTRFS_ORDERED_NOCOW || 7560 type == BTRFS_ORDERED_REGULAR); 7561 7562 em_tree = &BTRFS_I(inode)->extent_tree; 7563 em = alloc_extent_map(); 7564 if (!em) 7565 return ERR_PTR(-ENOMEM); 7566 7567 em->start = start; 7568 em->orig_start = orig_start; 7569 em->len = len; 7570 em->block_len = block_len; 7571 em->block_start = block_start; 7572 em->bdev = root->fs_info->fs_devices->latest_bdev; 7573 em->orig_block_len = orig_block_len; 7574 em->ram_bytes = ram_bytes; 7575 em->generation = -1; 7576 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7577 if (type == BTRFS_ORDERED_PREALLOC) { 7578 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7579 } else if (type == BTRFS_ORDERED_COMPRESSED) { 7580 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 7581 em->compress_type = compress_type; 7582 } 7583 7584 do { 7585 btrfs_drop_extent_cache(BTRFS_I(inode), em->start, 7586 em->start + em->len - 1, 0); 7587 write_lock(&em_tree->lock); 7588 ret = add_extent_mapping(em_tree, em, 1); 7589 write_unlock(&em_tree->lock); 7590 /* 7591 * The caller has taken lock_extent(), who could race with us 7592 * to add em? 7593 */ 7594 } while (ret == -EEXIST); 7595 7596 if (ret) { 7597 free_extent_map(em); 7598 return ERR_PTR(ret); 7599 } 7600 7601 /* em got 2 refs now, callers needs to do free_extent_map once. */ 7602 return em; 7603 } 7604 7605 7606 static int btrfs_get_blocks_direct_read(struct extent_map *em, 7607 struct buffer_head *bh_result, 7608 struct inode *inode, 7609 u64 start, u64 len) 7610 { 7611 if (em->block_start == EXTENT_MAP_HOLE || 7612 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7613 return -ENOENT; 7614 7615 len = min(len, em->len - (start - em->start)); 7616 7617 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 7618 inode->i_blkbits; 7619 bh_result->b_size = len; 7620 bh_result->b_bdev = em->bdev; 7621 set_buffer_mapped(bh_result); 7622 7623 return 0; 7624 } 7625 7626 static int btrfs_get_blocks_direct_write(struct extent_map **map, 7627 struct buffer_head *bh_result, 7628 struct inode *inode, 7629 struct btrfs_dio_data *dio_data, 7630 u64 start, u64 len) 7631 { 7632 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7633 struct extent_map *em = *map; 7634 int ret = 0; 7635 7636 /* 7637 * We don't allocate a new extent in the following cases 7638 * 7639 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7640 * existing extent. 7641 * 2) The extent is marked as PREALLOC. We're good to go here and can 7642 * just use the extent. 
7643 * 7644 */ 7645 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7646 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7647 em->block_start != EXTENT_MAP_HOLE)) { 7648 int type; 7649 u64 block_start, orig_start, orig_block_len, ram_bytes; 7650 7651 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7652 type = BTRFS_ORDERED_PREALLOC; 7653 else 7654 type = BTRFS_ORDERED_NOCOW; 7655 len = min(len, em->len - (start - em->start)); 7656 block_start = em->block_start + (start - em->start); 7657 7658 if (can_nocow_extent(inode, start, &len, &orig_start, 7659 &orig_block_len, &ram_bytes) == 1 && 7660 btrfs_inc_nocow_writers(fs_info, block_start)) { 7661 struct extent_map *em2; 7662 7663 em2 = btrfs_create_dio_extent(inode, start, len, 7664 orig_start, block_start, 7665 len, orig_block_len, 7666 ram_bytes, type); 7667 btrfs_dec_nocow_writers(fs_info, block_start); 7668 if (type == BTRFS_ORDERED_PREALLOC) { 7669 free_extent_map(em); 7670 *map = em = em2; 7671 } 7672 7673 if (em2 && IS_ERR(em2)) { 7674 ret = PTR_ERR(em2); 7675 goto out; 7676 } 7677 /* 7678 * For inode marked NODATACOW or extent marked PREALLOC, 7679 * use the existing or preallocated extent, so does not 7680 * need to adjust btrfs_space_info's bytes_may_use. 7681 */ 7682 btrfs_free_reserved_data_space_noquota(inode, start, 7683 len); 7684 goto skip_cow; 7685 } 7686 } 7687 7688 /* this will cow the extent */ 7689 len = bh_result->b_size; 7690 free_extent_map(em); 7691 *map = em = btrfs_new_extent_direct(inode, start, len); 7692 if (IS_ERR(em)) { 7693 ret = PTR_ERR(em); 7694 goto out; 7695 } 7696 7697 len = min(len, em->len - (start - em->start)); 7698 7699 skip_cow: 7700 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 7701 inode->i_blkbits; 7702 bh_result->b_size = len; 7703 bh_result->b_bdev = em->bdev; 7704 set_buffer_mapped(bh_result); 7705 7706 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7707 set_buffer_new(bh_result); 7708 7709 /* 7710 * Need to update the i_size under the extent lock so buffered 7711 * readers will get the updated i_size when we unlock. 7712 */ 7713 if (!dio_data->overwrite && start + len > i_size_read(inode)) 7714 i_size_write(inode, start + len); 7715 7716 WARN_ON(dio_data->reserve < len); 7717 dio_data->reserve -= len; 7718 dio_data->unsubmitted_oe_range_end = start + len; 7719 current->journal_info = dio_data; 7720 out: 7721 return ret; 7722 } 7723 7724 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 7725 struct buffer_head *bh_result, int create) 7726 { 7727 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7728 struct extent_map *em; 7729 struct extent_state *cached_state = NULL; 7730 struct btrfs_dio_data *dio_data = NULL; 7731 u64 start = iblock << inode->i_blkbits; 7732 u64 lockstart, lockend; 7733 u64 len = bh_result->b_size; 7734 int ret = 0; 7735 7736 if (!create) 7737 len = min_t(u64, len, fs_info->sectorsize); 7738 7739 lockstart = start; 7740 lockend = start + len - 1; 7741 7742 if (current->journal_info) { 7743 /* 7744 * Need to pull our outstanding extents and set journal_info to NULL so 7745 * that anything that needs to check if there's a transaction doesn't get 7746 * confused. 7747 */ 7748 dio_data = current->journal_info; 7749 current->journal_info = NULL; 7750 } 7751 7752 /* 7753 * If this errors out it's because we couldn't invalidate pagecache for 7754 * this range and we need to fallback to buffered. 
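 * In that case we return -ENOTBLK below, which makes the
 * direct IO code fall back to buffered IO.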
7755 */ 7756 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, 7757 create)) { 7758 ret = -ENOTBLK; 7759 goto err; 7760 } 7761 7762 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); 7763 if (IS_ERR(em)) { 7764 ret = PTR_ERR(em); 7765 goto unlock_err; 7766 } 7767 7768 /* 7769 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered 7770 * io. INLINE is special, and we could probably kludge it in here, but 7771 * it's still buffered so for safety lets just fall back to the generic 7772 * buffered path. 7773 * 7774 * For COMPRESSED we _have_ to read the entire extent in so we can 7775 * decompress it, so there will be buffering required no matter what we 7776 * do, so go ahead and fallback to buffered. 7777 * 7778 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7779 * to buffered IO. Don't blame me, this is the price we pay for using 7780 * the generic code. 7781 */ 7782 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7783 em->block_start == EXTENT_MAP_INLINE) { 7784 free_extent_map(em); 7785 ret = -ENOTBLK; 7786 goto unlock_err; 7787 } 7788 7789 if (create) { 7790 ret = btrfs_get_blocks_direct_write(&em, bh_result, inode, 7791 dio_data, start, len); 7792 if (ret < 0) 7793 goto unlock_err; 7794 7795 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, 7796 lockend, &cached_state); 7797 } else { 7798 ret = btrfs_get_blocks_direct_read(em, bh_result, inode, 7799 start, len); 7800 /* Can be negative only if we read from a hole */ 7801 if (ret < 0) { 7802 ret = 0; 7803 free_extent_map(em); 7804 goto unlock_err; 7805 } 7806 /* 7807 * We need to unlock only the end area that we aren't using. 7808 * The rest is going to be unlocked by the endio routine. 7809 */ 7810 lockstart = start + bh_result->b_size; 7811 if (lockstart < lockend) { 7812 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 7813 lockstart, lockend, &cached_state); 7814 } else { 7815 free_extent_state(cached_state); 7816 } 7817 } 7818 7819 free_extent_map(em); 7820 7821 return 0; 7822 7823 unlock_err: 7824 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7825 &cached_state); 7826 err: 7827 if (dio_data) 7828 current->journal_info = dio_data; 7829 return ret; 7830 } 7831 7832 static inline blk_status_t submit_dio_repair_bio(struct inode *inode, 7833 struct bio *bio, 7834 int mirror_num) 7835 { 7836 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7837 blk_status_t ret; 7838 7839 BUG_ON(bio_op(bio) == REQ_OP_WRITE); 7840 7841 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR); 7842 if (ret) 7843 return ret; 7844 7845 ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); 7846 7847 return ret; 7848 } 7849 7850 static int btrfs_check_dio_repairable(struct inode *inode, 7851 struct bio *failed_bio, 7852 struct io_failure_record *failrec, 7853 int failed_mirror) 7854 { 7855 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 7856 int num_copies; 7857 7858 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); 7859 if (num_copies == 1) { 7860 /* 7861 * we only have a single copy of the data, so don't bother with 7862 * all the retry and error correction code that follows. no 7863 * matter what the error is, it is very likely to persist. 
7864 */ 7865 btrfs_debug(fs_info, 7866 "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d", 7867 num_copies, failrec->this_mirror, failed_mirror); 7868 return 0; 7869 } 7870 7871 failrec->failed_mirror = failed_mirror; 7872 failrec->this_mirror++; 7873 if (failrec->this_mirror == failed_mirror) 7874 failrec->this_mirror++; 7875 7876 if (failrec->this_mirror > num_copies) { 7877 btrfs_debug(fs_info, 7878 "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d", 7879 num_copies, failrec->this_mirror, failed_mirror); 7880 return 0; 7881 } 7882 7883 return 1; 7884 } 7885 7886 static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio, 7887 struct page *page, unsigned int pgoff, 7888 u64 start, u64 end, int failed_mirror, 7889 bio_end_io_t *repair_endio, void *repair_arg) 7890 { 7891 struct io_failure_record *failrec; 7892 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7893 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; 7894 struct bio *bio; 7895 int isector; 7896 unsigned int read_mode = 0; 7897 int segs; 7898 int ret; 7899 blk_status_t status; 7900 struct bio_vec bvec; 7901 7902 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); 7903 7904 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); 7905 if (ret) 7906 return errno_to_blk_status(ret); 7907 7908 ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, 7909 failed_mirror); 7910 if (!ret) { 7911 free_io_failure(failure_tree, io_tree, failrec); 7912 return BLK_STS_IOERR; 7913 } 7914 7915 segs = bio_segments(failed_bio); 7916 bio_get_first_bvec(failed_bio, &bvec); 7917 if (segs > 1 || 7918 (bvec.bv_len > btrfs_inode_sectorsize(inode))) 7919 read_mode |= REQ_FAILFAST_DEV; 7920 7921 isector = start - btrfs_io_bio(failed_bio)->logical; 7922 isector >>= inode->i_sb->s_blocksize_bits; 7923 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, 7924 pgoff, isector, repair_endio, repair_arg); 7925 bio->bi_opf = REQ_OP_READ | read_mode; 7926 7927 btrfs_debug(BTRFS_I(inode)->root->fs_info, 7928 "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d", 7929 read_mode, failrec->this_mirror, failrec->in_validation); 7930 7931 status = submit_dio_repair_bio(inode, bio, failrec->this_mirror); 7932 if (status) { 7933 free_io_failure(failure_tree, io_tree, failrec); 7934 bio_put(bio); 7935 } 7936 7937 return status; 7938 } 7939 7940 struct btrfs_retry_complete { 7941 struct completion done; 7942 struct inode *inode; 7943 u64 start; 7944 int uptodate; 7945 }; 7946 7947 static void btrfs_retry_endio_nocsum(struct bio *bio) 7948 { 7949 struct btrfs_retry_complete *done = bio->bi_private; 7950 struct inode *inode = done->inode; 7951 struct bio_vec *bvec; 7952 struct extent_io_tree *io_tree, *failure_tree; 7953 struct bvec_iter_all iter_all; 7954 7955 if (bio->bi_status) 7956 goto end; 7957 7958 ASSERT(bio->bi_vcnt == 1); 7959 io_tree = &BTRFS_I(inode)->io_tree; 7960 failure_tree = &BTRFS_I(inode)->io_failure_tree; 7961 ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode)); 7962 7963 done->uptodate = 1; 7964 ASSERT(!bio_flagged(bio, BIO_CLONED)); 7965 bio_for_each_segment_all(bvec, bio, iter_all) 7966 clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree, 7967 io_tree, done->start, bvec->bv_page, 7968 btrfs_ino(BTRFS_I(inode)), 0); 7969 end: 7970 complete(&done->done); 7971 bio_put(bio); 7972 } 7973 7974 static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode, 
7975 struct btrfs_io_bio *io_bio) 7976 { 7977 struct btrfs_fs_info *fs_info; 7978 struct bio_vec bvec; 7979 struct bvec_iter iter; 7980 struct btrfs_retry_complete done; 7981 u64 start; 7982 unsigned int pgoff; 7983 u32 sectorsize; 7984 int nr_sectors; 7985 blk_status_t ret; 7986 blk_status_t err = BLK_STS_OK; 7987 7988 fs_info = BTRFS_I(inode)->root->fs_info; 7989 sectorsize = fs_info->sectorsize; 7990 7991 start = io_bio->logical; 7992 done.inode = inode; 7993 io_bio->bio.bi_iter = io_bio->iter; 7994 7995 bio_for_each_segment(bvec, &io_bio->bio, iter) { 7996 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); 7997 pgoff = bvec.bv_offset; 7998 7999 next_block_or_try_again: 8000 done.uptodate = 0; 8001 done.start = start; 8002 init_completion(&done.done); 8003 8004 ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page, 8005 pgoff, start, start + sectorsize - 1, 8006 io_bio->mirror_num, 8007 btrfs_retry_endio_nocsum, &done); 8008 if (ret) { 8009 err = ret; 8010 goto next; 8011 } 8012 8013 wait_for_completion_io(&done.done); 8014 8015 if (!done.uptodate) { 8016 /* We might have another mirror, so try again */ 8017 goto next_block_or_try_again; 8018 } 8019 8020 next: 8021 start += sectorsize; 8022 8023 nr_sectors--; 8024 if (nr_sectors) { 8025 pgoff += sectorsize; 8026 ASSERT(pgoff < PAGE_SIZE); 8027 goto next_block_or_try_again; 8028 } 8029 } 8030 8031 return err; 8032 } 8033 8034 static void btrfs_retry_endio(struct bio *bio) 8035 { 8036 struct btrfs_retry_complete *done = bio->bi_private; 8037 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8038 struct extent_io_tree *io_tree, *failure_tree; 8039 struct inode *inode = done->inode; 8040 struct bio_vec *bvec; 8041 int uptodate; 8042 int ret; 8043 int i = 0; 8044 struct bvec_iter_all iter_all; 8045 8046 if (bio->bi_status) 8047 goto end; 8048 8049 uptodate = 1; 8050 8051 ASSERT(bio->bi_vcnt == 1); 8052 ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode)); 8053 8054 io_tree = &BTRFS_I(inode)->io_tree; 8055 failure_tree = &BTRFS_I(inode)->io_failure_tree; 8056 8057 ASSERT(!bio_flagged(bio, BIO_CLONED)); 8058 bio_for_each_segment_all(bvec, bio, iter_all) { 8059 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, 8060 bvec->bv_offset, done->start, 8061 bvec->bv_len); 8062 if (!ret) 8063 clean_io_failure(BTRFS_I(inode)->root->fs_info, 8064 failure_tree, io_tree, done->start, 8065 bvec->bv_page, 8066 btrfs_ino(BTRFS_I(inode)), 8067 bvec->bv_offset); 8068 else 8069 uptodate = 0; 8070 i++; 8071 } 8072 8073 done->uptodate = uptodate; 8074 end: 8075 complete(&done->done); 8076 bio_put(bio); 8077 } 8078 8079 static blk_status_t __btrfs_subio_endio_read(struct inode *inode, 8080 struct btrfs_io_bio *io_bio, blk_status_t err) 8081 { 8082 struct btrfs_fs_info *fs_info; 8083 struct bio_vec bvec; 8084 struct bvec_iter iter; 8085 struct btrfs_retry_complete done; 8086 u64 start; 8087 u64 offset = 0; 8088 u32 sectorsize; 8089 int nr_sectors; 8090 unsigned int pgoff; 8091 int csum_pos; 8092 bool uptodate = (err == 0); 8093 int ret; 8094 blk_status_t status; 8095 8096 fs_info = BTRFS_I(inode)->root->fs_info; 8097 sectorsize = fs_info->sectorsize; 8098 8099 err = BLK_STS_OK; 8100 start = io_bio->logical; 8101 done.inode = inode; 8102 io_bio->bio.bi_iter = io_bio->iter; 8103 8104 bio_for_each_segment(bvec, &io_bio->bio, iter) { 8105 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len); 8106 8107 pgoff = bvec.bv_offset; 8108 next_block: 8109 if (uptodate) { 8110 csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset); 8111 ret = 
__readpage_endio_check(inode, io_bio, csum_pos, 8112 bvec.bv_page, pgoff, start, sectorsize); 8113 if (likely(!ret)) 8114 goto next; 8115 } 8116 try_again: 8117 done.uptodate = 0; 8118 done.start = start; 8119 init_completion(&done.done); 8120 8121 status = dio_read_error(inode, &io_bio->bio, bvec.bv_page, 8122 pgoff, start, start + sectorsize - 1, 8123 io_bio->mirror_num, btrfs_retry_endio, 8124 &done); 8125 if (status) { 8126 err = status; 8127 goto next; 8128 } 8129 8130 wait_for_completion_io(&done.done); 8131 8132 if (!done.uptodate) { 8133 /* We might have another mirror, so try again */ 8134 goto try_again; 8135 } 8136 next: 8137 offset += sectorsize; 8138 start += sectorsize; 8139 8140 ASSERT(nr_sectors); 8141 8142 nr_sectors--; 8143 if (nr_sectors) { 8144 pgoff += sectorsize; 8145 ASSERT(pgoff < PAGE_SIZE); 8146 goto next_block; 8147 } 8148 } 8149 8150 return err; 8151 } 8152 8153 static blk_status_t btrfs_subio_endio_read(struct inode *inode, 8154 struct btrfs_io_bio *io_bio, blk_status_t err) 8155 { 8156 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 8157 8158 if (skip_csum) { 8159 if (unlikely(err)) 8160 return __btrfs_correct_data_nocsum(inode, io_bio); 8161 else 8162 return BLK_STS_OK; 8163 } else { 8164 return __btrfs_subio_endio_read(inode, io_bio, err); 8165 } 8166 } 8167 8168 static void btrfs_endio_direct_read(struct bio *bio) 8169 { 8170 struct btrfs_dio_private *dip = bio->bi_private; 8171 struct inode *inode = dip->inode; 8172 struct bio *dio_bio; 8173 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8174 blk_status_t err = bio->bi_status; 8175 8176 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) 8177 err = btrfs_subio_endio_read(inode, io_bio, err); 8178 8179 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 8180 dip->logical_offset + dip->bytes - 1); 8181 dio_bio = dip->dio_bio; 8182 8183 kfree(dip); 8184 8185 dio_bio->bi_status = err; 8186 dio_end_io(dio_bio); 8187 btrfs_io_bio_free_csum(io_bio); 8188 bio_put(bio); 8189 } 8190 8191 static void __endio_write_update_ordered(struct inode *inode, 8192 const u64 offset, const u64 bytes, 8193 const bool uptodate) 8194 { 8195 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8196 struct btrfs_ordered_extent *ordered = NULL; 8197 struct btrfs_workqueue *wq; 8198 btrfs_work_func_t func; 8199 u64 ordered_offset = offset; 8200 u64 ordered_bytes = bytes; 8201 u64 last_offset; 8202 8203 if (btrfs_is_free_space_inode(BTRFS_I(inode))) { 8204 wq = fs_info->endio_freespace_worker; 8205 func = btrfs_freespace_write_helper; 8206 } else { 8207 wq = fs_info->endio_write_workers; 8208 func = btrfs_endio_write_helper; 8209 } 8210 8211 while (ordered_offset < offset + bytes) { 8212 last_offset = ordered_offset; 8213 if (btrfs_dec_test_first_ordered_pending(inode, &ordered, 8214 &ordered_offset, 8215 ordered_bytes, 8216 uptodate)) { 8217 btrfs_init_work(&ordered->work, func, 8218 finish_ordered_fn, 8219 NULL, NULL); 8220 btrfs_queue_work(wq, &ordered->work); 8221 } 8222 /* 8223 * If btrfs_dec_test_ordered_pending does not find any ordered 8224 * extent in the range, we can exit. 8225 */ 8226 if (ordered_offset == last_offset) 8227 return; 8228 /* 8229 * Our bio might span multiple ordered extents. In this case 8230 * we keep going until we have accounted the whole dio. 
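 * Shrink ordered_bytes to the part of the range that is still
 * unaccounted before looking up the next ordered extent.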
8231 */ 8232 if (ordered_offset < offset + bytes) { 8233 ordered_bytes = offset + bytes - ordered_offset; 8234 ordered = NULL; 8235 } 8236 } 8237 } 8238 8239 static void btrfs_endio_direct_write(struct bio *bio) 8240 { 8241 struct btrfs_dio_private *dip = bio->bi_private; 8242 struct bio *dio_bio = dip->dio_bio; 8243 8244 __endio_write_update_ordered(dip->inode, dip->logical_offset, 8245 dip->bytes, !bio->bi_status); 8246 8247 kfree(dip); 8248 8249 dio_bio->bi_status = bio->bi_status; 8250 dio_end_io(dio_bio); 8251 bio_put(bio); 8252 } 8253 8254 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data, 8255 struct bio *bio, u64 offset) 8256 { 8257 struct inode *inode = private_data; 8258 blk_status_t ret; 8259 ret = btrfs_csum_one_bio(inode, bio, offset, 1); 8260 BUG_ON(ret); /* -ENOMEM */ 8261 return 0; 8262 } 8263 8264 static void btrfs_end_dio_bio(struct bio *bio) 8265 { 8266 struct btrfs_dio_private *dip = bio->bi_private; 8267 blk_status_t err = bio->bi_status; 8268 8269 if (err) 8270 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, 8271 "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", 8272 btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio), 8273 bio->bi_opf, 8274 (unsigned long long)bio->bi_iter.bi_sector, 8275 bio->bi_iter.bi_size, err); 8276 8277 if (dip->subio_endio) 8278 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err); 8279 8280 if (err) { 8281 /* 8282 * We want to perceive the errors flag being set before 8283 * decrementing the reference count. We don't need a barrier 8284 * since atomic operations with a return value are fully 8285 * ordered as per atomic_t.txt 8286 */ 8287 dip->errors = 1; 8288 } 8289 8290 /* if there are more bios still pending for this dio, just exit */ 8291 if (!atomic_dec_and_test(&dip->pending_bios)) 8292 goto out; 8293 8294 if (dip->errors) { 8295 bio_io_error(dip->orig_bio); 8296 } else { 8297 dip->dio_bio->bi_status = BLK_STS_OK; 8298 bio_endio(dip->orig_bio); 8299 } 8300 out: 8301 bio_put(bio); 8302 } 8303 8304 static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, 8305 struct btrfs_dio_private *dip, 8306 struct bio *bio, 8307 u64 file_offset) 8308 { 8309 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8310 struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); 8311 blk_status_t ret; 8312 8313 /* 8314 * We load all the csum data we need when we submit 8315 * the first bio to reduce the csum tree search and 8316 * contention. 8317 */ 8318 if (dip->logical_offset == file_offset) { 8319 ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio, 8320 file_offset); 8321 if (ret) 8322 return ret; 8323 } 8324 8325 if (bio == dip->orig_bio) 8326 return 0; 8327 8328 file_offset -= dip->logical_offset; 8329 file_offset >>= inode->i_sb->s_blocksize_bits; 8330 io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset); 8331 8332 return 0; 8333 } 8334 8335 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio, 8336 struct inode *inode, u64 file_offset, int async_submit) 8337 { 8338 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8339 struct btrfs_dio_private *dip = bio->bi_private; 8340 bool write = bio_op(bio) == REQ_OP_WRITE; 8341 blk_status_t ret; 8342 8343 /* Check btrfs_submit_bio_hook() for rules about async submit. 
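 * Here that means: only hand the bio to the async checksum
 * workers when no task is doing synchronous writes on this
 * inode (the sync_writers check below).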
*/ 8344 if (async_submit) 8345 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); 8346 8347 if (!write) { 8348 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); 8349 if (ret) 8350 goto err; 8351 } 8352 8353 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 8354 goto map; 8355 8356 if (write && async_submit) { 8357 ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0, 8358 file_offset, inode, 8359 btrfs_submit_bio_start_direct_io); 8360 goto err; 8361 } else if (write) { 8362 /* 8363 * If we aren't doing async submit, calculate the csum of the 8364 * bio now. 8365 */ 8366 ret = btrfs_csum_one_bio(inode, bio, file_offset, 1); 8367 if (ret) 8368 goto err; 8369 } else { 8370 ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio, 8371 file_offset); 8372 if (ret) 8373 goto err; 8374 } 8375 map: 8376 ret = btrfs_map_bio(fs_info, bio, 0, 0); 8377 err: 8378 return ret; 8379 } 8380 8381 static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) 8382 { 8383 struct inode *inode = dip->inode; 8384 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8385 struct bio *bio; 8386 struct bio *orig_bio = dip->orig_bio; 8387 u64 start_sector = orig_bio->bi_iter.bi_sector; 8388 u64 file_offset = dip->logical_offset; 8389 int async_submit = 0; 8390 u64 submit_len; 8391 int clone_offset = 0; 8392 int clone_len; 8393 int ret; 8394 blk_status_t status; 8395 struct btrfs_io_geometry geom; 8396 8397 submit_len = orig_bio->bi_iter.bi_size; 8398 ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), 8399 start_sector << 9, submit_len, &geom); 8400 if (ret) 8401 return -EIO; 8402 8403 if (geom.len >= submit_len) { 8404 bio = orig_bio; 8405 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED; 8406 goto submit; 8407 } 8408 8409 /* async crcs make it difficult to collect full stripe writes. */ 8410 if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK) 8411 async_submit = 0; 8412 else 8413 async_submit = 1; 8414 8415 /* bio split */ 8416 ASSERT(geom.len <= INT_MAX); 8417 atomic_inc(&dip->pending_bios); 8418 do { 8419 clone_len = min_t(int, submit_len, geom.len); 8420 8421 /* 8422 * This will never fail as it's passing GFP_NOFS and 8423 * the allocation is backed by btrfs_bioset. 8424 */ 8425 bio = btrfs_bio_clone_partial(orig_bio, clone_offset, 8426 clone_len); 8427 bio->bi_private = dip; 8428 bio->bi_end_io = btrfs_end_dio_bio; 8429 btrfs_io_bio(bio)->logical = file_offset; 8430 8431 ASSERT(submit_len >= clone_len); 8432 submit_len -= clone_len; 8433 if (submit_len == 0) 8434 break; 8435 8436 /* 8437 * Increase the count before we submit the bio so we know 8438 * the end IO handler won't happen before we increase the 8439 * count. Otherwise, the dip might get freed before we're 8440 * done setting it up.
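 * The matching decrement happens in btrfs_end_dio_bio(), or
 * right below if the submission fails.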
8441 */ 8442 atomic_inc(&dip->pending_bios); 8443 8444 status = btrfs_submit_dio_bio(bio, inode, file_offset, 8445 async_submit); 8446 if (status) { 8447 bio_put(bio); 8448 atomic_dec(&dip->pending_bios); 8449 goto out_err; 8450 } 8451 8452 clone_offset += clone_len; 8453 start_sector += clone_len >> 9; 8454 file_offset += clone_len; 8455 8456 ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), 8457 start_sector << 9, submit_len, &geom); 8458 if (ret) 8459 goto out_err; 8460 } while (submit_len > 0); 8461 8462 submit: 8463 status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit); 8464 if (!status) 8465 return 0; 8466 8467 bio_put(bio); 8468 out_err: 8469 dip->errors = 1; 8470 /* 8471 * Before the atomic variable goes to zero, we must make sure dip->errors 8472 * is perceived to be set. This ordering is ensured by the fact that 8473 * atomic operations with a return value are fully ordered as per 8474 * atomic_t.txt 8475 */ 8476 if (atomic_dec_and_test(&dip->pending_bios)) 8477 bio_io_error(dip->orig_bio); 8478 8479 /* bio_end_io() will handle the error, so we needn't return it */ 8480 return 0; 8481 } 8482 8483 static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, 8484 loff_t file_offset) 8485 { 8486 struct btrfs_dio_private *dip = NULL; 8487 struct bio *bio = NULL; 8488 struct btrfs_io_bio *io_bio; 8489 bool write = (bio_op(dio_bio) == REQ_OP_WRITE); 8490 int ret = 0; 8491 8492 bio = btrfs_bio_clone(dio_bio); 8493 8494 dip = kzalloc(sizeof(*dip), GFP_NOFS); 8495 if (!dip) { 8496 ret = -ENOMEM; 8497 goto free_ordered; 8498 } 8499 8500 dip->private = dio_bio->bi_private; 8501 dip->inode = inode; 8502 dip->logical_offset = file_offset; 8503 dip->bytes = dio_bio->bi_iter.bi_size; 8504 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; 8505 bio->bi_private = dip; 8506 dip->orig_bio = bio; 8507 dip->dio_bio = dio_bio; 8508 atomic_set(&dip->pending_bios, 0); 8509 io_bio = btrfs_io_bio(bio); 8510 io_bio->logical = file_offset; 8511 8512 if (write) { 8513 bio->bi_end_io = btrfs_endio_direct_write; 8514 } else { 8515 bio->bi_end_io = btrfs_endio_direct_read; 8516 dip->subio_endio = btrfs_subio_endio_read; 8517 } 8518 8519 /* 8520 * Reset the range for unsubmitted ordered extents (to a 0 length range) 8521 * even if we fail to submit a bio, because in such case we do the 8522 * corresponding error handling below and it must not be done a second 8523 * time by btrfs_direct_IO(). 8524 */ 8525 if (write) { 8526 struct btrfs_dio_data *dio_data = current->journal_info; 8527 8528 dio_data->unsubmitted_oe_range_end = dip->logical_offset + 8529 dip->bytes; 8530 dio_data->unsubmitted_oe_range_start = 8531 dio_data->unsubmitted_oe_range_end; 8532 } 8533 8534 ret = btrfs_submit_direct_hook(dip); 8535 if (!ret) 8536 return; 8537 8538 btrfs_io_bio_free_csum(io_bio); 8539 8540 free_ordered: 8541 /* 8542 * If we arrived here it means we either failed to submit the dip, 8543 * failed to clone the dio_bio or failed to allocate the 8544 * dip. If we cloned the dio_bio and allocated the dip, we can just 8545 * call bio_endio against our io_bio so that we get proper resource 8546 * cleanup if we fail to submit the dip, otherwise, we must do the 8547 * same as btrfs_endio_direct_[write|read] because we can't call these 8548 * callbacks - they require an allocated dip and a clone of dio_bio.
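 * In the latter case, for a write we must release the ordered
 * extent range we reserved but never submitted bios for, and
 * for a read we must unlock the extent range; that is what the
 * else branch below does.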
8549 */ 8550 if (bio && dip) { 8551 bio_io_error(bio); 8552 /* 8553 * The end io callbacks free our dip, do the final put on bio 8554 * and all the cleanup and final put for dio_bio (through 8555 * dio_end_io()). 8556 */ 8557 dip = NULL; 8558 bio = NULL; 8559 } else { 8560 if (write) 8561 __endio_write_update_ordered(inode, 8562 file_offset, 8563 dio_bio->bi_iter.bi_size, 8564 false); 8565 else 8566 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 8567 file_offset + dio_bio->bi_iter.bi_size - 1); 8568 8569 dio_bio->bi_status = BLK_STS_IOERR; 8570 /* 8571 * Releases and cleans up our dio_bio, no need to bio_put() 8572 * nor bio_endio()/bio_io_error() against dio_bio. 8573 */ 8574 dio_end_io(dio_bio); 8575 } 8576 if (bio) 8577 bio_put(bio); 8578 kfree(dip); 8579 } 8580 8581 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, 8582 const struct iov_iter *iter, loff_t offset) 8583 { 8584 int seg; 8585 int i; 8586 unsigned int blocksize_mask = fs_info->sectorsize - 1; 8587 ssize_t retval = -EINVAL; 8588 8589 if (offset & blocksize_mask) 8590 goto out; 8591 8592 if (iov_iter_alignment(iter) & blocksize_mask) 8593 goto out; 8594 8595 /* If this is a write we don't need to check anymore */ 8596 if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter)) 8597 return 0; 8598 /* 8599 * Check to make sure we don't have duplicate iov_base's in this 8600 * iovec, if so return EINVAL, otherwise we'll get csum errors 8601 * when reading back. 8602 */ 8603 for (seg = 0; seg < iter->nr_segs; seg++) { 8604 for (i = seg + 1; i < iter->nr_segs; i++) { 8605 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 8606 goto out; 8607 } 8608 } 8609 retval = 0; 8610 out: 8611 return retval; 8612 } 8613 8614 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 8615 { 8616 struct file *file = iocb->ki_filp; 8617 struct inode *inode = file->f_mapping->host; 8618 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8619 struct btrfs_dio_data dio_data = { 0 }; 8620 struct extent_changeset *data_reserved = NULL; 8621 loff_t offset = iocb->ki_pos; 8622 size_t count = 0; 8623 int flags = 0; 8624 bool wakeup = true; 8625 bool relock = false; 8626 ssize_t ret; 8627 8628 if (check_direct_IO(fs_info, iter, offset)) 8629 return 0; 8630 8631 inode_dio_begin(inode); 8632 8633 /* 8634 * The generic stuff only does filemap_write_and_wait_range, which 8635 * isn't enough if we've written compressed pages to this area, so 8636 * we need to flush the dirty pages again to make absolutely sure 8637 * that any outstanding dirty pages are on disk. 8638 */ 8639 count = iov_iter_count(iter); 8640 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 8641 &BTRFS_I(inode)->runtime_flags)) 8642 filemap_fdatawrite_range(inode->i_mapping, offset, 8643 offset + count - 1); 8644 8645 if (iov_iter_rw(iter) == WRITE) { 8646 /* 8647 * If the write DIO is beyond the EOF, we need update 8648 * the isize, but it is protected by i_mutex. So we can 8649 * not unlock the i_mutex at this case. 8650 */ 8651 if (offset + count <= inode->i_size) { 8652 dio_data.overwrite = 1; 8653 inode_unlock(inode); 8654 relock = true; 8655 } else if (iocb->ki_flags & IOCB_NOWAIT) { 8656 ret = -EAGAIN; 8657 goto out; 8658 } 8659 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 8660 offset, count); 8661 if (ret) 8662 goto out; 8663 8664 /* 8665 * We need to know how many extents we reserved so that we can 8666 * do the accounting properly if we go over the number we 8667 * originally calculated. Abuse current->journal_info for this. 
8668 */ 8669 dio_data.reserve = round_up(count, 8670 fs_info->sectorsize); 8671 dio_data.unsubmitted_oe_range_start = (u64)offset; 8672 dio_data.unsubmitted_oe_range_end = (u64)offset; 8673 current->journal_info = &dio_data; 8674 down_read(&BTRFS_I(inode)->dio_sem); 8675 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8676 &BTRFS_I(inode)->runtime_flags)) { 8677 inode_dio_end(inode); 8678 flags = DIO_LOCKING | DIO_SKIP_HOLES; 8679 wakeup = false; 8680 } 8681 8682 ret = __blockdev_direct_IO(iocb, inode, 8683 fs_info->fs_devices->latest_bdev, 8684 iter, btrfs_get_blocks_direct, NULL, 8685 btrfs_submit_direct, flags); 8686 if (iov_iter_rw(iter) == WRITE) { 8687 up_read(&BTRFS_I(inode)->dio_sem); 8688 current->journal_info = NULL; 8689 if (ret < 0 && ret != -EIOCBQUEUED) { 8690 if (dio_data.reserve) 8691 btrfs_delalloc_release_space(inode, data_reserved, 8692 offset, dio_data.reserve, true); 8693 /* 8694 * On error we might have left some ordered extents 8695 * without submitting corresponding bios for them, so 8696 * cleanup them up to avoid other tasks getting them 8697 * and waiting for them to complete forever. 8698 */ 8699 if (dio_data.unsubmitted_oe_range_start < 8700 dio_data.unsubmitted_oe_range_end) 8701 __endio_write_update_ordered(inode, 8702 dio_data.unsubmitted_oe_range_start, 8703 dio_data.unsubmitted_oe_range_end - 8704 dio_data.unsubmitted_oe_range_start, 8705 false); 8706 } else if (ret >= 0 && (size_t)ret < count) 8707 btrfs_delalloc_release_space(inode, data_reserved, 8708 offset, count - (size_t)ret, true); 8709 btrfs_delalloc_release_extents(BTRFS_I(inode), count, false); 8710 } 8711 out: 8712 if (wakeup) 8713 inode_dio_end(inode); 8714 if (relock) 8715 inode_lock(inode); 8716 8717 extent_changeset_free(data_reserved); 8718 return ret; 8719 } 8720 8721 #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) 8722 8723 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 8724 __u64 start, __u64 len) 8725 { 8726 int ret; 8727 8728 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); 8729 if (ret) 8730 return ret; 8731 8732 return extent_fiemap(inode, fieinfo, start, len); 8733 } 8734 8735 int btrfs_readpage(struct file *file, struct page *page) 8736 { 8737 struct extent_io_tree *tree; 8738 tree = &BTRFS_I(page->mapping->host)->io_tree; 8739 return extent_read_full_page(tree, page, btrfs_get_extent, 0); 8740 } 8741 8742 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 8743 { 8744 struct inode *inode = page->mapping->host; 8745 int ret; 8746 8747 if (current->flags & PF_MEMALLOC) { 8748 redirty_page_for_writepage(wbc, page); 8749 unlock_page(page); 8750 return 0; 8751 } 8752 8753 /* 8754 * If we are under memory pressure we will call this directly from the 8755 * VM, we need to make sure we have the inode referenced for the ordered 8756 * extent. If not just return like we didn't do anything. 
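 * Redirtying the page and returning AOP_WRITEPAGE_ACTIVATE
 * keeps the page dirty so writeback can retry it later instead
 * of treating it as cleaned.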
8757 */ 8758 if (!igrab(inode)) { 8759 redirty_page_for_writepage(wbc, page); 8760 return AOP_WRITEPAGE_ACTIVATE; 8761 } 8762 ret = extent_write_full_page(page, wbc); 8763 btrfs_add_delayed_iput(inode); 8764 return ret; 8765 } 8766 8767 static int btrfs_writepages(struct address_space *mapping, 8768 struct writeback_control *wbc) 8769 { 8770 return extent_writepages(mapping, wbc); 8771 } 8772 8773 static int 8774 btrfs_readpages(struct file *file, struct address_space *mapping, 8775 struct list_head *pages, unsigned nr_pages) 8776 { 8777 return extent_readpages(mapping, pages, nr_pages); 8778 } 8779 8780 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8781 { 8782 int ret = try_release_extent_mapping(page, gfp_flags); 8783 if (ret == 1) { 8784 ClearPagePrivate(page); 8785 set_page_private(page, 0); 8786 put_page(page); 8787 } 8788 return ret; 8789 } 8790 8791 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8792 { 8793 if (PageWriteback(page) || PageDirty(page)) 8794 return 0; 8795 return __btrfs_releasepage(page, gfp_flags); 8796 } 8797 8798 static void btrfs_invalidatepage(struct page *page, unsigned int offset, 8799 unsigned int length) 8800 { 8801 struct inode *inode = page->mapping->host; 8802 struct extent_io_tree *tree; 8803 struct btrfs_ordered_extent *ordered; 8804 struct extent_state *cached_state = NULL; 8805 u64 page_start = page_offset(page); 8806 u64 page_end = page_start + PAGE_SIZE - 1; 8807 u64 start; 8808 u64 end; 8809 int inode_evicting = inode->i_state & I_FREEING; 8810 8811 /* 8812 * we have the page locked, so new writeback can't start, 8813 * and the dirty bit won't be cleared while we are here. 8814 * 8815 * Wait for IO on this page so that we can safely clear 8816 * the PagePrivate2 bit and do ordered accounting 8817 */ 8818 wait_on_page_writeback(page); 8819 8820 tree = &BTRFS_I(inode)->io_tree; 8821 if (offset) { 8822 btrfs_releasepage(page, GFP_NOFS); 8823 return; 8824 } 8825 8826 if (!inode_evicting) 8827 lock_extent_bits(tree, page_start, page_end, &cached_state); 8828 again: 8829 start = page_start; 8830 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, 8831 page_end - start + 1); 8832 if (ordered) { 8833 end = min(page_end, ordered->file_offset + ordered->len - 1); 8834 /* 8835 * IO on this page will never be started, so we need 8836 * to account for any ordered extents now 8837 */ 8838 if (!inode_evicting) 8839 clear_extent_bit(tree, start, end, 8840 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 8841 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8842 EXTENT_DEFRAG, 1, 0, &cached_state); 8843 /* 8844 * whoever cleared the private bit is responsible 8845 * for the finish_ordered_io 8846 */ 8847 if (TestClearPagePrivate2(page)) { 8848 struct btrfs_ordered_inode_tree *tree; 8849 u64 new_len; 8850 8851 tree = &BTRFS_I(inode)->ordered_tree; 8852 8853 spin_lock_irq(&tree->lock); 8854 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8855 new_len = start - ordered->file_offset; 8856 if (new_len < ordered->truncated_len) 8857 ordered->truncated_len = new_len; 8858 spin_unlock_irq(&tree->lock); 8859 8860 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8861 start, 8862 end - start + 1, 1)) 8863 btrfs_finish_ordered_io(ordered); 8864 } 8865 btrfs_put_ordered_extent(ordered); 8866 if (!inode_evicting) { 8867 cached_state = NULL; 8868 lock_extent_bits(tree, start, end, 8869 &cached_state); 8870 } 8871 8872 start = end + 1; 8873 if (start < page_end) 8874 goto again; 8875 } 8876 8877 /* 8878 * Qgroup reserved space handler 8879 * Page 
here will be either 8880 * 1) Already written to disk 8881 * In this case, its reserved space is released from data rsv map 8882 * and will be freed by delayed_ref handler finally. 8883 * So even we call qgroup_free_data(), it won't decrease reserved 8884 * space. 8885 * 2) Not written to disk 8886 * This means the reserved space should be freed here. However, 8887 * if a truncate invalidates the page (by clearing PageDirty) 8888 * and the page is accounted for while allocating extent 8889 * in btrfs_check_data_free_space() we let delayed_ref to 8890 * free the entire extent. 8891 */ 8892 if (PageDirty(page)) 8893 btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); 8894 if (!inode_evicting) { 8895 clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | 8896 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | 8897 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, 8898 &cached_state); 8899 8900 __btrfs_releasepage(page, GFP_NOFS); 8901 } 8902 8903 ClearPageChecked(page); 8904 if (PagePrivate(page)) { 8905 ClearPagePrivate(page); 8906 set_page_private(page, 0); 8907 put_page(page); 8908 } 8909 } 8910 8911 /* 8912 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8913 * called from a page fault handler when a page is first dirtied. Hence we must 8914 * be careful to check for EOF conditions here. We set the page up correctly 8915 * for a written page which means we get ENOSPC checking when writing into 8916 * holes and correct delalloc and unwritten extent mapping on filesystems that 8917 * support these features. 8918 * 8919 * We are not allowed to take the i_mutex here so we have to play games to 8920 * protect against truncate races as the page could now be beyond EOF. Because 8921 * truncate_setsize() writes the inode size before removing pages, once we have 8922 * the page lock we can determine safely if the page is beyond EOF. If it is not 8923 * beyond EOF, then the page is guaranteed safe against truncation until we 8924 * unlock the page. 8925 */ 8926 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 8927 { 8928 struct page *page = vmf->page; 8929 struct inode *inode = file_inode(vmf->vma->vm_file); 8930 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8931 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8932 struct btrfs_ordered_extent *ordered; 8933 struct extent_state *cached_state = NULL; 8934 struct extent_changeset *data_reserved = NULL; 8935 char *kaddr; 8936 unsigned long zero_start; 8937 loff_t size; 8938 vm_fault_t ret; 8939 int ret2; 8940 int reserved = 0; 8941 u64 reserved_space; 8942 u64 page_start; 8943 u64 page_end; 8944 u64 end; 8945 8946 reserved_space = PAGE_SIZE; 8947 8948 sb_start_pagefault(inode->i_sb); 8949 page_start = page_offset(page); 8950 page_end = page_start + PAGE_SIZE - 1; 8951 end = page_end; 8952 8953 /* 8954 * Reserving delalloc space after obtaining the page lock can lead to 8955 * deadlock. For example, if a dirty page is locked by this function 8956 * and the call to btrfs_delalloc_reserve_space() ends up triggering 8957 * dirty page write out, then the btrfs_writepage() function could 8958 * end up waiting indefinitely to get a lock on the page currently 8959 * being processed by btrfs_page_mkwrite() function. 
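 * To avoid that, reserve the space first and only take the
 * page lock afterwards (see the lock_page() call below).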
8960 */ 8961 ret2 = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start, 8962 reserved_space); 8963 if (!ret2) { 8964 ret2 = file_update_time(vmf->vma->vm_file); 8965 reserved = 1; 8966 } 8967 if (ret2) { 8968 ret = vmf_error(ret2); 8969 if (reserved) 8970 goto out; 8971 goto out_noreserve; 8972 } 8973 8974 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8975 again: 8976 lock_page(page); 8977 size = i_size_read(inode); 8978 8979 if ((page->mapping != inode->i_mapping) || 8980 (page_start >= size)) { 8981 /* page got truncated out from underneath us */ 8982 goto out_unlock; 8983 } 8984 wait_on_page_writeback(page); 8985 8986 lock_extent_bits(io_tree, page_start, page_end, &cached_state); 8987 set_page_extent_mapped(page); 8988 8989 /* 8990 * we can't set the delalloc bits if there are pending ordered 8991 * extents. Drop our locks and wait for them to finish 8992 */ 8993 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, 8994 PAGE_SIZE); 8995 if (ordered) { 8996 unlock_extent_cached(io_tree, page_start, page_end, 8997 &cached_state); 8998 unlock_page(page); 8999 btrfs_start_ordered_extent(inode, ordered, 1); 9000 btrfs_put_ordered_extent(ordered); 9001 goto again; 9002 } 9003 9004 if (page->index == ((size - 1) >> PAGE_SHIFT)) { 9005 reserved_space = round_up(size - page_start, 9006 fs_info->sectorsize); 9007 if (reserved_space < PAGE_SIZE) { 9008 end = page_start + reserved_space - 1; 9009 btrfs_delalloc_release_space(inode, data_reserved, 9010 page_start, PAGE_SIZE - reserved_space, 9011 true); 9012 } 9013 } 9014 9015 /* 9016 * page_mkwrite gets called when the page is firstly dirtied after it's 9017 * faulted in, but write(2) could also dirty a page and set delalloc 9018 * bits, thus in this case for space account reason, we still need to 9019 * clear any delalloc bits within this page range since we have to 9020 * reserve data&meta space before lock_page() (see above comments). 
9021 */ 9022 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, 9023 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 9024 EXTENT_DEFRAG, 0, 0, &cached_state); 9025 9026 ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0, 9027 &cached_state); 9028 if (ret2) { 9029 unlock_extent_cached(io_tree, page_start, page_end, 9030 &cached_state); 9031 ret = VM_FAULT_SIGBUS; 9032 goto out_unlock; 9033 } 9034 ret2 = 0; 9035 9036 /* page is wholly or partially inside EOF */ 9037 if (page_start + PAGE_SIZE > size) 9038 zero_start = offset_in_page(size); 9039 else 9040 zero_start = PAGE_SIZE; 9041 9042 if (zero_start != PAGE_SIZE) { 9043 kaddr = kmap(page); 9044 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start); 9045 flush_dcache_page(page); 9046 kunmap(page); 9047 } 9048 ClearPageChecked(page); 9049 set_page_dirty(page); 9050 SetPageUptodate(page); 9051 9052 BTRFS_I(inode)->last_trans = fs_info->generation; 9053 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 9054 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; 9055 9056 unlock_extent_cached(io_tree, page_start, page_end, &cached_state); 9057 9058 if (!ret2) { 9059 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true); 9060 sb_end_pagefault(inode->i_sb); 9061 extent_changeset_free(data_reserved); 9062 return VM_FAULT_LOCKED; 9063 } 9064 9065 out_unlock: 9066 unlock_page(page); 9067 out: 9068 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0)); 9069 btrfs_delalloc_release_space(inode, data_reserved, page_start, 9070 reserved_space, (ret != 0)); 9071 out_noreserve: 9072 sb_end_pagefault(inode->i_sb); 9073 extent_changeset_free(data_reserved); 9074 return ret; 9075 } 9076 9077 static int btrfs_truncate(struct inode *inode, bool skip_writeback) 9078 { 9079 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9080 struct btrfs_root *root = BTRFS_I(inode)->root; 9081 struct btrfs_block_rsv *rsv; 9082 int ret; 9083 struct btrfs_trans_handle *trans; 9084 u64 mask = fs_info->sectorsize - 1; 9085 u64 min_size = btrfs_calc_metadata_size(fs_info, 1); 9086 9087 if (!skip_writeback) { 9088 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), 9089 (u64)-1); 9090 if (ret) 9091 return ret; 9092 } 9093 9094 /* 9095 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of 9096 * things going on here: 9097 * 9098 * 1) We need to reserve space to update our inode. 9099 * 9100 * 2) We need to have something to cache all the space that is going to 9101 * be free'd up by the truncate operation, but also have some slack 9102 * space reserved in case it uses space during the truncate (thank you 9103 * very much snapshotting). 9104 * 9105 * And we need these to be separate. The fact is we can use a lot of 9106 * space doing the truncate, and we have no earthly idea how much space 9107 * we will use, so we need the truncate reservation to be separate so it 9108 * doesn't end up using space reserved for updating the inode. We also 9109 * need to be able to stop the transaction and start a new one, which 9110 * means we need to be able to update the inode several times, and we 9111 * have no idea of knowing how many times that will be, so we can't just 9112 * reserve 1 item for the entirety of the operation, so that has to be 9113 * done separately as well. 9114 * 9115 * So that leaves us with 9116 * 9117 * 1) rsv - for the truncate reservation, which we will steal from the 9118 * transaction reservation. 
9119 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for 9120 * updating the inode. 9121 */ 9122 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); 9123 if (!rsv) 9124 return -ENOMEM; 9125 rsv->size = min_size; 9126 rsv->failfast = 1; 9127 9128 /* 9129 * 1 for the truncate slack space 9130 * 1 for updating the inode. 9131 */ 9132 trans = btrfs_start_transaction(root, 2); 9133 if (IS_ERR(trans)) { 9134 ret = PTR_ERR(trans); 9135 goto out; 9136 } 9137 9138 /* Migrate the slack space for the truncate to our reserve */ 9139 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, 9140 min_size, false); 9141 BUG_ON(ret); 9142 9143 /* 9144 * So if we truncate and then write and fsync we normally would just 9145 * write the extents that changed, which is a problem if we need to 9146 * first truncate that entire inode. So set this flag so we write out 9147 * all of the extents in the inode to the sync log so we're completely 9148 * safe. 9149 */ 9150 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 9151 trans->block_rsv = rsv; 9152 9153 while (1) { 9154 ret = btrfs_truncate_inode_items(trans, root, inode, 9155 inode->i_size, 9156 BTRFS_EXTENT_DATA_KEY); 9157 trans->block_rsv = &fs_info->trans_block_rsv; 9158 if (ret != -ENOSPC && ret != -EAGAIN) 9159 break; 9160 9161 ret = btrfs_update_inode(trans, root, inode); 9162 if (ret) 9163 break; 9164 9165 btrfs_end_transaction(trans); 9166 btrfs_btree_balance_dirty(fs_info); 9167 9168 trans = btrfs_start_transaction(root, 2); 9169 if (IS_ERR(trans)) { 9170 ret = PTR_ERR(trans); 9171 trans = NULL; 9172 break; 9173 } 9174 9175 btrfs_block_rsv_release(fs_info, rsv, -1); 9176 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 9177 rsv, min_size, false); 9178 BUG_ON(ret); /* shouldn't happen */ 9179 trans->block_rsv = rsv; 9180 } 9181 9182 /* 9183 * We can't call btrfs_truncate_block inside a trans handle as we could 9184 * deadlock with freeze, if we got NEED_TRUNCATE_BLOCK then we know 9185 * we've truncated everything except the last little bit, and can do 9186 * btrfs_truncate_block and then update the disk_i_size. 9187 */ 9188 if (ret == NEED_TRUNCATE_BLOCK) { 9189 btrfs_end_transaction(trans); 9190 btrfs_btree_balance_dirty(fs_info); 9191 9192 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0); 9193 if (ret) 9194 goto out; 9195 trans = btrfs_start_transaction(root, 1); 9196 if (IS_ERR(trans)) { 9197 ret = PTR_ERR(trans); 9198 goto out; 9199 } 9200 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 9201 } 9202 9203 if (trans) { 9204 int ret2; 9205 9206 trans->block_rsv = &fs_info->trans_block_rsv; 9207 ret2 = btrfs_update_inode(trans, root, inode); 9208 if (ret2 && !ret) 9209 ret = ret2; 9210 9211 ret2 = btrfs_end_transaction(trans); 9212 if (ret2 && !ret) 9213 ret = ret2; 9214 btrfs_btree_balance_dirty(fs_info); 9215 } 9216 out: 9217 btrfs_free_block_rsv(fs_info, rsv); 9218 9219 return ret; 9220 } 9221 9222 /* 9223 * create a new subvolume directory/inode (helper for the ioctl). 
9224 */ 9225 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 9226 struct btrfs_root *new_root, 9227 struct btrfs_root *parent_root, 9228 u64 new_dirid) 9229 { 9230 struct inode *inode; 9231 int err; 9232 u64 index = 0; 9233 9234 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 9235 new_dirid, new_dirid, 9236 S_IFDIR | (~current_umask() & S_IRWXUGO), 9237 &index); 9238 if (IS_ERR(inode)) 9239 return PTR_ERR(inode); 9240 inode->i_op = &btrfs_dir_inode_operations; 9241 inode->i_fop = &btrfs_dir_file_operations; 9242 9243 set_nlink(inode, 1); 9244 btrfs_i_size_write(BTRFS_I(inode), 0); 9245 unlock_new_inode(inode); 9246 9247 err = btrfs_subvol_inherit_props(trans, new_root, parent_root); 9248 if (err) 9249 btrfs_err(new_root->fs_info, 9250 "error inheriting subvolume %llu properties: %d", 9251 new_root->root_key.objectid, err); 9252 9253 err = btrfs_update_inode(trans, new_root, inode); 9254 9255 iput(inode); 9256 return err; 9257 } 9258 9259 struct inode *btrfs_alloc_inode(struct super_block *sb) 9260 { 9261 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 9262 struct btrfs_inode *ei; 9263 struct inode *inode; 9264 9265 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL); 9266 if (!ei) 9267 return NULL; 9268 9269 ei->root = NULL; 9270 ei->generation = 0; 9271 ei->last_trans = 0; 9272 ei->last_sub_trans = 0; 9273 ei->logged_trans = 0; 9274 ei->delalloc_bytes = 0; 9275 ei->new_delalloc_bytes = 0; 9276 ei->defrag_bytes = 0; 9277 ei->disk_i_size = 0; 9278 ei->flags = 0; 9279 ei->csum_bytes = 0; 9280 ei->index_cnt = (u64)-1; 9281 ei->dir_index = 0; 9282 ei->last_unlink_trans = 0; 9283 ei->last_log_commit = 0; 9284 9285 spin_lock_init(&ei->lock); 9286 ei->outstanding_extents = 0; 9287 if (sb->s_magic != BTRFS_TEST_MAGIC) 9288 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, 9289 BTRFS_BLOCK_RSV_DELALLOC); 9290 ei->runtime_flags = 0; 9291 ei->prop_compress = BTRFS_COMPRESS_NONE; 9292 ei->defrag_compress = BTRFS_COMPRESS_NONE; 9293 9294 ei->delayed_node = NULL; 9295 9296 ei->i_otime.tv_sec = 0; 9297 ei->i_otime.tv_nsec = 0; 9298 9299 inode = &ei->vfs_inode; 9300 extent_map_tree_init(&ei->extent_tree); 9301 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode); 9302 extent_io_tree_init(fs_info, &ei->io_failure_tree, 9303 IO_TREE_INODE_IO_FAILURE, inode); 9304 ei->io_tree.track_uptodate = true; 9305 ei->io_failure_tree.track_uptodate = true; 9306 atomic_set(&ei->sync_writers, 0); 9307 mutex_init(&ei->log_mutex); 9308 mutex_init(&ei->delalloc_mutex); 9309 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 9310 INIT_LIST_HEAD(&ei->delalloc_inodes); 9311 INIT_LIST_HEAD(&ei->delayed_iput); 9312 RB_CLEAR_NODE(&ei->rb_node); 9313 init_rwsem(&ei->dio_sem); 9314 9315 return inode; 9316 } 9317 9318 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 9319 void btrfs_test_destroy_inode(struct inode *inode) 9320 { 9321 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); 9322 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 9323 } 9324 #endif 9325 9326 void btrfs_free_inode(struct inode *inode) 9327 { 9328 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 9329 } 9330 9331 void btrfs_destroy_inode(struct inode *inode) 9332 { 9333 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 9334 struct btrfs_ordered_extent *ordered; 9335 struct btrfs_root *root = BTRFS_I(inode)->root; 9336 9337 WARN_ON(!hlist_empty(&inode->i_dentry)); 9338 WARN_ON(inode->i_data.nrpages); 9339 WARN_ON(BTRFS_I(inode)->block_rsv.reserved); 9340 WARN_ON(BTRFS_I(inode)->block_rsv.size); 9341 
WARN_ON(BTRFS_I(inode)->outstanding_extents); 9342 WARN_ON(BTRFS_I(inode)->delalloc_bytes); 9343 WARN_ON(BTRFS_I(inode)->new_delalloc_bytes); 9344 WARN_ON(BTRFS_I(inode)->csum_bytes); 9345 WARN_ON(BTRFS_I(inode)->defrag_bytes); 9346 9347 /* 9348 * This can happen where we create an inode, but somebody else also 9349 * created the same inode and we need to destroy the one we already 9350 * created. 9351 */ 9352 if (!root) 9353 return; 9354 9355 while (1) { 9356 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 9357 if (!ordered) 9358 break; 9359 else { 9360 btrfs_err(fs_info, 9361 "found ordered extent %llu %llu on inode cleanup", 9362 ordered->file_offset, ordered->len); 9363 btrfs_remove_ordered_extent(inode, ordered); 9364 btrfs_put_ordered_extent(ordered); 9365 btrfs_put_ordered_extent(ordered); 9366 } 9367 } 9368 btrfs_qgroup_check_reserved_leak(inode); 9369 inode_tree_del(inode); 9370 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); 9371 } 9372 9373 int btrfs_drop_inode(struct inode *inode) 9374 { 9375 struct btrfs_root *root = BTRFS_I(inode)->root; 9376 9377 if (root == NULL) 9378 return 1; 9379 9380 /* the snap/subvol tree is on deleting */ 9381 if (btrfs_root_refs(&root->root_item) == 0) 9382 return 1; 9383 else 9384 return generic_drop_inode(inode); 9385 } 9386 9387 static void init_once(void *foo) 9388 { 9389 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 9390 9391 inode_init_once(&ei->vfs_inode); 9392 } 9393 9394 void __cold btrfs_destroy_cachep(void) 9395 { 9396 /* 9397 * Make sure all delayed rcu free inodes are flushed before we 9398 * destroy cache. 9399 */ 9400 rcu_barrier(); 9401 kmem_cache_destroy(btrfs_inode_cachep); 9402 kmem_cache_destroy(btrfs_trans_handle_cachep); 9403 kmem_cache_destroy(btrfs_path_cachep); 9404 kmem_cache_destroy(btrfs_free_space_cachep); 9405 kmem_cache_destroy(btrfs_free_space_bitmap_cachep); 9406 } 9407 9408 int __init btrfs_init_cachep(void) 9409 { 9410 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 9411 sizeof(struct btrfs_inode), 0, 9412 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT, 9413 init_once); 9414 if (!btrfs_inode_cachep) 9415 goto fail; 9416 9417 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", 9418 sizeof(struct btrfs_trans_handle), 0, 9419 SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL); 9420 if (!btrfs_trans_handle_cachep) 9421 goto fail; 9422 9423 btrfs_path_cachep = kmem_cache_create("btrfs_path", 9424 sizeof(struct btrfs_path), 0, 9425 SLAB_MEM_SPREAD, NULL); 9426 if (!btrfs_path_cachep) 9427 goto fail; 9428 9429 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", 9430 sizeof(struct btrfs_free_space), 0, 9431 SLAB_MEM_SPREAD, NULL); 9432 if (!btrfs_free_space_cachep) 9433 goto fail; 9434 9435 btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap", 9436 PAGE_SIZE, PAGE_SIZE, 9437 SLAB_RED_ZONE, NULL); 9438 if (!btrfs_free_space_bitmap_cachep) 9439 goto fail; 9440 9441 return 0; 9442 fail: 9443 btrfs_destroy_cachep(); 9444 return -ENOMEM; 9445 } 9446 9447 static int btrfs_getattr(const struct path *path, struct kstat *stat, 9448 u32 request_mask, unsigned int flags) 9449 { 9450 u64 delalloc_bytes; 9451 struct inode *inode = d_inode(path->dentry); 9452 u32 blocksize = inode->i_sb->s_blocksize; 9453 u32 bi_flags = BTRFS_I(inode)->flags; 9454 9455 stat->result_mask |= STATX_BTIME; 9456 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; 9457 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; 9458 if (bi_flags & BTRFS_INODE_APPEND) 9459 
stat->attributes |= STATX_ATTR_APPEND; 9460 if (bi_flags & BTRFS_INODE_COMPRESS) 9461 stat->attributes |= STATX_ATTR_COMPRESSED; 9462 if (bi_flags & BTRFS_INODE_IMMUTABLE) 9463 stat->attributes |= STATX_ATTR_IMMUTABLE; 9464 if (bi_flags & BTRFS_INODE_NODUMP) 9465 stat->attributes |= STATX_ATTR_NODUMP; 9466 9467 stat->attributes_mask |= (STATX_ATTR_APPEND | 9468 STATX_ATTR_COMPRESSED | 9469 STATX_ATTR_IMMUTABLE | 9470 STATX_ATTR_NODUMP); 9471 9472 generic_fillattr(inode, stat); 9473 stat->dev = BTRFS_I(inode)->root->anon_dev; 9474 9475 spin_lock(&BTRFS_I(inode)->lock); 9476 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; 9477 spin_unlock(&BTRFS_I(inode)->lock); 9478 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + 9479 ALIGN(delalloc_bytes, blocksize)) >> 9; 9480 return 0; 9481 } 9482 9483 static int btrfs_rename_exchange(struct inode *old_dir, 9484 struct dentry *old_dentry, 9485 struct inode *new_dir, 9486 struct dentry *new_dentry) 9487 { 9488 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 9489 struct btrfs_trans_handle *trans; 9490 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9491 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9492 struct inode *new_inode = new_dentry->d_inode; 9493 struct inode *old_inode = old_dentry->d_inode; 9494 struct timespec64 ctime = current_time(old_inode); 9495 struct dentry *parent; 9496 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9497 u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); 9498 u64 old_idx = 0; 9499 u64 new_idx = 0; 9500 u64 root_objectid; 9501 int ret; 9502 bool root_log_pinned = false; 9503 bool dest_log_pinned = false; 9504 struct btrfs_log_ctx ctx_root; 9505 struct btrfs_log_ctx ctx_dest; 9506 bool sync_log_root = false; 9507 bool sync_log_dest = false; 9508 bool commit_transaction = false; 9509 9510 /* we only allow rename subvolume link between subvolumes */ 9511 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9512 return -EXDEV; 9513 9514 btrfs_init_log_ctx(&ctx_root, old_inode); 9515 btrfs_init_log_ctx(&ctx_dest, new_inode); 9516 9517 /* close the race window with snapshot create/destroy ioctl */ 9518 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9519 down_read(&fs_info->subvol_sem); 9520 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 9521 down_read(&fs_info->subvol_sem); 9522 9523 /* 9524 * We want to reserve the absolute worst case amount of items. So if 9525 * both inodes are subvols and we need to unlink them then that would 9526 * require 4 item modifications, but if they are both normal inodes it 9527 * would require 5 item modifications, so we'll assume their normal 9528 * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items 9529 * should cover the worst case number of items we'll modify. 9530 */ 9531 trans = btrfs_start_transaction(root, 12); 9532 if (IS_ERR(trans)) { 9533 ret = PTR_ERR(trans); 9534 goto out_notrans; 9535 } 9536 9537 /* 9538 * We need to find a free sequence number both in the source and 9539 * in the destination directory for the exchange. 9540 */ 9541 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); 9542 if (ret) 9543 goto out_fail; 9544 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); 9545 if (ret) 9546 goto out_fail; 9547 9548 BTRFS_I(old_inode)->dir_index = 0ULL; 9549 BTRFS_I(new_inode)->dir_index = 0ULL; 9550 9551 /* Reference for the source. */ 9552 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9553 /* force full log commit if subvolume involved. 
*/ 9554 btrfs_set_log_full_commit(trans); 9555 } else { 9556 btrfs_pin_log_trans(root); 9557 root_log_pinned = true; 9558 ret = btrfs_insert_inode_ref(trans, dest, 9559 new_dentry->d_name.name, 9560 new_dentry->d_name.len, 9561 old_ino, 9562 btrfs_ino(BTRFS_I(new_dir)), 9563 old_idx); 9564 if (ret) 9565 goto out_fail; 9566 } 9567 9568 /* And now for the dest. */ 9569 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 9570 /* force full log commit if subvolume involved. */ 9571 btrfs_set_log_full_commit(trans); 9572 } else { 9573 btrfs_pin_log_trans(dest); 9574 dest_log_pinned = true; 9575 ret = btrfs_insert_inode_ref(trans, root, 9576 old_dentry->d_name.name, 9577 old_dentry->d_name.len, 9578 new_ino, 9579 btrfs_ino(BTRFS_I(old_dir)), 9580 new_idx); 9581 if (ret) 9582 goto out_fail; 9583 } 9584 9585 /* Update inode version and ctime/mtime. */ 9586 inode_inc_iversion(old_dir); 9587 inode_inc_iversion(new_dir); 9588 inode_inc_iversion(old_inode); 9589 inode_inc_iversion(new_inode); 9590 old_dir->i_ctime = old_dir->i_mtime = ctime; 9591 new_dir->i_ctime = new_dir->i_mtime = ctime; 9592 old_inode->i_ctime = ctime; 9593 new_inode->i_ctime = ctime; 9594 9595 if (old_dentry->d_parent != new_dentry->d_parent) { 9596 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9597 BTRFS_I(old_inode), 1); 9598 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), 9599 BTRFS_I(new_inode), 1); 9600 } 9601 9602 /* src is a subvolume */ 9603 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { 9604 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 9605 ret = btrfs_unlink_subvol(trans, old_dir, root_objectid, 9606 old_dentry->d_name.name, 9607 old_dentry->d_name.len); 9608 } else { /* src is an inode */ 9609 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), 9610 BTRFS_I(old_dentry->d_inode), 9611 old_dentry->d_name.name, 9612 old_dentry->d_name.len); 9613 if (!ret) 9614 ret = btrfs_update_inode(trans, root, old_inode); 9615 } 9616 if (ret) { 9617 btrfs_abort_transaction(trans, ret); 9618 goto out_fail; 9619 } 9620 9621 /* dest is a subvolume */ 9622 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { 9623 root_objectid = BTRFS_I(new_inode)->root->root_key.objectid; 9624 ret = btrfs_unlink_subvol(trans, new_dir, root_objectid, 9625 new_dentry->d_name.name, 9626 new_dentry->d_name.len); 9627 } else { /* dest is an inode */ 9628 ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), 9629 BTRFS_I(new_dentry->d_inode), 9630 new_dentry->d_name.name, 9631 new_dentry->d_name.len); 9632 if (!ret) 9633 ret = btrfs_update_inode(trans, dest, new_inode); 9634 } 9635 if (ret) { 9636 btrfs_abort_transaction(trans, ret); 9637 goto out_fail; 9638 } 9639 9640 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9641 new_dentry->d_name.name, 9642 new_dentry->d_name.len, 0, old_idx); 9643 if (ret) { 9644 btrfs_abort_transaction(trans, ret); 9645 goto out_fail; 9646 } 9647 9648 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), 9649 old_dentry->d_name.name, 9650 old_dentry->d_name.len, 0, new_idx); 9651 if (ret) { 9652 btrfs_abort_transaction(trans, ret); 9653 goto out_fail; 9654 } 9655 9656 if (old_inode->i_nlink == 1) 9657 BTRFS_I(old_inode)->dir_index = old_idx; 9658 if (new_inode->i_nlink == 1) 9659 BTRFS_I(new_inode)->dir_index = new_idx; 9660 9661 if (root_log_pinned) { 9662 parent = new_dentry->d_parent; 9663 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), 9664 BTRFS_I(old_dir), parent, 9665 false, &ctx_root); 9666 if (ret == BTRFS_NEED_LOG_SYNC) 9667 sync_log_root = true; 9668 else if (ret == 
BTRFS_NEED_TRANS_COMMIT) 9669 commit_transaction = true; 9670 ret = 0; 9671 btrfs_end_log_trans(root); 9672 root_log_pinned = false; 9673 } 9674 if (dest_log_pinned) { 9675 if (!commit_transaction) { 9676 parent = old_dentry->d_parent; 9677 ret = btrfs_log_new_name(trans, BTRFS_I(new_inode), 9678 BTRFS_I(new_dir), parent, 9679 false, &ctx_dest); 9680 if (ret == BTRFS_NEED_LOG_SYNC) 9681 sync_log_dest = true; 9682 else if (ret == BTRFS_NEED_TRANS_COMMIT) 9683 commit_transaction = true; 9684 ret = 0; 9685 } 9686 btrfs_end_log_trans(dest); 9687 dest_log_pinned = false; 9688 } 9689 out_fail: 9690 /* 9691 * If we have pinned a log and an error happened, we unpin tasks 9692 * trying to sync the log and force them to fallback to a transaction 9693 * commit if the log currently contains any of the inodes involved in 9694 * this rename operation (to ensure we do not persist a log with an 9695 * inconsistent state for any of these inodes or leading to any 9696 * inconsistencies when replayed). If the transaction was aborted, the 9697 * abortion reason is propagated to userspace when attempting to commit 9698 * the transaction. If the log does not contain any of these inodes, we 9699 * allow the tasks to sync it. 9700 */ 9701 if (ret && (root_log_pinned || dest_log_pinned)) { 9702 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || 9703 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || 9704 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || 9705 (new_inode && 9706 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) 9707 btrfs_set_log_full_commit(trans); 9708 9709 if (root_log_pinned) { 9710 btrfs_end_log_trans(root); 9711 root_log_pinned = false; 9712 } 9713 if (dest_log_pinned) { 9714 btrfs_end_log_trans(dest); 9715 dest_log_pinned = false; 9716 } 9717 } 9718 if (!ret && sync_log_root && !commit_transaction) { 9719 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, 9720 &ctx_root); 9721 if (ret) 9722 commit_transaction = true; 9723 } 9724 if (!ret && sync_log_dest && !commit_transaction) { 9725 ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root, 9726 &ctx_dest); 9727 if (ret) 9728 commit_transaction = true; 9729 } 9730 if (commit_transaction) { 9731 ret = btrfs_commit_transaction(trans); 9732 } else { 9733 int ret2; 9734 9735 ret2 = btrfs_end_transaction(trans); 9736 ret = ret ? 
ret : ret2; 9737 } 9738 out_notrans: 9739 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 9740 up_read(&fs_info->subvol_sem); 9741 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9742 up_read(&fs_info->subvol_sem); 9743 9744 return ret; 9745 } 9746 9747 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, 9748 struct btrfs_root *root, 9749 struct inode *dir, 9750 struct dentry *dentry) 9751 { 9752 int ret; 9753 struct inode *inode; 9754 u64 objectid; 9755 u64 index; 9756 9757 ret = btrfs_find_free_ino(root, &objectid); 9758 if (ret) 9759 return ret; 9760 9761 inode = btrfs_new_inode(trans, root, dir, 9762 dentry->d_name.name, 9763 dentry->d_name.len, 9764 btrfs_ino(BTRFS_I(dir)), 9765 objectid, 9766 S_IFCHR | WHITEOUT_MODE, 9767 &index); 9768 9769 if (IS_ERR(inode)) { 9770 ret = PTR_ERR(inode); 9771 return ret; 9772 } 9773 9774 inode->i_op = &btrfs_special_inode_operations; 9775 init_special_inode(inode, inode->i_mode, 9776 WHITEOUT_DEV); 9777 9778 ret = btrfs_init_inode_security(trans, inode, dir, 9779 &dentry->d_name); 9780 if (ret) 9781 goto out; 9782 9783 ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, 9784 BTRFS_I(inode), 0, index); 9785 if (ret) 9786 goto out; 9787 9788 ret = btrfs_update_inode(trans, root, inode); 9789 out: 9790 unlock_new_inode(inode); 9791 if (ret) 9792 inode_dec_link_count(inode); 9793 iput(inode); 9794 9795 return ret; 9796 } 9797 9798 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 9799 struct inode *new_dir, struct dentry *new_dentry, 9800 unsigned int flags) 9801 { 9802 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); 9803 struct btrfs_trans_handle *trans; 9804 unsigned int trans_num_items; 9805 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9806 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9807 struct inode *new_inode = d_inode(new_dentry); 9808 struct inode *old_inode = d_inode(old_dentry); 9809 u64 index = 0; 9810 u64 root_objectid; 9811 int ret; 9812 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9813 bool log_pinned = false; 9814 struct btrfs_log_ctx ctx; 9815 bool sync_log = false; 9816 bool commit_transaction = false; 9817 9818 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9819 return -EPERM; 9820 9821 /* we only allow rename subvolume link between subvolumes */ 9822 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9823 return -EXDEV; 9824 9825 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 9826 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) 9827 return -ENOTEMPTY; 9828 9829 if (S_ISDIR(old_inode->i_mode) && new_inode && 9830 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 9831 return -ENOTEMPTY; 9832 9833 9834 /* check for collisions, even if the name isn't there */ 9835 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, 9836 new_dentry->d_name.name, 9837 new_dentry->d_name.len); 9838 9839 if (ret) { 9840 if (ret == -EEXIST) { 9841 /* we shouldn't get 9842 * eexist without a new_inode */ 9843 if (WARN_ON(!new_inode)) { 9844 return ret; 9845 } 9846 } else { 9847 /* maybe -EOVERFLOW */ 9848 return ret; 9849 } 9850 } 9851 ret = 0; 9852 9853 /* 9854 * we're using rename to replace one file with another. 
Start IO on it 9855 * now so we don't add too much work to the end of the transaction 9856 */ 9857 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9858 filemap_flush(old_inode->i_mapping); 9859 9860 /* close the racy window with snapshot create/destroy ioctl */ 9861 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9862 down_read(&fs_info->subvol_sem); 9863 /* 9864 * We want to reserve the absolute worst case amount of items. So if 9865 * both inodes are subvols and we need to unlink them then that would 9866 * require 4 item modifications, but if they are both normal inodes it 9867 * would require 5 item modifications, so we'll assume they are normal 9868 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items 9869 * should cover the worst case number of items we'll modify. 9870 * If our rename has the whiteout flag, we need 5 more units for the 9871 * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item 9872 * when selinux is enabled). 9873 */ 9874 trans_num_items = 11; 9875 if (flags & RENAME_WHITEOUT) 9876 trans_num_items += 5; 9877 trans = btrfs_start_transaction(root, trans_num_items); 9878 if (IS_ERR(trans)) { 9879 ret = PTR_ERR(trans); 9880 goto out_notrans; 9881 } 9882 9883 if (dest != root) 9884 btrfs_record_root_in_trans(trans, dest); 9885 9886 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); 9887 if (ret) 9888 goto out_fail; 9889 9890 BTRFS_I(old_inode)->dir_index = 0ULL; 9891 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9892 /* force full log commit if subvolume involved. */ 9893 btrfs_set_log_full_commit(trans); 9894 } else { 9895 btrfs_pin_log_trans(root); 9896 log_pinned = true; 9897 ret = btrfs_insert_inode_ref(trans, dest, 9898 new_dentry->d_name.name, 9899 new_dentry->d_name.len, 9900 old_ino, 9901 btrfs_ino(BTRFS_I(new_dir)), index); 9902 if (ret) 9903 goto out_fail; 9904 } 9905 9906 inode_inc_iversion(old_dir); 9907 inode_inc_iversion(new_dir); 9908 inode_inc_iversion(old_inode); 9909 old_dir->i_ctime = old_dir->i_mtime = 9910 new_dir->i_ctime = new_dir->i_mtime = 9911 old_inode->i_ctime = current_time(old_dir); 9912 9913 if (old_dentry->d_parent != new_dentry->d_parent) 9914 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), 9915 BTRFS_I(old_inode), 1); 9916 9917 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9918 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 9919 ret = btrfs_unlink_subvol(trans, old_dir, root_objectid, 9920 old_dentry->d_name.name, 9921 old_dentry->d_name.len); 9922 } else { 9923 ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), 9924 BTRFS_I(d_inode(old_dentry)), 9925 old_dentry->d_name.name, 9926 old_dentry->d_name.len); 9927 if (!ret) 9928 ret = btrfs_update_inode(trans, root, old_inode); 9929 } 9930 if (ret) { 9931 btrfs_abort_transaction(trans, ret); 9932 goto out_fail; 9933 } 9934 9935 if (new_inode) { 9936 inode_inc_iversion(new_inode); 9937 new_inode->i_ctime = current_time(new_inode); 9938 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == 9939 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9940 root_objectid = BTRFS_I(new_inode)->location.objectid; 9941 ret = btrfs_unlink_subvol(trans, new_dir, root_objectid, 9942 new_dentry->d_name.name, 9943 new_dentry->d_name.len); 9944 BUG_ON(new_inode->i_nlink == 0); 9945 } else { 9946 ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), 9947 BTRFS_I(d_inode(new_dentry)), 9948 new_dentry->d_name.name, 9949 new_dentry->d_name.len); 9950 } 9951 if (!ret && new_inode->i_nlink == 0) 9952 ret = btrfs_orphan_add(trans, 9953
BTRFS_I(d_inode(new_dentry))); 9954 if (ret) { 9955 btrfs_abort_transaction(trans, ret); 9956 goto out_fail; 9957 } 9958 } 9959 9960 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), 9961 new_dentry->d_name.name, 9962 new_dentry->d_name.len, 0, index); 9963 if (ret) { 9964 btrfs_abort_transaction(trans, ret); 9965 goto out_fail; 9966 } 9967 9968 if (old_inode->i_nlink == 1) 9969 BTRFS_I(old_inode)->dir_index = index; 9970 9971 if (log_pinned) { 9972 struct dentry *parent = new_dentry->d_parent; 9973 9974 btrfs_init_log_ctx(&ctx, old_inode); 9975 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), 9976 BTRFS_I(old_dir), parent, 9977 false, &ctx); 9978 if (ret == BTRFS_NEED_LOG_SYNC) 9979 sync_log = true; 9980 else if (ret == BTRFS_NEED_TRANS_COMMIT) 9981 commit_transaction = true; 9982 ret = 0; 9983 btrfs_end_log_trans(root); 9984 log_pinned = false; 9985 } 9986 9987 if (flags & RENAME_WHITEOUT) { 9988 ret = btrfs_whiteout_for_rename(trans, root, old_dir, 9989 old_dentry); 9990 9991 if (ret) { 9992 btrfs_abort_transaction(trans, ret); 9993 goto out_fail; 9994 } 9995 } 9996 out_fail: 9997 /* 9998 * If we have pinned the log and an error happened, we unpin tasks 9999 * trying to sync the log and force them to fallback to a transaction 10000 * commit if the log currently contains any of the inodes involved in 10001 * this rename operation (to ensure we do not persist a log with an 10002 * inconsistent state for any of these inodes or leading to any 10003 * inconsistencies when replayed). If the transaction was aborted, the 10004 * abortion reason is propagated to userspace when attempting to commit 10005 * the transaction. If the log does not contain any of these inodes, we 10006 * allow the tasks to sync it. 10007 */ 10008 if (ret && log_pinned) { 10009 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || 10010 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || 10011 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || 10012 (new_inode && 10013 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) 10014 btrfs_set_log_full_commit(trans); 10015 10016 btrfs_end_log_trans(root); 10017 log_pinned = false; 10018 } 10019 if (!ret && sync_log) { 10020 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx); 10021 if (ret) 10022 commit_transaction = true; 10023 } 10024 if (commit_transaction) { 10025 ret = btrfs_commit_transaction(trans); 10026 } else { 10027 int ret2; 10028 10029 ret2 = btrfs_end_transaction(trans); 10030 ret = ret ? 
ret : ret2; 10031 } 10032 out_notrans: 10033 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 10034 up_read(&fs_info->subvol_sem); 10035 10036 return ret; 10037 } 10038 10039 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, 10040 struct inode *new_dir, struct dentry *new_dentry, 10041 unsigned int flags) 10042 { 10043 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 10044 return -EINVAL; 10045 10046 if (flags & RENAME_EXCHANGE) 10047 return btrfs_rename_exchange(old_dir, old_dentry, new_dir, 10048 new_dentry); 10049 10050 return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); 10051 } 10052 10053 struct btrfs_delalloc_work { 10054 struct inode *inode; 10055 struct completion completion; 10056 struct list_head list; 10057 struct btrfs_work work; 10058 }; 10059 10060 static void btrfs_run_delalloc_work(struct btrfs_work *work) 10061 { 10062 struct btrfs_delalloc_work *delalloc_work; 10063 struct inode *inode; 10064 10065 delalloc_work = container_of(work, struct btrfs_delalloc_work, 10066 work); 10067 inode = delalloc_work->inode; 10068 filemap_flush(inode->i_mapping); 10069 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 10070 &BTRFS_I(inode)->runtime_flags)) 10071 filemap_flush(inode->i_mapping); 10072 10073 iput(inode); 10074 complete(&delalloc_work->completion); 10075 } 10076 10077 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode) 10078 { 10079 struct btrfs_delalloc_work *work; 10080 10081 work = kmalloc(sizeof(*work), GFP_NOFS); 10082 if (!work) 10083 return NULL; 10084 10085 init_completion(&work->completion); 10086 INIT_LIST_HEAD(&work->list); 10087 work->inode = inode; 10088 btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, 10089 btrfs_run_delalloc_work, NULL, NULL); 10090 10091 return work; 10092 } 10093 10094 /* 10095 * some fairly slow code that needs optimization. This walks the list 10096 * of all the inodes with pending delalloc and forces them to disk. 
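 * Returns the number of inodes queued for flushing, capped at @nr unless @nr
 * is -1 (flush everything), or a negative errno on failure.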
10097 */ 10098 static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot) 10099 { 10100 struct btrfs_inode *binode; 10101 struct inode *inode; 10102 struct btrfs_delalloc_work *work, *next; 10103 struct list_head works; 10104 struct list_head splice; 10105 int ret = 0; 10106 10107 INIT_LIST_HEAD(&works); 10108 INIT_LIST_HEAD(&splice); 10109 10110 mutex_lock(&root->delalloc_mutex); 10111 spin_lock(&root->delalloc_lock); 10112 list_splice_init(&root->delalloc_inodes, &splice); 10113 while (!list_empty(&splice)) { 10114 binode = list_entry(splice.next, struct btrfs_inode, 10115 delalloc_inodes); 10116 10117 list_move_tail(&binode->delalloc_inodes, 10118 &root->delalloc_inodes); 10119 inode = igrab(&binode->vfs_inode); 10120 if (!inode) { 10121 cond_resched_lock(&root->delalloc_lock); 10122 continue; 10123 } 10124 spin_unlock(&root->delalloc_lock); 10125 10126 if (snapshot) 10127 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, 10128 &binode->runtime_flags); 10129 work = btrfs_alloc_delalloc_work(inode); 10130 if (!work) { 10131 iput(inode); 10132 ret = -ENOMEM; 10133 goto out; 10134 } 10135 list_add_tail(&work->list, &works); 10136 btrfs_queue_work(root->fs_info->flush_workers, 10137 &work->work); 10138 ret++; 10139 if (nr != -1 && ret >= nr) 10140 goto out; 10141 cond_resched(); 10142 spin_lock(&root->delalloc_lock); 10143 } 10144 spin_unlock(&root->delalloc_lock); 10145 10146 out: 10147 list_for_each_entry_safe(work, next, &works, list) { 10148 list_del_init(&work->list); 10149 wait_for_completion(&work->completion); 10150 kfree(work); 10151 } 10152 10153 if (!list_empty(&splice)) { 10154 spin_lock(&root->delalloc_lock); 10155 list_splice_tail(&splice, &root->delalloc_inodes); 10156 spin_unlock(&root->delalloc_lock); 10157 } 10158 mutex_unlock(&root->delalloc_mutex); 10159 return ret; 10160 } 10161 10162 int btrfs_start_delalloc_snapshot(struct btrfs_root *root) 10163 { 10164 struct btrfs_fs_info *fs_info = root->fs_info; 10165 int ret; 10166 10167 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 10168 return -EROFS; 10169 10170 ret = start_delalloc_inodes(root, -1, true); 10171 if (ret > 0) 10172 ret = 0; 10173 return ret; 10174 } 10175 10176 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr) 10177 { 10178 struct btrfs_root *root; 10179 struct list_head splice; 10180 int ret; 10181 10182 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 10183 return -EROFS; 10184 10185 INIT_LIST_HEAD(&splice); 10186 10187 mutex_lock(&fs_info->delalloc_root_mutex); 10188 spin_lock(&fs_info->delalloc_root_lock); 10189 list_splice_init(&fs_info->delalloc_roots, &splice); 10190 while (!list_empty(&splice) && nr) { 10191 root = list_first_entry(&splice, struct btrfs_root, 10192 delalloc_root); 10193 root = btrfs_grab_fs_root(root); 10194 BUG_ON(!root); 10195 list_move_tail(&root->delalloc_root, 10196 &fs_info->delalloc_roots); 10197 spin_unlock(&fs_info->delalloc_root_lock); 10198 10199 ret = start_delalloc_inodes(root, nr, false); 10200 btrfs_put_fs_root(root); 10201 if (ret < 0) 10202 goto out; 10203 10204 if (nr != -1) { 10205 nr -= ret; 10206 WARN_ON(nr < 0); 10207 } 10208 spin_lock(&fs_info->delalloc_root_lock); 10209 } 10210 spin_unlock(&fs_info->delalloc_root_lock); 10211 10212 ret = 0; 10213 out: 10214 if (!list_empty(&splice)) { 10215 spin_lock(&fs_info->delalloc_root_lock); 10216 list_splice_tail(&splice, &fs_info->delalloc_roots); 10217 spin_unlock(&fs_info->delalloc_root_lock); 10218 } 10219 mutex_unlock(&fs_info->delalloc_root_mutex); 10220 return ret; 10221 
} 10222 10223 static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 10224 const char *symname) 10225 { 10226 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 10227 struct btrfs_trans_handle *trans; 10228 struct btrfs_root *root = BTRFS_I(dir)->root; 10229 struct btrfs_path *path; 10230 struct btrfs_key key; 10231 struct inode *inode = NULL; 10232 int err; 10233 u64 objectid; 10234 u64 index = 0; 10235 int name_len; 10236 int datasize; 10237 unsigned long ptr; 10238 struct btrfs_file_extent_item *ei; 10239 struct extent_buffer *leaf; 10240 10241 name_len = strlen(symname); 10242 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) 10243 return -ENAMETOOLONG; 10244 10245 /* 10246 * 2 items for inode item and ref 10247 * 2 items for dir items 10248 * 1 item for updating parent inode item 10249 * 1 item for the inline extent item 10250 * 1 item for xattr if selinux is on 10251 */ 10252 trans = btrfs_start_transaction(root, 7); 10253 if (IS_ERR(trans)) 10254 return PTR_ERR(trans); 10255 10256 err = btrfs_find_free_ino(root, &objectid); 10257 if (err) 10258 goto out_unlock; 10259 10260 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 10261 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), 10262 objectid, S_IFLNK|S_IRWXUGO, &index); 10263 if (IS_ERR(inode)) { 10264 err = PTR_ERR(inode); 10265 inode = NULL; 10266 goto out_unlock; 10267 } 10268 10269 /* 10270 * If the active LSM wants to access the inode during 10271 * d_instantiate it needs these. Smack checks to see 10272 * if the filesystem supports xattrs by looking at the 10273 * ops vector. 10274 */ 10275 inode->i_fop = &btrfs_file_operations; 10276 inode->i_op = &btrfs_file_inode_operations; 10277 inode->i_mapping->a_ops = &btrfs_aops; 10278 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 10279 10280 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 10281 if (err) 10282 goto out_unlock; 10283 10284 path = btrfs_alloc_path(); 10285 if (!path) { 10286 err = -ENOMEM; 10287 goto out_unlock; 10288 } 10289 key.objectid = btrfs_ino(BTRFS_I(inode)); 10290 key.offset = 0; 10291 key.type = BTRFS_EXTENT_DATA_KEY; 10292 datasize = btrfs_file_extent_calc_inline_size(name_len); 10293 err = btrfs_insert_empty_item(trans, root, path, &key, 10294 datasize); 10295 if (err) { 10296 btrfs_free_path(path); 10297 goto out_unlock; 10298 } 10299 leaf = path->nodes[0]; 10300 ei = btrfs_item_ptr(leaf, path->slots[0], 10301 struct btrfs_file_extent_item); 10302 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 10303 btrfs_set_file_extent_type(leaf, ei, 10304 BTRFS_FILE_EXTENT_INLINE); 10305 btrfs_set_file_extent_encryption(leaf, ei, 0); 10306 btrfs_set_file_extent_compression(leaf, ei, 0); 10307 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 10308 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 10309 10310 ptr = btrfs_file_extent_inline_start(ei); 10311 write_extent_buffer(leaf, symname, ptr, name_len); 10312 btrfs_mark_buffer_dirty(leaf); 10313 btrfs_free_path(path); 10314 10315 inode->i_op = &btrfs_symlink_inode_operations; 10316 inode_nohighmem(inode); 10317 inode_set_bytes(inode, name_len); 10318 btrfs_i_size_write(BTRFS_I(inode), name_len); 10319 err = btrfs_update_inode(trans, root, inode); 10320 /* 10321 * Last step, add directory indexes for our symlink inode. This is the 10322 * last step to avoid extra cleanup of these indexes if an error happens 10323 * elsewhere above. 
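 * If adding the directory index fails here, the out_unlock error path below
 * drops the new inode's link count and discards it.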
10324 */ 10325 if (!err) 10326 err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, 10327 BTRFS_I(inode), 0, index); 10328 if (err) 10329 goto out_unlock; 10330 10331 d_instantiate_new(dentry, inode); 10332 10333 out_unlock: 10334 btrfs_end_transaction(trans); 10335 if (err && inode) { 10336 inode_dec_link_count(inode); 10337 discard_new_inode(inode); 10338 } 10339 btrfs_btree_balance_dirty(fs_info); 10340 return err; 10341 } 10342 10343 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 10344 u64 start, u64 num_bytes, u64 min_size, 10345 loff_t actual_len, u64 *alloc_hint, 10346 struct btrfs_trans_handle *trans) 10347 { 10348 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 10349 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 10350 struct extent_map *em; 10351 struct btrfs_root *root = BTRFS_I(inode)->root; 10352 struct btrfs_key ins; 10353 u64 cur_offset = start; 10354 u64 i_size; 10355 u64 cur_bytes; 10356 u64 last_alloc = (u64)-1; 10357 int ret = 0; 10358 bool own_trans = true; 10359 u64 end = start + num_bytes - 1; 10360 10361 if (trans) 10362 own_trans = false; 10363 while (num_bytes > 0) { 10364 if (own_trans) { 10365 trans = btrfs_start_transaction(root, 3); 10366 if (IS_ERR(trans)) { 10367 ret = PTR_ERR(trans); 10368 break; 10369 } 10370 } 10371 10372 cur_bytes = min_t(u64, num_bytes, SZ_256M); 10373 cur_bytes = max(cur_bytes, min_size); 10374 /* 10375 * If we are severely fragmented we could end up with really 10376 * small allocations, so if the allocator is returning small 10377 * chunks lets make its job easier by only searching for those 10378 * sized chunks. 10379 */ 10380 cur_bytes = min(cur_bytes, last_alloc); 10381 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, 10382 min_size, 0, *alloc_hint, &ins, 1, 0); 10383 if (ret) { 10384 if (own_trans) 10385 btrfs_end_transaction(trans); 10386 break; 10387 } 10388 btrfs_dec_block_group_reservations(fs_info, ins.objectid); 10389 10390 last_alloc = ins.offset; 10391 ret = insert_reserved_file_extent(trans, inode, 10392 cur_offset, ins.objectid, 10393 ins.offset, ins.offset, 10394 ins.offset, 0, 0, 0, 10395 BTRFS_FILE_EXTENT_PREALLOC); 10396 if (ret) { 10397 btrfs_free_reserved_extent(fs_info, ins.objectid, 10398 ins.offset, 0); 10399 btrfs_abort_transaction(trans, ret); 10400 if (own_trans) 10401 btrfs_end_transaction(trans); 10402 break; 10403 } 10404 10405 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, 10406 cur_offset + ins.offset -1, 0); 10407 10408 em = alloc_extent_map(); 10409 if (!em) { 10410 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 10411 &BTRFS_I(inode)->runtime_flags); 10412 goto next; 10413 } 10414 10415 em->start = cur_offset; 10416 em->orig_start = cur_offset; 10417 em->len = ins.offset; 10418 em->block_start = ins.objectid; 10419 em->block_len = ins.offset; 10420 em->orig_block_len = ins.offset; 10421 em->ram_bytes = ins.offset; 10422 em->bdev = fs_info->fs_devices->latest_bdev; 10423 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 10424 em->generation = trans->transid; 10425 10426 while (1) { 10427 write_lock(&em_tree->lock); 10428 ret = add_extent_mapping(em_tree, em, 1); 10429 write_unlock(&em_tree->lock); 10430 if (ret != -EEXIST) 10431 break; 10432 btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, 10433 cur_offset + ins.offset - 1, 10434 0); 10435 } 10436 free_extent_map(em); 10437 next: 10438 num_bytes -= ins.offset; 10439 cur_offset += ins.offset; 10440 *alloc_hint = ins.objectid + ins.offset; 10441 10442 inode_inc_iversion(inode); 10443 inode->i_ctime = 
current_time(inode); 10444 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 10445 if (!(mode & FALLOC_FL_KEEP_SIZE) && 10446 (actual_len > inode->i_size) && 10447 (cur_offset > inode->i_size)) { 10448 if (cur_offset > actual_len) 10449 i_size = actual_len; 10450 else 10451 i_size = cur_offset; 10452 i_size_write(inode, i_size); 10453 btrfs_ordered_update_i_size(inode, i_size, NULL); 10454 } 10455 10456 ret = btrfs_update_inode(trans, root, inode); 10457 10458 if (ret) { 10459 btrfs_abort_transaction(trans, ret); 10460 if (own_trans) 10461 btrfs_end_transaction(trans); 10462 break; 10463 } 10464 10465 if (own_trans) 10466 btrfs_end_transaction(trans); 10467 } 10468 if (cur_offset < end) 10469 btrfs_free_reserved_data_space(inode, NULL, cur_offset, 10470 end - cur_offset + 1); 10471 return ret; 10472 } 10473 10474 int btrfs_prealloc_file_range(struct inode *inode, int mode, 10475 u64 start, u64 num_bytes, u64 min_size, 10476 loff_t actual_len, u64 *alloc_hint) 10477 { 10478 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 10479 min_size, actual_len, alloc_hint, 10480 NULL); 10481 } 10482 10483 int btrfs_prealloc_file_range_trans(struct inode *inode, 10484 struct btrfs_trans_handle *trans, int mode, 10485 u64 start, u64 num_bytes, u64 min_size, 10486 loff_t actual_len, u64 *alloc_hint) 10487 { 10488 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 10489 min_size, actual_len, alloc_hint, trans); 10490 } 10491 10492 static int btrfs_set_page_dirty(struct page *page) 10493 { 10494 return __set_page_dirty_nobuffers(page); 10495 } 10496 10497 static int btrfs_permission(struct inode *inode, int mask) 10498 { 10499 struct btrfs_root *root = BTRFS_I(inode)->root; 10500 umode_t mode = inode->i_mode; 10501 10502 if (mask & MAY_WRITE && 10503 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 10504 if (btrfs_root_readonly(root)) 10505 return -EROFS; 10506 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 10507 return -EACCES; 10508 } 10509 return generic_permission(inode, mask); 10510 } 10511 10512 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 10513 { 10514 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); 10515 struct btrfs_trans_handle *trans; 10516 struct btrfs_root *root = BTRFS_I(dir)->root; 10517 struct inode *inode = NULL; 10518 u64 objectid; 10519 u64 index; 10520 int ret = 0; 10521 10522 /* 10523 * 5 units required for adding orphan entry 10524 */ 10525 trans = btrfs_start_transaction(root, 5); 10526 if (IS_ERR(trans)) 10527 return PTR_ERR(trans); 10528 10529 ret = btrfs_find_free_ino(root, &objectid); 10530 if (ret) 10531 goto out; 10532 10533 inode = btrfs_new_inode(trans, root, dir, NULL, 0, 10534 btrfs_ino(BTRFS_I(dir)), objectid, mode, &index); 10535 if (IS_ERR(inode)) { 10536 ret = PTR_ERR(inode); 10537 inode = NULL; 10538 goto out; 10539 } 10540 10541 inode->i_fop = &btrfs_file_operations; 10542 inode->i_op = &btrfs_file_inode_operations; 10543 10544 inode->i_mapping->a_ops = &btrfs_aops; 10545 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 10546 10547 ret = btrfs_init_inode_security(trans, inode, dir, NULL); 10548 if (ret) 10549 goto out; 10550 10551 ret = btrfs_update_inode(trans, root, inode); 10552 if (ret) 10553 goto out; 10554 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 10555 if (ret) 10556 goto out; 10557 10558 /* 10559 * We set number of links to 0 in btrfs_new_inode(), and here we set 10560 * it to 1 because d_tmpfile() will issue a warning if the count is 0, 10561 * through: 10562 * 10563 * 
d_tmpfile() -> inode_dec_link_count() -> drop_nlink() 10564 */ 10565 set_nlink(inode, 1); 10566 d_tmpfile(dentry, inode); 10567 unlock_new_inode(inode); 10568 mark_inode_dirty(inode); 10569 out: 10570 btrfs_end_transaction(trans); 10571 if (ret && inode) 10572 discard_new_inode(inode); 10573 btrfs_btree_balance_dirty(fs_info); 10574 return ret; 10575 } 10576 10577 void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) 10578 { 10579 struct inode *inode = tree->private_data; 10580 unsigned long index = start >> PAGE_SHIFT; 10581 unsigned long end_index = end >> PAGE_SHIFT; 10582 struct page *page; 10583 10584 while (index <= end_index) { 10585 page = find_get_page(inode->i_mapping, index); 10586 ASSERT(page); /* Pages should be in the extent_io_tree */ 10587 set_page_writeback(page); 10588 put_page(page); 10589 index++; 10590 } 10591 } 10592 10593 #ifdef CONFIG_SWAP 10594 /* 10595 * Add an entry indicating a block group or device which is pinned by a 10596 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a 10597 * negative errno on failure. 10598 */ 10599 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr, 10600 bool is_block_group) 10601 { 10602 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10603 struct btrfs_swapfile_pin *sp, *entry; 10604 struct rb_node **p; 10605 struct rb_node *parent = NULL; 10606 10607 sp = kmalloc(sizeof(*sp), GFP_NOFS); 10608 if (!sp) 10609 return -ENOMEM; 10610 sp->ptr = ptr; 10611 sp->inode = inode; 10612 sp->is_block_group = is_block_group; 10613 10614 spin_lock(&fs_info->swapfile_pins_lock); 10615 p = &fs_info->swapfile_pins.rb_node; 10616 while (*p) { 10617 parent = *p; 10618 entry = rb_entry(parent, struct btrfs_swapfile_pin, node); 10619 if (sp->ptr < entry->ptr || 10620 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { 10621 p = &(*p)->rb_left; 10622 } else if (sp->ptr > entry->ptr || 10623 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { 10624 p = &(*p)->rb_right; 10625 } else { 10626 spin_unlock(&fs_info->swapfile_pins_lock); 10627 kfree(sp); 10628 return 1; 10629 } 10630 } 10631 rb_link_node(&sp->node, parent, p); 10632 rb_insert_color(&sp->node, &fs_info->swapfile_pins); 10633 spin_unlock(&fs_info->swapfile_pins_lock); 10634 return 0; 10635 } 10636 10637 /* Free all of the entries pinned by this swapfile. 
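 * Called from btrfs_swap_deactivate(), both at swapoff time and when
 * activation fails part way through.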
*/ 10638 static void btrfs_free_swapfile_pins(struct inode *inode) 10639 { 10640 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10641 struct btrfs_swapfile_pin *sp; 10642 struct rb_node *node, *next; 10643 10644 spin_lock(&fs_info->swapfile_pins_lock); 10645 node = rb_first(&fs_info->swapfile_pins); 10646 while (node) { 10647 next = rb_next(node); 10648 sp = rb_entry(node, struct btrfs_swapfile_pin, node); 10649 if (sp->inode == inode) { 10650 rb_erase(&sp->node, &fs_info->swapfile_pins); 10651 if (sp->is_block_group) 10652 btrfs_put_block_group(sp->ptr); 10653 kfree(sp); 10654 } 10655 node = next; 10656 } 10657 spin_unlock(&fs_info->swapfile_pins_lock); 10658 } 10659 10660 struct btrfs_swap_info { 10661 u64 start; 10662 u64 block_start; 10663 u64 block_len; 10664 u64 lowest_ppage; 10665 u64 highest_ppage; 10666 unsigned long nr_pages; 10667 int nr_extents; 10668 }; 10669 10670 static int btrfs_add_swap_extent(struct swap_info_struct *sis, 10671 struct btrfs_swap_info *bsi) 10672 { 10673 unsigned long nr_pages; 10674 u64 first_ppage, first_ppage_reported, next_ppage; 10675 int ret; 10676 10677 first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT; 10678 next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len, 10679 PAGE_SIZE) >> PAGE_SHIFT; 10680 10681 if (first_ppage >= next_ppage) 10682 return 0; 10683 nr_pages = next_ppage - first_ppage; 10684 10685 first_ppage_reported = first_ppage; 10686 if (bsi->start == 0) 10687 first_ppage_reported++; 10688 if (bsi->lowest_ppage > first_ppage_reported) 10689 bsi->lowest_ppage = first_ppage_reported; 10690 if (bsi->highest_ppage < (next_ppage - 1)) 10691 bsi->highest_ppage = next_ppage - 1; 10692 10693 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); 10694 if (ret < 0) 10695 return ret; 10696 bsi->nr_extents += ret; 10697 bsi->nr_pages += nr_pages; 10698 return 0; 10699 } 10700 10701 static void btrfs_swap_deactivate(struct file *file) 10702 { 10703 struct inode *inode = file_inode(file); 10704 10705 btrfs_free_swapfile_pins(inode); 10706 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); 10707 } 10708 10709 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10710 sector_t *span) 10711 { 10712 struct inode *inode = file_inode(file); 10713 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 10714 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 10715 struct extent_state *cached_state = NULL; 10716 struct extent_map *em = NULL; 10717 struct btrfs_device *device = NULL; 10718 struct btrfs_swap_info bsi = { 10719 .lowest_ppage = (sector_t)-1ULL, 10720 }; 10721 int ret = 0; 10722 u64 isize; 10723 u64 start; 10724 10725 /* 10726 * If the swap file was just created, make sure delalloc is done. If the 10727 * file changes again after this, the user is doing something stupid and 10728 * we don't really care. 10729 */ 10730 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); 10731 if (ret) 10732 return ret; 10733 10734 /* 10735 * The inode is locked, so these flags won't change after we check them. 
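 * Swap I/O is submitted to the block addresses recorded at activation time
 * and never goes through the filesystem again, so the file must be NODATACOW
 * (overwrites must happen in place), must carry no checksums and must not be
 * compressed; the checks below enforce exactly that.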
10736 */ 10737 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { 10738 btrfs_warn(fs_info, "swapfile must not be compressed"); 10739 return -EINVAL; 10740 } 10741 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { 10742 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); 10743 return -EINVAL; 10744 } 10745 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 10746 btrfs_warn(fs_info, "swapfile must not be checksummed"); 10747 return -EINVAL; 10748 } 10749 10750 /* 10751 * Balance or device remove/replace/resize can move stuff around from 10752 * under us. The EXCL_OP flag makes sure they aren't running/won't run 10753 * concurrently while we are mapping the swap extents, and 10754 * fs_info->swapfile_pins prevents them from running while the swap file 10755 * is active and moving the extents. Note that this also prevents a 10756 * concurrent device add which isn't actually necessary, but it's not 10757 * really worth the trouble to allow it. 10758 */ 10759 if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { 10760 btrfs_warn(fs_info, 10761 "cannot activate swapfile while exclusive operation is running"); 10762 return -EBUSY; 10763 } 10764 /* 10765 * Snapshots can create extents which require COW even if NODATACOW is 10766 * set. We use this counter to prevent snapshots. We must increment it 10767 * before walking the extents because we don't want a concurrent 10768 * snapshot to run after we've already checked the extents. 10769 */ 10770 atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles); 10771 10772 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); 10773 10774 lock_extent_bits(io_tree, 0, isize - 1, &cached_state); 10775 start = 0; 10776 while (start < isize) { 10777 u64 logical_block_start, physical_block_start; 10778 struct btrfs_block_group_cache *bg; 10779 u64 len = isize - start; 10780 10781 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); 10782 if (IS_ERR(em)) { 10783 ret = PTR_ERR(em); 10784 goto out; 10785 } 10786 10787 if (em->block_start == EXTENT_MAP_HOLE) { 10788 btrfs_warn(fs_info, "swapfile must not have holes"); 10789 ret = -EINVAL; 10790 goto out; 10791 } 10792 if (em->block_start == EXTENT_MAP_INLINE) { 10793 /* 10794 * It's unlikely we'll ever actually find ourselves 10795 * here, as a file small enough to fit inline won't be 10796 * big enough to store more than the swap header, but in 10797 * case something changes in the future, let's catch it 10798 * here rather than later. 
10799 */ 10800 btrfs_warn(fs_info, "swapfile must not be inline"); 10801 ret = -EINVAL; 10802 goto out; 10803 } 10804 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 10805 btrfs_warn(fs_info, "swapfile must not be compressed"); 10806 ret = -EINVAL; 10807 goto out; 10808 } 10809 10810 logical_block_start = em->block_start + (start - em->start); 10811 len = min(len, em->len - (start - em->start)); 10812 free_extent_map(em); 10813 em = NULL; 10814 10815 ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL); 10816 if (ret < 0) { 10817 goto out; 10818 } else if (ret) { 10819 ret = 0; 10820 } else { 10821 btrfs_warn(fs_info, 10822 "swapfile must not be copy-on-write"); 10823 ret = -EINVAL; 10824 goto out; 10825 } 10826 10827 em = btrfs_get_chunk_map(fs_info, logical_block_start, len); 10828 if (IS_ERR(em)) { 10829 ret = PTR_ERR(em); 10830 goto out; 10831 } 10832 10833 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { 10834 btrfs_warn(fs_info, 10835 "swapfile must have single data profile"); 10836 ret = -EINVAL; 10837 goto out; 10838 } 10839 10840 if (device == NULL) { 10841 device = em->map_lookup->stripes[0].dev; 10842 ret = btrfs_add_swapfile_pin(inode, device, false); 10843 if (ret == 1) 10844 ret = 0; 10845 else if (ret) 10846 goto out; 10847 } else if (device != em->map_lookup->stripes[0].dev) { 10848 btrfs_warn(fs_info, "swapfile must be on one device"); 10849 ret = -EINVAL; 10850 goto out; 10851 } 10852 10853 physical_block_start = (em->map_lookup->stripes[0].physical + 10854 (logical_block_start - em->start)); 10855 len = min(len, em->len - (logical_block_start - em->start)); 10856 free_extent_map(em); 10857 em = NULL; 10858 10859 bg = btrfs_lookup_block_group(fs_info, logical_block_start); 10860 if (!bg) { 10861 btrfs_warn(fs_info, 10862 "could not find block group containing swapfile"); 10863 ret = -EINVAL; 10864 goto out; 10865 } 10866 10867 ret = btrfs_add_swapfile_pin(inode, bg, true); 10868 if (ret) { 10869 btrfs_put_block_group(bg); 10870 if (ret == 1) 10871 ret = 0; 10872 else 10873 goto out; 10874 } 10875 10876 if (bsi.block_len && 10877 bsi.block_start + bsi.block_len == physical_block_start) { 10878 bsi.block_len += len; 10879 } else { 10880 if (bsi.block_len) { 10881 ret = btrfs_add_swap_extent(sis, &bsi); 10882 if (ret) 10883 goto out; 10884 } 10885 bsi.start = start; 10886 bsi.block_start = physical_block_start; 10887 bsi.block_len = len; 10888 } 10889 10890 start += len; 10891 } 10892 10893 if (bsi.block_len) 10894 ret = btrfs_add_swap_extent(sis, &bsi); 10895 10896 out: 10897 if (!IS_ERR_OR_NULL(em)) 10898 free_extent_map(em); 10899 10900 unlock_extent_cached(io_tree, 0, isize - 1, &cached_state); 10901 10902 if (ret) 10903 btrfs_swap_deactivate(file); 10904 10905 clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); 10906 10907 if (ret) 10908 return ret; 10909 10910 if (device) 10911 sis->bdev = device->bdev; 10912 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; 10913 sis->max = bsi.nr_pages; 10914 sis->pages = bsi.nr_pages - 1; 10915 sis->highest_bit = bsi.nr_pages - 1; 10916 return bsi.nr_extents; 10917 } 10918 #else 10919 static void btrfs_swap_deactivate(struct file *file) 10920 { 10921 } 10922 10923 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, 10924 sector_t *span) 10925 { 10926 return -EOPNOTSUPP; 10927 } 10928 #endif 10929 10930 static const struct inode_operations btrfs_dir_inode_operations = { 10931 .getattr = btrfs_getattr, 10932 .lookup = btrfs_lookup, 10933 .create = btrfs_create, 10934 .unlink = 
btrfs_unlink, 10935 .link = btrfs_link, 10936 .mkdir = btrfs_mkdir, 10937 .rmdir = btrfs_rmdir, 10938 .rename = btrfs_rename2, 10939 .symlink = btrfs_symlink, 10940 .setattr = btrfs_setattr, 10941 .mknod = btrfs_mknod, 10942 .listxattr = btrfs_listxattr, 10943 .permission = btrfs_permission, 10944 .get_acl = btrfs_get_acl, 10945 .set_acl = btrfs_set_acl, 10946 .update_time = btrfs_update_time, 10947 .tmpfile = btrfs_tmpfile, 10948 }; 10949 static const struct inode_operations btrfs_dir_ro_inode_operations = { 10950 .lookup = btrfs_lookup, 10951 .permission = btrfs_permission, 10952 .update_time = btrfs_update_time, 10953 }; 10954 10955 static const struct file_operations btrfs_dir_file_operations = { 10956 .llseek = generic_file_llseek, 10957 .read = generic_read_dir, 10958 .iterate_shared = btrfs_real_readdir, 10959 .open = btrfs_opendir, 10960 .unlocked_ioctl = btrfs_ioctl, 10961 #ifdef CONFIG_COMPAT 10962 .compat_ioctl = btrfs_compat_ioctl, 10963 #endif 10964 .release = btrfs_release_file, 10965 .fsync = btrfs_sync_file, 10966 }; 10967 10968 static const struct extent_io_ops btrfs_extent_io_ops = { 10969 /* mandatory callbacks */ 10970 .submit_bio_hook = btrfs_submit_bio_hook, 10971 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 10972 }; 10973 10974 /* 10975 * btrfs doesn't support the bmap operation because swapfiles 10976 * use bmap to make a mapping of extents in the file. They assume 10977 * these extents won't change over the life of the file and they 10978 * use the bmap result to do IO directly to the drive. 10979 * 10980 * the btrfs bmap call would return logical addresses that aren't 10981 * suitable for IO and they also will change frequently as COW 10982 * operations happen. So, swapfile + btrfs == corruption. 10983 * 10984 * For now we're avoiding this by dropping bmap. 10985 */ 10986 static const struct address_space_operations btrfs_aops = { 10987 .readpage = btrfs_readpage, 10988 .writepage = btrfs_writepage, 10989 .writepages = btrfs_writepages, 10990 .readpages = btrfs_readpages, 10991 .direct_IO = btrfs_direct_IO, 10992 .invalidatepage = btrfs_invalidatepage, 10993 .releasepage = btrfs_releasepage, 10994 .set_page_dirty = btrfs_set_page_dirty, 10995 .error_remove_page = generic_error_remove_page, 10996 .swap_activate = btrfs_swap_activate, 10997 .swap_deactivate = btrfs_swap_deactivate, 10998 }; 10999 11000 static const struct inode_operations btrfs_file_inode_operations = { 11001 .getattr = btrfs_getattr, 11002 .setattr = btrfs_setattr, 11003 .listxattr = btrfs_listxattr, 11004 .permission = btrfs_permission, 11005 .fiemap = btrfs_fiemap, 11006 .get_acl = btrfs_get_acl, 11007 .set_acl = btrfs_set_acl, 11008 .update_time = btrfs_update_time, 11009 }; 11010 static const struct inode_operations btrfs_special_inode_operations = { 11011 .getattr = btrfs_getattr, 11012 .setattr = btrfs_setattr, 11013 .permission = btrfs_permission, 11014 .listxattr = btrfs_listxattr, 11015 .get_acl = btrfs_get_acl, 11016 .set_acl = btrfs_set_acl, 11017 .update_time = btrfs_update_time, 11018 }; 11019 static const struct inode_operations btrfs_symlink_inode_operations = { 11020 .get_link = page_get_link, 11021 .getattr = btrfs_getattr, 11022 .setattr = btrfs_setattr, 11023 .permission = btrfs_permission, 11024 .listxattr = btrfs_listxattr, 11025 .update_time = btrfs_update_time, 11026 }; 11027 11028 const struct dentry_operations btrfs_dentry_operations = { 11029 .d_delete = btrfs_dentry_delete, 11030 }; 11031
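/*
 * Illustrative userspace sketch, not part of this file: the flags accepted by
 * btrfs_rename2() above are the renameat2(2) flags. RENAME_EXCHANGE is routed
 * to btrfs_rename_exchange() and RENAME_WHITEOUT makes btrfs_rename() create
 * a whiteout (a 0:0 character device) via btrfs_whiteout_for_rename(). The
 * program below shows how the two flags are driven from userspace; it assumes
 * a glibc recent enough (2.28 or later) to provide the renameat2() wrapper
 * and the RENAME_* constants, and RENAME_WHITEOUT additionally requires
 * CAP_MKNOD. Cross-subvolume renames still fail with -EXDEV unless the source
 * is itself a subvolume, as enforced at the top of both rename functions.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>	/* AT_FDCWD */
#include <stdio.h>	/* renameat2() and RENAME_* with _GNU_SOURCE */
#include <string.h>

int main(void)
{
	/* Atomically swap the two names; both paths must already exist. */
	if (renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE) < 0)
		fprintf(stderr, "exchange: %s\n", strerror(errno));

	/*
	 * Move "b" to "c" and leave a whiteout where "b" used to be,
	 * the way overlayfs records deleted entries in an upper layer.
	 */
	if (renameat2(AT_FDCWD, "b", AT_FDCWD, "c", RENAME_WHITEOUT) < 0)
		fprintf(stderr, "whiteout: %s\n", strerror(errno));

	return 0;
}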