/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"

struct btrfs_iget_args {
	struct btrfs_key *location;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
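/*
 * For illustration (not in the original source): the table above is
 * indexed with the S_IFMT bits of i_mode shifted down, roughly
 *
 *	u8 ftype = btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
 *
 * as btrfs_inode_type() does later in this file.  e.g. S_IFREG is
 * 0100000 octal, so S_IFREG >> 12 == 8 and slot 8 holds
 * BTRFS_FT_REG_FILE.
 */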

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, u64 ram_bytes,
					   int type);

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = start;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
					  struct inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, root->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;
	int extent_inserted = 0;
	u32 extent_item_size;

	if (compressed_size)
		data_len = compressed_size;

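	/*
	 * Plain-language summary of the rejection tests below (for
	 * illustration): inlining is only attempted when the range
	 * starts at offset 0, ends within the first page, fits under
	 * both the per-leaf inline limit and the max_inline mount
	 * option, reaches all the way to i_size, and (when
	 * uncompressed) does not end exactly on a sector boundary,
	 * since a whole sector is better stored as a regular extent.
	 */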
	if (start > 0 ||
	    actual_end > PAGE_CACHE_SIZE ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (compressed_size && compressed_pages)
		extent_item_size = btrfs_file_extent_calc_inline_size(
				compressed_size);
	else
		extent_item_size = btrfs_file_extent_calc_inline_size(
				inline_len);

	ret = __btrfs_drop_extents(trans, root, inode, path,
				   start, aligned_end, NULL,
				   1, 1, extent_item_size, &extent_inserted);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
	/*
	 * Don't forget to free the reserved space: an inlined extent is
	 * not counted as a data extent, so free the reservation directly
	 * here.  At reserve time it is always aligned to the page size,
	 * so just free one page.
	 */
	btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans, root);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

static inline int inode_need_compress(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* force compress */
	if (btrfs_test_opt(root, FORCE_COMPRESS))
		return 1;
	/* bad compression ratios */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(root, COMPRESS) ||
	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
	    BTRFS_I(inode)->force_compress)
		return 1;
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
					 struct page *locked_page,
					 u64 start, u64 end,
					 struct async_cow *async_cow,
					 int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
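	/*
	 * For illustration: with 4K pages the clamp above works out to
	 * 32 pages, since a compressed extent never represents more
	 * than 128K of uncompressed data (see the comment on
	 * max_uncompressed below), so there is no point tracking pages
	 * beyond that window in one pass.
	 */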

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(root, inode, start, end,
						    0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(root, inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DEFRAG;
			unsigned long page_error_op;

			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode, start, end, NULL,
						     clear_flags, PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		if (redirty)
			extent_range_redirty_for_io(inode, start, end);
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

	return;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		page_cache_release(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
					       struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		ret = btrfs_reserve_extent(root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1, 1);
		if (ret) {
			free_async_extent_pages(async_extent);

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);

				/*
				 * we need to redirty the pages if we decide to
				 * fallback to uncompressed IO, otherwise we
				 * will not submit these pages down to lower
				 * layers.
				 */
				extent_range_redirty_for_io(inode,
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1);

				goto retry;
			}
			goto out_free;
		}
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_free_reserve;
		}
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = async_extent->ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		if (ret)
			goto out_free_reserve;

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret) {
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
			goto out_free_reserve;
		}

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		if (ret) {
			struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
			struct page *p = async_extent->pages[0];
			const u64 start = async_extent->start;
			const u64 end = start + async_extent->ram_size - 1;

			p->mapping = inode->i_mapping;
			tree->ops->writepage_end_io_hook(p, start, end,
							 NULL, 0);
			p->mapping = NULL;
			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
						     PAGE_END_WRITEBACK |
						     PAGE_SET_ERROR);
			free_async_extent_pages(async_extent);
		}
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	return;
out_free_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	goto again;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(root, inode, start, end, 0, 0,
					    NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_reserve;
		}
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}
		if (ret)
			goto out_reserve;

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_drop_extent_cache;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret)
				goto out_drop_extent_cache;
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
		op |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}

/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	/*
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
		    !btrfs_test_opt(root, FORCE_COMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		btrfs_init_work(&async_cow->work,
				btrfs_delalloc_helper,
				async_cow_start, async_cow_submit,
				async_cow_free);

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_work(root->fs_info->delalloc_workers,
				 &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * invoked for the nocow writeback path.  This checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino)
			break;
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * if there are pending snapshots for this root,
			 * we fall back to the common COW path.
			 */
			if (!nolock) {
				err = btrfs_start_write_no_snapshoting(root);
				if (!err)
					goto out_check;
			}
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			if (!nolock && nocow)
				btrfs_end_write_no_snapshoting(root);
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page,
					     cow_start, found_key.offset - 1,
					     page_started, nr_written, 1);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = found_key.offset - extent_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->orig_block_len = disk_num_bytes;
			em->ram_bytes = ram_bytes;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			em->mod_start = em->start;
			em->mod_len = em->len;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			set_bit(EXTENT_FLAG_FILLING, &em->flags);
			em->generation = -1;
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em, 1);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC, PAGE_UNLOCK |
					     PAGE_SET_PRIVATE2);
		if (!nolock && nocow)
			btrfs_end_write_no_snapshoting(root);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret)
			goto error;
	}

error:
	err = btrfs_end_transaction(trans, root);
	if (!ret)
		ret = err;

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}

static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
{

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
		return 0;

	/*
	 * @defrag_bytes is a hint value, no spinlock held here;
	 * if it is not zero, it means the file is defragging.
	 * Force cow if the given extent needs to be defragged.
	 */
	if (BTRFS_I(inode)->defrag_bytes &&
	    test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			   EXTENT_DEFRAG, 0, NULL))
		return 1;

	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	int force_cow = need_force_cow(inode, start, end);

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!inode_need_compress(inode)) {
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}

static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > BTRFS_MAX_EXTENT_SIZE) {
		u64 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_extent_hook, the same
		 * applies here, just in reverse.
		 */
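		/*
		 * Worked example (for illustration): a range of
		 * 2 * BTRFS_MAX_EXTENT_SIZE is accounted as 2 outstanding
		 * extents.  Split it off-center and the two pieces need
		 * ceil((MAX + 4K) / MAX) + ceil((MAX - 4K) / MAX)
		 * = 2 + 1 = 3 reservations, one more than we hold, so we
		 * fall through and bump outstanding_extents.  A split
		 * exactly on a MAX boundary yields 1 + 1 = 2 and we return
		 * without touching the count.
		 */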
		new_size = orig->end - split + 1;
		num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
					BTRFS_MAX_EXTENT_SIZE);
		new_size = split - orig->start;
		num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
					 BTRFS_MAX_EXTENT_SIZE);
		if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
			      BTRFS_MAX_EXTENT_SIZE) >= num_extents)
			return;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents.  This keeps track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	u64 new_size, old_size;
	u64 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents--;
		spin_unlock(&BTRFS_I(inode)->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent.  If the number
	 * of extents we accounted for is <= the amount we need for the new
	 * range then we can return, otherwise drop.  Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return.  But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on its own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
				BTRFS_MAX_EXTENT_SIZE);
	old_size = new->end - new->start + 1;
	num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
				 BTRFS_MAX_EXTENT_SIZE);

	if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
		      BTRFS_MAX_EXTENT_SIZE) >= num_extents)
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}

static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &root->fs_info->delalloc_roots);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, unsigned *bits)
{

	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/* For sanity tests */
		if (btrfs_test_is_dummy_root(root))
			return;

		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (*bits & EXTENT_DEFRAG)
			BTRFS_I(inode)->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state,
				 unsigned *bits)
{
	u64 len = state->end + 1 - state->start;
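	/*
	 * ceil(len / BTRFS_MAX_EXTENT_SIZE): the number of outstanding
	 * extents a range of this size was accounted as, mirroring the
	 * arithmetic in the split/merge hooks above.
	 */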
	u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
				    BTRFS_MAX_EXTENT_SIZE);

	spin_lock(&BTRFS_I(inode)->lock);
	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
		BTRFS_I(inode)->defrag_bytes -= len;
	spin_unlock(&BTRFS_I(inode)->lock);

	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents -= num_extents;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (*bits & EXTENT_DO_ACCOUNTING &&
		    root != root->fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len);

		/* For sanity tests. */
		if (btrfs_test_is_dummy_root(root))
			return;

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list && !(state->state & EXTENT_NORESERVE))
			btrfs_free_reserved_data_space_noquota(inode,
					state->start, len);

		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes -= len;
		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &BTRFS_I(inode)->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_iter.bi_size;
	map_length = length;
	ret = btrfs_map_block(root->fs_info, rw, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}

/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
	int ret = 0;
	int skip_sum;
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(inode))
		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			goto out;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			ret = btrfs_submit_compressed_read(inode, bio,
							   mirror_num,
							   bio_flags);
			goto out;
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
		goto out;
	} else if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);

out:
	if (ret < 0) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = 1;
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
		trans->adding_csums = 0;
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, page_start,
					   PAGE_CACHE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly set up for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
2040 */ 2041 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) 2042 { 2043 struct inode *inode = page->mapping->host; 2044 struct btrfs_writepage_fixup *fixup; 2045 struct btrfs_root *root = BTRFS_I(inode)->root; 2046 2047 /* this page is properly in the ordered list */ 2048 if (TestClearPagePrivate2(page)) 2049 return 0; 2050 2051 if (PageChecked(page)) 2052 return -EAGAIN; 2053 2054 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 2055 if (!fixup) 2056 return -EAGAIN; 2057 2058 SetPageChecked(page); 2059 page_cache_get(page); 2060 btrfs_init_work(&fixup->work, btrfs_fixup_helper, 2061 btrfs_writepage_fixup_worker, NULL, NULL); 2062 fixup->page = page; 2063 btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); 2064 return -EBUSY; 2065 } 2066 2067 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, 2068 struct inode *inode, u64 file_pos, 2069 u64 disk_bytenr, u64 disk_num_bytes, 2070 u64 num_bytes, u64 ram_bytes, 2071 u8 compression, u8 encryption, 2072 u16 other_encoding, int extent_type) 2073 { 2074 struct btrfs_root *root = BTRFS_I(inode)->root; 2075 struct btrfs_file_extent_item *fi; 2076 struct btrfs_path *path; 2077 struct extent_buffer *leaf; 2078 struct btrfs_key ins; 2079 int extent_inserted = 0; 2080 int ret; 2081 2082 path = btrfs_alloc_path(); 2083 if (!path) 2084 return -ENOMEM; 2085 2086 /* 2087 * we may be replacing one extent in the tree with another. 2088 * The new extent is pinned in the extent map, and we don't want 2089 * to drop it from the cache until it is completely in the btree. 2090 * 2091 * So, tell btrfs_drop_extents to leave this extent in the cache. 2092 * the caller is expected to unpin it and allow it to be merged 2093 * with the others. 2094 */ 2095 ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, 2096 file_pos + num_bytes, NULL, 0, 2097 1, sizeof(*fi), &extent_inserted); 2098 if (ret) 2099 goto out; 2100 2101 if (!extent_inserted) { 2102 ins.objectid = btrfs_ino(inode); 2103 ins.offset = file_pos; 2104 ins.type = BTRFS_EXTENT_DATA_KEY; 2105 2106 path->leave_spinning = 1; 2107 ret = btrfs_insert_empty_item(trans, root, path, &ins, 2108 sizeof(*fi)); 2109 if (ret) 2110 goto out; 2111 } 2112 leaf = path->nodes[0]; 2113 fi = btrfs_item_ptr(leaf, path->slots[0], 2114 struct btrfs_file_extent_item); 2115 btrfs_set_file_extent_generation(leaf, fi, trans->transid); 2116 btrfs_set_file_extent_type(leaf, fi, extent_type); 2117 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); 2118 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); 2119 btrfs_set_file_extent_offset(leaf, fi, 0); 2120 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); 2121 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); 2122 btrfs_set_file_extent_compression(leaf, fi, compression); 2123 btrfs_set_file_extent_encryption(leaf, fi, encryption); 2124 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); 2125 2126 btrfs_mark_buffer_dirty(leaf); 2127 btrfs_release_path(path); 2128 2129 inode_add_bytes(inode, num_bytes); 2130 2131 ins.objectid = disk_bytenr; 2132 ins.offset = disk_num_bytes; 2133 ins.type = BTRFS_EXTENT_ITEM_KEY; 2134 ret = btrfs_alloc_reserved_file_extent(trans, root, 2135 root->root_key.objectid, 2136 btrfs_ino(inode), file_pos, 2137 ram_bytes, &ins); 2138 /* 2139 * Release the reserved range from inode dirty range map, as it is 2140 * already moved into delayed_ref_head 2141 */ 2142 btrfs_qgroup_release_data(inode, file_pos, ram_bytes); 2143 out: 2144 btrfs_free_path(path); 2145 2146 
return ret;
2147 }
2148 
2149 /* snapshot-aware defrag */
2150 struct sa_defrag_extent_backref {
2151         struct rb_node node;
2152         struct old_sa_defrag_extent *old;
2153         u64 root_id;
2154         u64 inum;
2155         u64 file_pos;
2156         u64 extent_offset;
2157         u64 num_bytes;
2158         u64 generation;
2159 };
2160 
2161 struct old_sa_defrag_extent {
2162         struct list_head list;
2163         struct new_sa_defrag_extent *new;
2164 
2165         u64 extent_offset;
2166         u64 bytenr;
2167         u64 offset;
2168         u64 len;
2169         int count;
2170 };
2171 
2172 struct new_sa_defrag_extent {
2173         struct rb_root root;
2174         struct list_head head;
2175         struct btrfs_path *path;
2176         struct inode *inode;
2177         u64 file_pos;
2178         u64 len;
2179         u64 bytenr;
2180         u64 disk_len;
2181         u8 compress_type;
2182 };
2183 
2184 static int backref_comp(struct sa_defrag_extent_backref *b1,
2185                         struct sa_defrag_extent_backref *b2)
2186 {
2187         if (b1->root_id < b2->root_id)
2188                 return -1;
2189         else if (b1->root_id > b2->root_id)
2190                 return 1;
2191 
2192         if (b1->inum < b2->inum)
2193                 return -1;
2194         else if (b1->inum > b2->inum)
2195                 return 1;
2196 
2197         if (b1->file_pos < b2->file_pos)
2198                 return -1;
2199         else if (b1->file_pos > b2->file_pos)
2200                 return 1;
2201 
2202         /*
2203          * [------------------------------] ===> (a range of space)
2204          * |<--->|   |<---->|  =============> (fs/file tree A)
2205          * |<---------------------------->| ===> (fs/file tree B)
2206          *
2207          * A range of space can refer to two file extents in one tree while
2208          * referring to only one file extent in another tree.
2209          *
2210          * So we may process a disk offset more than once (two extents in A)
2211          * and land on the same extent (one extent in B), and then insert
2212          * two identical backrefs (both referring to the extent in B).
2213          */
2214         return 0;
2215 }
2216 
2217 static void backref_insert(struct rb_root *root,
2218                            struct sa_defrag_extent_backref *backref)
2219 {
2220         struct rb_node **p = &root->rb_node;
2221         struct rb_node *parent = NULL;
2222         struct sa_defrag_extent_backref *entry;
2223         int ret;
2224 
2225         while (*p) {
2226                 parent = *p;
2227                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2228 
2229                 ret = backref_comp(backref, entry);
2230                 if (ret < 0)
2231                         p = &(*p)->rb_left;
2232                 else
2233                         p = &(*p)->rb_right;
2234         }
2235 
2236         rb_link_node(&backref->node, parent, p);
2237         rb_insert_color(&backref->node, root);
2238 }
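/*
 * Because backref_comp() deliberately returns 0 for fully equal keys
 * and backref_insert() sends ties to the right child, the tree can end
 * up holding two identical backrefs (the fs/file tree A vs. B picture
 * above).  This should be harmless for the relink pass:
 * relink_extent_backref() checks the on-disk file extent generation
 * before rewriting anything, so a replayed duplicate is expected to
 * find nothing left to do.
 */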
2242 */ 2243 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, 2244 void *ctx) 2245 { 2246 struct btrfs_file_extent_item *extent; 2247 struct btrfs_fs_info *fs_info; 2248 struct old_sa_defrag_extent *old = ctx; 2249 struct new_sa_defrag_extent *new = old->new; 2250 struct btrfs_path *path = new->path; 2251 struct btrfs_key key; 2252 struct btrfs_root *root; 2253 struct sa_defrag_extent_backref *backref; 2254 struct extent_buffer *leaf; 2255 struct inode *inode = new->inode; 2256 int slot; 2257 int ret; 2258 u64 extent_offset; 2259 u64 num_bytes; 2260 2261 if (BTRFS_I(inode)->root->root_key.objectid == root_id && 2262 inum == btrfs_ino(inode)) 2263 return 0; 2264 2265 key.objectid = root_id; 2266 key.type = BTRFS_ROOT_ITEM_KEY; 2267 key.offset = (u64)-1; 2268 2269 fs_info = BTRFS_I(inode)->root->fs_info; 2270 root = btrfs_read_fs_root_no_name(fs_info, &key); 2271 if (IS_ERR(root)) { 2272 if (PTR_ERR(root) == -ENOENT) 2273 return 0; 2274 WARN_ON(1); 2275 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n", 2276 inum, offset, root_id); 2277 return PTR_ERR(root); 2278 } 2279 2280 key.objectid = inum; 2281 key.type = BTRFS_EXTENT_DATA_KEY; 2282 if (offset > (u64)-1 << 32) 2283 key.offset = 0; 2284 else 2285 key.offset = offset; 2286 2287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2288 if (WARN_ON(ret < 0)) 2289 return ret; 2290 ret = 0; 2291 2292 while (1) { 2293 cond_resched(); 2294 2295 leaf = path->nodes[0]; 2296 slot = path->slots[0]; 2297 2298 if (slot >= btrfs_header_nritems(leaf)) { 2299 ret = btrfs_next_leaf(root, path); 2300 if (ret < 0) { 2301 goto out; 2302 } else if (ret > 0) { 2303 ret = 0; 2304 goto out; 2305 } 2306 continue; 2307 } 2308 2309 path->slots[0]++; 2310 2311 btrfs_item_key_to_cpu(leaf, &key, slot); 2312 2313 if (key.objectid > inum) 2314 goto out; 2315 2316 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) 2317 continue; 2318 2319 extent = btrfs_item_ptr(leaf, slot, 2320 struct btrfs_file_extent_item); 2321 2322 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) 2323 continue; 2324 2325 /* 2326 * 'offset' refers to the exact key.offset, 2327 * NOT the 'offset' field in btrfs_extent_data_ref, ie. 2328 * (key.offset - extent_offset). 
2329 */ 2330 if (key.offset != offset) 2331 continue; 2332 2333 extent_offset = btrfs_file_extent_offset(leaf, extent); 2334 num_bytes = btrfs_file_extent_num_bytes(leaf, extent); 2335 2336 if (extent_offset >= old->extent_offset + old->offset + 2337 old->len || extent_offset + num_bytes <= 2338 old->extent_offset + old->offset) 2339 continue; 2340 break; 2341 } 2342 2343 backref = kmalloc(sizeof(*backref), GFP_NOFS); 2344 if (!backref) { 2345 ret = -ENOENT; 2346 goto out; 2347 } 2348 2349 backref->root_id = root_id; 2350 backref->inum = inum; 2351 backref->file_pos = offset; 2352 backref->num_bytes = num_bytes; 2353 backref->extent_offset = extent_offset; 2354 backref->generation = btrfs_file_extent_generation(leaf, extent); 2355 backref->old = old; 2356 backref_insert(&new->root, backref); 2357 old->count++; 2358 out: 2359 btrfs_release_path(path); 2360 WARN_ON(ret); 2361 return ret; 2362 } 2363 2364 static noinline bool record_extent_backrefs(struct btrfs_path *path, 2365 struct new_sa_defrag_extent *new) 2366 { 2367 struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info; 2368 struct old_sa_defrag_extent *old, *tmp; 2369 int ret; 2370 2371 new->path = path; 2372 2373 list_for_each_entry_safe(old, tmp, &new->head, list) { 2374 ret = iterate_inodes_from_logical(old->bytenr + 2375 old->extent_offset, fs_info, 2376 path, record_one_backref, 2377 old); 2378 if (ret < 0 && ret != -ENOENT) 2379 return false; 2380 2381 /* no backref to be processed for this extent */ 2382 if (!old->count) { 2383 list_del(&old->list); 2384 kfree(old); 2385 } 2386 } 2387 2388 if (list_empty(&new->head)) 2389 return false; 2390 2391 return true; 2392 } 2393 2394 static int relink_is_mergable(struct extent_buffer *leaf, 2395 struct btrfs_file_extent_item *fi, 2396 struct new_sa_defrag_extent *new) 2397 { 2398 if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) 2399 return 0; 2400 2401 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) 2402 return 0; 2403 2404 if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) 2405 return 0; 2406 2407 if (btrfs_file_extent_encryption(leaf, fi) || 2408 btrfs_file_extent_other_encoding(leaf, fi)) 2409 return 0; 2410 2411 return 1; 2412 } 2413 2414 /* 2415 * Note the backref might has changed, and in this case we just return 0. 
2416 */ 2417 static noinline int relink_extent_backref(struct btrfs_path *path, 2418 struct sa_defrag_extent_backref *prev, 2419 struct sa_defrag_extent_backref *backref) 2420 { 2421 struct btrfs_file_extent_item *extent; 2422 struct btrfs_file_extent_item *item; 2423 struct btrfs_ordered_extent *ordered; 2424 struct btrfs_trans_handle *trans; 2425 struct btrfs_fs_info *fs_info; 2426 struct btrfs_root *root; 2427 struct btrfs_key key; 2428 struct extent_buffer *leaf; 2429 struct old_sa_defrag_extent *old = backref->old; 2430 struct new_sa_defrag_extent *new = old->new; 2431 struct inode *src_inode = new->inode; 2432 struct inode *inode; 2433 struct extent_state *cached = NULL; 2434 int ret = 0; 2435 u64 start; 2436 u64 len; 2437 u64 lock_start; 2438 u64 lock_end; 2439 bool merge = false; 2440 int index; 2441 2442 if (prev && prev->root_id == backref->root_id && 2443 prev->inum == backref->inum && 2444 prev->file_pos + prev->num_bytes == backref->file_pos) 2445 merge = true; 2446 2447 /* step 1: get root */ 2448 key.objectid = backref->root_id; 2449 key.type = BTRFS_ROOT_ITEM_KEY; 2450 key.offset = (u64)-1; 2451 2452 fs_info = BTRFS_I(src_inode)->root->fs_info; 2453 index = srcu_read_lock(&fs_info->subvol_srcu); 2454 2455 root = btrfs_read_fs_root_no_name(fs_info, &key); 2456 if (IS_ERR(root)) { 2457 srcu_read_unlock(&fs_info->subvol_srcu, index); 2458 if (PTR_ERR(root) == -ENOENT) 2459 return 0; 2460 return PTR_ERR(root); 2461 } 2462 2463 if (btrfs_root_readonly(root)) { 2464 srcu_read_unlock(&fs_info->subvol_srcu, index); 2465 return 0; 2466 } 2467 2468 /* step 2: get inode */ 2469 key.objectid = backref->inum; 2470 key.type = BTRFS_INODE_ITEM_KEY; 2471 key.offset = 0; 2472 2473 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 2474 if (IS_ERR(inode)) { 2475 srcu_read_unlock(&fs_info->subvol_srcu, index); 2476 return 0; 2477 } 2478 2479 srcu_read_unlock(&fs_info->subvol_srcu, index); 2480 2481 /* step 3: relink backref */ 2482 lock_start = backref->file_pos; 2483 lock_end = backref->file_pos + backref->num_bytes - 1; 2484 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2485 0, &cached); 2486 2487 ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); 2488 if (ordered) { 2489 btrfs_put_ordered_extent(ordered); 2490 goto out_unlock; 2491 } 2492 2493 trans = btrfs_join_transaction(root); 2494 if (IS_ERR(trans)) { 2495 ret = PTR_ERR(trans); 2496 goto out_unlock; 2497 } 2498 2499 key.objectid = backref->inum; 2500 key.type = BTRFS_EXTENT_DATA_KEY; 2501 key.offset = backref->file_pos; 2502 2503 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2504 if (ret < 0) { 2505 goto out_free_path; 2506 } else if (ret > 0) { 2507 ret = 0; 2508 goto out_free_path; 2509 } 2510 2511 extent = btrfs_item_ptr(path->nodes[0], path->slots[0], 2512 struct btrfs_file_extent_item); 2513 2514 if (btrfs_file_extent_generation(path->nodes[0], extent) != 2515 backref->generation) 2516 goto out_free_path; 2517 2518 btrfs_release_path(path); 2519 2520 start = backref->file_pos; 2521 if (backref->extent_offset < old->extent_offset + old->offset) 2522 start += old->extent_offset + old->offset - 2523 backref->extent_offset; 2524 2525 len = min(backref->extent_offset + backref->num_bytes, 2526 old->extent_offset + old->offset + old->len); 2527 len -= max(backref->extent_offset, old->extent_offset + old->offset); 2528 2529 ret = btrfs_drop_extents(trans, root, inode, start, 2530 start + len, 1); 2531 if (ret) 2532 goto out_free_path; 2533 again: 2534 key.objectid = btrfs_ino(inode); 2535 key.type 
= BTRFS_EXTENT_DATA_KEY; 2536 key.offset = start; 2537 2538 path->leave_spinning = 1; 2539 if (merge) { 2540 struct btrfs_file_extent_item *fi; 2541 u64 extent_len; 2542 struct btrfs_key found_key; 2543 2544 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2545 if (ret < 0) 2546 goto out_free_path; 2547 2548 path->slots[0]--; 2549 leaf = path->nodes[0]; 2550 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2551 2552 fi = btrfs_item_ptr(leaf, path->slots[0], 2553 struct btrfs_file_extent_item); 2554 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 2555 2556 if (extent_len + found_key.offset == start && 2557 relink_is_mergable(leaf, fi, new)) { 2558 btrfs_set_file_extent_num_bytes(leaf, fi, 2559 extent_len + len); 2560 btrfs_mark_buffer_dirty(leaf); 2561 inode_add_bytes(inode, len); 2562 2563 ret = 1; 2564 goto out_free_path; 2565 } else { 2566 merge = false; 2567 btrfs_release_path(path); 2568 goto again; 2569 } 2570 } 2571 2572 ret = btrfs_insert_empty_item(trans, root, path, &key, 2573 sizeof(*extent)); 2574 if (ret) { 2575 btrfs_abort_transaction(trans, root, ret); 2576 goto out_free_path; 2577 } 2578 2579 leaf = path->nodes[0]; 2580 item = btrfs_item_ptr(leaf, path->slots[0], 2581 struct btrfs_file_extent_item); 2582 btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); 2583 btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); 2584 btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); 2585 btrfs_set_file_extent_num_bytes(leaf, item, len); 2586 btrfs_set_file_extent_ram_bytes(leaf, item, new->len); 2587 btrfs_set_file_extent_generation(leaf, item, trans->transid); 2588 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); 2589 btrfs_set_file_extent_compression(leaf, item, new->compress_type); 2590 btrfs_set_file_extent_encryption(leaf, item, 0); 2591 btrfs_set_file_extent_other_encoding(leaf, item, 0); 2592 2593 btrfs_mark_buffer_dirty(leaf); 2594 inode_add_bytes(inode, len); 2595 btrfs_release_path(path); 2596 2597 ret = btrfs_inc_extent_ref(trans, root, new->bytenr, 2598 new->disk_len, 0, 2599 backref->root_id, backref->inum, 2600 new->file_pos); /* start - extent_offset */ 2601 if (ret) { 2602 btrfs_abort_transaction(trans, root, ret); 2603 goto out_free_path; 2604 } 2605 2606 ret = 1; 2607 out_free_path: 2608 btrfs_release_path(path); 2609 path->leave_spinning = 0; 2610 btrfs_end_transaction(trans, root); 2611 out_unlock: 2612 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, 2613 &cached, GFP_NOFS); 2614 iput(inode); 2615 return ret; 2616 } 2617 2618 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) 2619 { 2620 struct old_sa_defrag_extent *old, *tmp; 2621 2622 if (!new) 2623 return; 2624 2625 list_for_each_entry_safe(old, tmp, &new->head, list) { 2626 kfree(old); 2627 } 2628 kfree(new); 2629 } 2630 2631 static void relink_file_extents(struct new_sa_defrag_extent *new) 2632 { 2633 struct btrfs_path *path; 2634 struct sa_defrag_extent_backref *backref; 2635 struct sa_defrag_extent_backref *prev = NULL; 2636 struct inode *inode; 2637 struct btrfs_root *root; 2638 struct rb_node *node; 2639 int ret; 2640 2641 inode = new->inode; 2642 root = BTRFS_I(inode)->root; 2643 2644 path = btrfs_alloc_path(); 2645 if (!path) 2646 return; 2647 2648 if (!record_extent_backrefs(path, new)) { 2649 btrfs_free_path(path); 2650 goto out; 2651 } 2652 btrfs_release_path(path); 2653 2654 while (1) { 2655 node = rb_first(&new->root); 2656 if (!node) 2657 break; 2658 rb_erase(node, &new->root); 2659 2660 backref = 
rb_entry(node, struct sa_defrag_extent_backref, node); 2661 2662 ret = relink_extent_backref(path, prev, backref); 2663 WARN_ON(ret < 0); 2664 2665 kfree(prev); 2666 2667 if (ret == 1) 2668 prev = backref; 2669 else 2670 prev = NULL; 2671 cond_resched(); 2672 } 2673 kfree(prev); 2674 2675 btrfs_free_path(path); 2676 out: 2677 free_sa_defrag_extent(new); 2678 2679 atomic_dec(&root->fs_info->defrag_running); 2680 wake_up(&root->fs_info->transaction_wait); 2681 } 2682 2683 static struct new_sa_defrag_extent * 2684 record_old_file_extents(struct inode *inode, 2685 struct btrfs_ordered_extent *ordered) 2686 { 2687 struct btrfs_root *root = BTRFS_I(inode)->root; 2688 struct btrfs_path *path; 2689 struct btrfs_key key; 2690 struct old_sa_defrag_extent *old; 2691 struct new_sa_defrag_extent *new; 2692 int ret; 2693 2694 new = kmalloc(sizeof(*new), GFP_NOFS); 2695 if (!new) 2696 return NULL; 2697 2698 new->inode = inode; 2699 new->file_pos = ordered->file_offset; 2700 new->len = ordered->len; 2701 new->bytenr = ordered->start; 2702 new->disk_len = ordered->disk_len; 2703 new->compress_type = ordered->compress_type; 2704 new->root = RB_ROOT; 2705 INIT_LIST_HEAD(&new->head); 2706 2707 path = btrfs_alloc_path(); 2708 if (!path) 2709 goto out_kfree; 2710 2711 key.objectid = btrfs_ino(inode); 2712 key.type = BTRFS_EXTENT_DATA_KEY; 2713 key.offset = new->file_pos; 2714 2715 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2716 if (ret < 0) 2717 goto out_free_path; 2718 if (ret > 0 && path->slots[0] > 0) 2719 path->slots[0]--; 2720 2721 /* find out all the old extents for the file range */ 2722 while (1) { 2723 struct btrfs_file_extent_item *extent; 2724 struct extent_buffer *l; 2725 int slot; 2726 u64 num_bytes; 2727 u64 offset; 2728 u64 end; 2729 u64 disk_bytenr; 2730 u64 extent_offset; 2731 2732 l = path->nodes[0]; 2733 slot = path->slots[0]; 2734 2735 if (slot >= btrfs_header_nritems(l)) { 2736 ret = btrfs_next_leaf(root, path); 2737 if (ret < 0) 2738 goto out_free_path; 2739 else if (ret > 0) 2740 break; 2741 continue; 2742 } 2743 2744 btrfs_item_key_to_cpu(l, &key, slot); 2745 2746 if (key.objectid != btrfs_ino(inode)) 2747 break; 2748 if (key.type != BTRFS_EXTENT_DATA_KEY) 2749 break; 2750 if (key.offset >= new->file_pos + new->len) 2751 break; 2752 2753 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); 2754 2755 num_bytes = btrfs_file_extent_num_bytes(l, extent); 2756 if (key.offset + num_bytes < new->file_pos) 2757 goto next; 2758 2759 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); 2760 if (!disk_bytenr) 2761 goto next; 2762 2763 extent_offset = btrfs_file_extent_offset(l, extent); 2764 2765 old = kmalloc(sizeof(*old), GFP_NOFS); 2766 if (!old) 2767 goto out_free_path; 2768 2769 offset = max(new->file_pos, key.offset); 2770 end = min(new->file_pos + new->len, key.offset + num_bytes); 2771 2772 old->bytenr = disk_bytenr; 2773 old->extent_offset = extent_offset; 2774 old->offset = offset - key.offset; 2775 old->len = end - offset; 2776 old->new = new; 2777 old->count = 0; 2778 list_add_tail(&old->list, &new->head); 2779 next: 2780 path->slots[0]++; 2781 cond_resched(); 2782 } 2783 2784 btrfs_free_path(path); 2785 atomic_inc(&root->fs_info->defrag_running); 2786 2787 return new; 2788 2789 out_free_path: 2790 btrfs_free_path(path); 2791 out_kfree: 2792 free_sa_defrag_extent(new); 2793 return NULL; 2794 } 2795 2796 static void btrfs_release_delalloc_bytes(struct btrfs_root *root, 2797 u64 start, u64 len) 2798 { 2799 struct btrfs_block_group_cache *cache; 2800 2801 cache = 
btrfs_lookup_block_group(root->fs_info, start); 2802 ASSERT(cache); 2803 2804 spin_lock(&cache->lock); 2805 cache->delalloc_bytes -= len; 2806 spin_unlock(&cache->lock); 2807 2808 btrfs_put_block_group(cache); 2809 } 2810 2811 /* as ordered data IO finishes, this gets called so we can finish 2812 * an ordered extent if the range of bytes in the file it covers are 2813 * fully written. 2814 */ 2815 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) 2816 { 2817 struct inode *inode = ordered_extent->inode; 2818 struct btrfs_root *root = BTRFS_I(inode)->root; 2819 struct btrfs_trans_handle *trans = NULL; 2820 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 2821 struct extent_state *cached_state = NULL; 2822 struct new_sa_defrag_extent *new = NULL; 2823 int compress_type = 0; 2824 int ret = 0; 2825 u64 logical_len = ordered_extent->len; 2826 bool nolock; 2827 bool truncated = false; 2828 2829 nolock = btrfs_is_free_space_inode(inode); 2830 2831 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { 2832 ret = -EIO; 2833 goto out; 2834 } 2835 2836 btrfs_free_io_failure_record(inode, ordered_extent->file_offset, 2837 ordered_extent->file_offset + 2838 ordered_extent->len - 1); 2839 2840 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { 2841 truncated = true; 2842 logical_len = ordered_extent->truncated_len; 2843 /* Truncated the entire extent, don't bother adding */ 2844 if (!logical_len) 2845 goto out; 2846 } 2847 2848 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 2849 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 2850 2851 /* 2852 * For mwrite(mmap + memset to write) case, we still reserve 2853 * space for NOCOW range. 2854 * As NOCOW won't cause a new delayed ref, just free the space 2855 */ 2856 btrfs_qgroup_free_data(inode, ordered_extent->file_offset, 2857 ordered_extent->len); 2858 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 2859 if (nolock) 2860 trans = btrfs_join_transaction_nolock(root); 2861 else 2862 trans = btrfs_join_transaction(root); 2863 if (IS_ERR(trans)) { 2864 ret = PTR_ERR(trans); 2865 trans = NULL; 2866 goto out; 2867 } 2868 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 2869 ret = btrfs_update_inode_fallback(trans, root, inode); 2870 if (ret) /* -ENOMEM or corruption */ 2871 btrfs_abort_transaction(trans, root, ret); 2872 goto out; 2873 } 2874 2875 lock_extent_bits(io_tree, ordered_extent->file_offset, 2876 ordered_extent->file_offset + ordered_extent->len - 1, 2877 0, &cached_state); 2878 2879 ret = test_range_bit(io_tree, ordered_extent->file_offset, 2880 ordered_extent->file_offset + ordered_extent->len - 1, 2881 EXTENT_DEFRAG, 1, cached_state); 2882 if (ret) { 2883 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2884 if (0 && last_snapshot >= BTRFS_I(inode)->generation) 2885 /* the inode is shared */ 2886 new = record_old_file_extents(inode, ordered_extent); 2887 2888 clear_extent_bit(io_tree, ordered_extent->file_offset, 2889 ordered_extent->file_offset + ordered_extent->len - 1, 2890 EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); 2891 } 2892 2893 if (nolock) 2894 trans = btrfs_join_transaction_nolock(root); 2895 else 2896 trans = btrfs_join_transaction(root); 2897 if (IS_ERR(trans)) { 2898 ret = PTR_ERR(trans); 2899 trans = NULL; 2900 goto out_unlock; 2901 } 2902 2903 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 2904 2905 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 2906 compress_type = 
ordered_extent->compress_type; 2907 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 2908 BUG_ON(compress_type); 2909 ret = btrfs_mark_extent_written(trans, inode, 2910 ordered_extent->file_offset, 2911 ordered_extent->file_offset + 2912 logical_len); 2913 } else { 2914 BUG_ON(root == root->fs_info->tree_root); 2915 ret = insert_reserved_file_extent(trans, inode, 2916 ordered_extent->file_offset, 2917 ordered_extent->start, 2918 ordered_extent->disk_len, 2919 logical_len, logical_len, 2920 compress_type, 0, 0, 2921 BTRFS_FILE_EXTENT_REG); 2922 if (!ret) 2923 btrfs_release_delalloc_bytes(root, 2924 ordered_extent->start, 2925 ordered_extent->disk_len); 2926 } 2927 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 2928 ordered_extent->file_offset, ordered_extent->len, 2929 trans->transid); 2930 if (ret < 0) { 2931 btrfs_abort_transaction(trans, root, ret); 2932 goto out_unlock; 2933 } 2934 2935 add_pending_csums(trans, inode, ordered_extent->file_offset, 2936 &ordered_extent->list); 2937 2938 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 2939 ret = btrfs_update_inode_fallback(trans, root, inode); 2940 if (ret) { /* -ENOMEM or corruption */ 2941 btrfs_abort_transaction(trans, root, ret); 2942 goto out_unlock; 2943 } 2944 ret = 0; 2945 out_unlock: 2946 unlock_extent_cached(io_tree, ordered_extent->file_offset, 2947 ordered_extent->file_offset + 2948 ordered_extent->len - 1, &cached_state, GFP_NOFS); 2949 out: 2950 if (root != root->fs_info->tree_root) 2951 btrfs_delalloc_release_metadata(inode, ordered_extent->len); 2952 if (trans) 2953 btrfs_end_transaction(trans, root); 2954 2955 if (ret || truncated) { 2956 u64 start, end; 2957 2958 if (truncated) 2959 start = ordered_extent->file_offset + logical_len; 2960 else 2961 start = ordered_extent->file_offset; 2962 end = ordered_extent->file_offset + ordered_extent->len - 1; 2963 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); 2964 2965 /* Drop the cache for the part of the extent we didn't write. */ 2966 btrfs_drop_extent_cache(inode, start, end, 0); 2967 2968 /* 2969 * If the ordered extent had an IOERR or something else went 2970 * wrong we need to return the space for this ordered extent 2971 * back to the allocator. We only free the extent in the 2972 * truncated case if we didn't write out the extent at all. 2973 */ 2974 if ((ret || !logical_len) && 2975 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && 2976 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) 2977 btrfs_free_reserved_extent(root, ordered_extent->start, 2978 ordered_extent->disk_len, 1); 2979 } 2980 2981 2982 /* 2983 * This needs to be done to make sure anybody waiting knows we are done 2984 * updating everything for this ordered extent. 
2985 */ 2986 btrfs_remove_ordered_extent(inode, ordered_extent); 2987 2988 /* for snapshot-aware defrag */ 2989 if (new) { 2990 if (ret) { 2991 free_sa_defrag_extent(new); 2992 atomic_dec(&root->fs_info->defrag_running); 2993 } else { 2994 relink_file_extents(new); 2995 } 2996 } 2997 2998 /* once for us */ 2999 btrfs_put_ordered_extent(ordered_extent); 3000 /* once for the tree */ 3001 btrfs_put_ordered_extent(ordered_extent); 3002 3003 return ret; 3004 } 3005 3006 static void finish_ordered_fn(struct btrfs_work *work) 3007 { 3008 struct btrfs_ordered_extent *ordered_extent; 3009 ordered_extent = container_of(work, struct btrfs_ordered_extent, work); 3010 btrfs_finish_ordered_io(ordered_extent); 3011 } 3012 3013 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 3014 struct extent_state *state, int uptodate) 3015 { 3016 struct inode *inode = page->mapping->host; 3017 struct btrfs_root *root = BTRFS_I(inode)->root; 3018 struct btrfs_ordered_extent *ordered_extent = NULL; 3019 struct btrfs_workqueue *wq; 3020 btrfs_work_func_t func; 3021 3022 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 3023 3024 ClearPagePrivate2(page); 3025 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 3026 end - start + 1, uptodate)) 3027 return 0; 3028 3029 if (btrfs_is_free_space_inode(inode)) { 3030 wq = root->fs_info->endio_freespace_worker; 3031 func = btrfs_freespace_write_helper; 3032 } else { 3033 wq = root->fs_info->endio_write_workers; 3034 func = btrfs_endio_write_helper; 3035 } 3036 3037 btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, 3038 NULL); 3039 btrfs_queue_work(wq, &ordered_extent->work); 3040 3041 return 0; 3042 } 3043 3044 static int __readpage_endio_check(struct inode *inode, 3045 struct btrfs_io_bio *io_bio, 3046 int icsum, struct page *page, 3047 int pgoff, u64 start, size_t len) 3048 { 3049 char *kaddr; 3050 u32 csum_expected; 3051 u32 csum = ~(u32)0; 3052 3053 csum_expected = *(((u32 *)io_bio->csum) + icsum); 3054 3055 kaddr = kmap_atomic(page); 3056 csum = btrfs_csum_data(kaddr + pgoff, csum, len); 3057 btrfs_csum_final(csum, (char *)&csum); 3058 if (csum != csum_expected) 3059 goto zeroit; 3060 3061 kunmap_atomic(kaddr); 3062 return 0; 3063 zeroit: 3064 btrfs_warn_rl(BTRFS_I(inode)->root->fs_info, 3065 "csum failed ino %llu off %llu csum %u expected csum %u", 3066 btrfs_ino(inode), start, csum, csum_expected); 3067 memset(kaddr + pgoff, 1, len); 3068 flush_dcache_page(page); 3069 kunmap_atomic(kaddr); 3070 if (csum_expected == 0) 3071 return 0; 3072 return -EIO; 3073 } 3074 3075 /* 3076 * when reads are done, we need to check csums to verify the data is correct 3077 * if there's a match, we allow the bio to finish. If not, the code in 3078 * extent_io.c will try to find good copies for us. 
3079 */ 3080 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, 3081 u64 phy_offset, struct page *page, 3082 u64 start, u64 end, int mirror) 3083 { 3084 size_t offset = start - page_offset(page); 3085 struct inode *inode = page->mapping->host; 3086 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3087 struct btrfs_root *root = BTRFS_I(inode)->root; 3088 3089 if (PageChecked(page)) { 3090 ClearPageChecked(page); 3091 return 0; 3092 } 3093 3094 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 3095 return 0; 3096 3097 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 3098 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 3099 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, 3100 GFP_NOFS); 3101 return 0; 3102 } 3103 3104 phy_offset >>= inode->i_sb->s_blocksize_bits; 3105 return __readpage_endio_check(inode, io_bio, phy_offset, page, offset, 3106 start, (size_t)(end - start + 1)); 3107 } 3108 3109 struct delayed_iput { 3110 struct list_head list; 3111 struct inode *inode; 3112 }; 3113 3114 /* JDM: If this is fs-wide, why can't we add a pointer to 3115 * btrfs_inode instead and avoid the allocation? */ 3116 void btrfs_add_delayed_iput(struct inode *inode) 3117 { 3118 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 3119 struct delayed_iput *delayed; 3120 3121 if (atomic_add_unless(&inode->i_count, -1, 1)) 3122 return; 3123 3124 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); 3125 delayed->inode = inode; 3126 3127 spin_lock(&fs_info->delayed_iput_lock); 3128 list_add_tail(&delayed->list, &fs_info->delayed_iputs); 3129 spin_unlock(&fs_info->delayed_iput_lock); 3130 } 3131 3132 void btrfs_run_delayed_iputs(struct btrfs_root *root) 3133 { 3134 LIST_HEAD(list); 3135 struct btrfs_fs_info *fs_info = root->fs_info; 3136 struct delayed_iput *delayed; 3137 int empty; 3138 3139 spin_lock(&fs_info->delayed_iput_lock); 3140 empty = list_empty(&fs_info->delayed_iputs); 3141 spin_unlock(&fs_info->delayed_iput_lock); 3142 if (empty) 3143 return; 3144 3145 down_read(&fs_info->delayed_iput_sem); 3146 3147 spin_lock(&fs_info->delayed_iput_lock); 3148 list_splice_init(&fs_info->delayed_iputs, &list); 3149 spin_unlock(&fs_info->delayed_iput_lock); 3150 3151 while (!list_empty(&list)) { 3152 delayed = list_entry(list.next, struct delayed_iput, list); 3153 list_del(&delayed->list); 3154 iput(delayed->inode); 3155 kfree(delayed); 3156 } 3157 3158 up_read(&root->fs_info->delayed_iput_sem); 3159 } 3160 3161 /* 3162 * This is called in transaction commit time. If there are no orphan 3163 * files in the subvolume, it removes orphan item and frees block_rsv 3164 * structure. 
3165 */ 3166 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 3167 struct btrfs_root *root) 3168 { 3169 struct btrfs_block_rsv *block_rsv; 3170 int ret; 3171 3172 if (atomic_read(&root->orphan_inodes) || 3173 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) 3174 return; 3175 3176 spin_lock(&root->orphan_lock); 3177 if (atomic_read(&root->orphan_inodes)) { 3178 spin_unlock(&root->orphan_lock); 3179 return; 3180 } 3181 3182 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { 3183 spin_unlock(&root->orphan_lock); 3184 return; 3185 } 3186 3187 block_rsv = root->orphan_block_rsv; 3188 root->orphan_block_rsv = NULL; 3189 spin_unlock(&root->orphan_lock); 3190 3191 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) && 3192 btrfs_root_refs(&root->root_item) > 0) { 3193 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, 3194 root->root_key.objectid); 3195 if (ret) 3196 btrfs_abort_transaction(trans, root, ret); 3197 else 3198 clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, 3199 &root->state); 3200 } 3201 3202 if (block_rsv) { 3203 WARN_ON(block_rsv->size > 0); 3204 btrfs_free_block_rsv(root, block_rsv); 3205 } 3206 } 3207 3208 /* 3209 * This creates an orphan entry for the given inode in case something goes 3210 * wrong in the middle of an unlink/truncate. 3211 * 3212 * NOTE: caller of this function should reserve 5 units of metadata for 3213 * this function. 3214 */ 3215 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) 3216 { 3217 struct btrfs_root *root = BTRFS_I(inode)->root; 3218 struct btrfs_block_rsv *block_rsv = NULL; 3219 int reserve = 0; 3220 int insert = 0; 3221 int ret; 3222 3223 if (!root->orphan_block_rsv) { 3224 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 3225 if (!block_rsv) 3226 return -ENOMEM; 3227 } 3228 3229 spin_lock(&root->orphan_lock); 3230 if (!root->orphan_block_rsv) { 3231 root->orphan_block_rsv = block_rsv; 3232 } else if (block_rsv) { 3233 btrfs_free_block_rsv(root, block_rsv); 3234 block_rsv = NULL; 3235 } 3236 3237 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3238 &BTRFS_I(inode)->runtime_flags)) { 3239 #if 0 3240 /* 3241 * For proper ENOSPC handling, we should do orphan 3242 * cleanup when mounting. But this introduces backward 3243 * compatibility issue. 3244 */ 3245 if (!xchg(&root->orphan_item_inserted, 1)) 3246 insert = 2; 3247 else 3248 insert = 1; 3249 #endif 3250 insert = 1; 3251 atomic_inc(&root->orphan_inodes); 3252 } 3253 3254 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3255 &BTRFS_I(inode)->runtime_flags)) 3256 reserve = 1; 3257 spin_unlock(&root->orphan_lock); 3258 3259 /* grab metadata reservation from transaction handle */ 3260 if (reserve) { 3261 ret = btrfs_orphan_reserve_metadata(trans, inode); 3262 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? 
JDM */ 3263 } 3264 3265 /* insert an orphan item to track this unlinked/truncated file */ 3266 if (insert >= 1) { 3267 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 3268 if (ret) { 3269 atomic_dec(&root->orphan_inodes); 3270 if (reserve) { 3271 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3272 &BTRFS_I(inode)->runtime_flags); 3273 btrfs_orphan_release_metadata(inode); 3274 } 3275 if (ret != -EEXIST) { 3276 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3277 &BTRFS_I(inode)->runtime_flags); 3278 btrfs_abort_transaction(trans, root, ret); 3279 return ret; 3280 } 3281 } 3282 ret = 0; 3283 } 3284 3285 /* insert an orphan item to track subvolume contains orphan files */ 3286 if (insert >= 2) { 3287 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 3288 root->root_key.objectid); 3289 if (ret && ret != -EEXIST) { 3290 btrfs_abort_transaction(trans, root, ret); 3291 return ret; 3292 } 3293 } 3294 return 0; 3295 } 3296 3297 /* 3298 * We have done the truncate/delete so we can go ahead and remove the orphan 3299 * item for this particular inode. 3300 */ 3301 static int btrfs_orphan_del(struct btrfs_trans_handle *trans, 3302 struct inode *inode) 3303 { 3304 struct btrfs_root *root = BTRFS_I(inode)->root; 3305 int delete_item = 0; 3306 int release_rsv = 0; 3307 int ret = 0; 3308 3309 spin_lock(&root->orphan_lock); 3310 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3311 &BTRFS_I(inode)->runtime_flags)) 3312 delete_item = 1; 3313 3314 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, 3315 &BTRFS_I(inode)->runtime_flags)) 3316 release_rsv = 1; 3317 spin_unlock(&root->orphan_lock); 3318 3319 if (delete_item) { 3320 atomic_dec(&root->orphan_inodes); 3321 if (trans) 3322 ret = btrfs_del_orphan_item(trans, root, 3323 btrfs_ino(inode)); 3324 } 3325 3326 if (release_rsv) 3327 btrfs_orphan_release_metadata(inode); 3328 3329 return ret; 3330 } 3331 3332 /* 3333 * this cleans up any orphans that may be left on the list from the last use 3334 * of this root. 
3335 */ 3336 int btrfs_orphan_cleanup(struct btrfs_root *root) 3337 { 3338 struct btrfs_path *path; 3339 struct extent_buffer *leaf; 3340 struct btrfs_key key, found_key; 3341 struct btrfs_trans_handle *trans; 3342 struct inode *inode; 3343 u64 last_objectid = 0; 3344 int ret = 0, nr_unlink = 0, nr_truncate = 0; 3345 3346 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 3347 return 0; 3348 3349 path = btrfs_alloc_path(); 3350 if (!path) { 3351 ret = -ENOMEM; 3352 goto out; 3353 } 3354 path->reada = -1; 3355 3356 key.objectid = BTRFS_ORPHAN_OBJECTID; 3357 key.type = BTRFS_ORPHAN_ITEM_KEY; 3358 key.offset = (u64)-1; 3359 3360 while (1) { 3361 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3362 if (ret < 0) 3363 goto out; 3364 3365 /* 3366 * if ret == 0 means we found what we were searching for, which 3367 * is weird, but possible, so only screw with path if we didn't 3368 * find the key and see if we have stuff that matches 3369 */ 3370 if (ret > 0) { 3371 ret = 0; 3372 if (path->slots[0] == 0) 3373 break; 3374 path->slots[0]--; 3375 } 3376 3377 /* pull out the item */ 3378 leaf = path->nodes[0]; 3379 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3380 3381 /* make sure the item matches what we want */ 3382 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 3383 break; 3384 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) 3385 break; 3386 3387 /* release the path since we're done with it */ 3388 btrfs_release_path(path); 3389 3390 /* 3391 * this is where we are basically btrfs_lookup, without the 3392 * crossing root thing. we store the inode number in the 3393 * offset of the orphan item. 3394 */ 3395 3396 if (found_key.offset == last_objectid) { 3397 btrfs_err(root->fs_info, 3398 "Error removing orphan entry, stopping orphan cleanup"); 3399 ret = -EINVAL; 3400 goto out; 3401 } 3402 3403 last_objectid = found_key.offset; 3404 3405 found_key.objectid = found_key.offset; 3406 found_key.type = BTRFS_INODE_ITEM_KEY; 3407 found_key.offset = 0; 3408 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 3409 ret = PTR_ERR_OR_ZERO(inode); 3410 if (ret && ret != -ESTALE) 3411 goto out; 3412 3413 if (ret == -ESTALE && root == root->fs_info->tree_root) { 3414 struct btrfs_root *dead_root; 3415 struct btrfs_fs_info *fs_info = root->fs_info; 3416 int is_dead_root = 0; 3417 3418 /* 3419 * this is an orphan in the tree root. Currently these 3420 * could come from 2 sources: 3421 * a) a snapshot deletion in progress 3422 * b) a free space cache inode 3423 * We need to distinguish those two, as the snapshot 3424 * orphan must not get deleted. 3425 * find_dead_roots already ran before us, so if this 3426 * is a snapshot deletion, we should find the root 3427 * in the dead_roots list 3428 */ 3429 spin_lock(&fs_info->trans_lock); 3430 list_for_each_entry(dead_root, &fs_info->dead_roots, 3431 root_list) { 3432 if (dead_root->root_key.objectid == 3433 found_key.objectid) { 3434 is_dead_root = 1; 3435 break; 3436 } 3437 } 3438 spin_unlock(&fs_info->trans_lock); 3439 if (is_dead_root) { 3440 /* prevent this orphan from being found again */ 3441 key.offset = found_key.objectid - 1; 3442 continue; 3443 } 3444 } 3445 /* 3446 * Inode is already gone but the orphan item is still there, 3447 * kill the orphan item. 
3448 */ 3449 if (ret == -ESTALE) { 3450 trans = btrfs_start_transaction(root, 1); 3451 if (IS_ERR(trans)) { 3452 ret = PTR_ERR(trans); 3453 goto out; 3454 } 3455 btrfs_debug(root->fs_info, "auto deleting %Lu", 3456 found_key.objectid); 3457 ret = btrfs_del_orphan_item(trans, root, 3458 found_key.objectid); 3459 btrfs_end_transaction(trans, root); 3460 if (ret) 3461 goto out; 3462 continue; 3463 } 3464 3465 /* 3466 * add this inode to the orphan list so btrfs_orphan_del does 3467 * the proper thing when we hit it 3468 */ 3469 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 3470 &BTRFS_I(inode)->runtime_flags); 3471 atomic_inc(&root->orphan_inodes); 3472 3473 /* if we have links, this was a truncate, lets do that */ 3474 if (inode->i_nlink) { 3475 if (WARN_ON(!S_ISREG(inode->i_mode))) { 3476 iput(inode); 3477 continue; 3478 } 3479 nr_truncate++; 3480 3481 /* 1 for the orphan item deletion. */ 3482 trans = btrfs_start_transaction(root, 1); 3483 if (IS_ERR(trans)) { 3484 iput(inode); 3485 ret = PTR_ERR(trans); 3486 goto out; 3487 } 3488 ret = btrfs_orphan_add(trans, inode); 3489 btrfs_end_transaction(trans, root); 3490 if (ret) { 3491 iput(inode); 3492 goto out; 3493 } 3494 3495 ret = btrfs_truncate(inode); 3496 if (ret) 3497 btrfs_orphan_del(NULL, inode); 3498 } else { 3499 nr_unlink++; 3500 } 3501 3502 /* this will do delete_inode and everything for us */ 3503 iput(inode); 3504 if (ret) 3505 goto out; 3506 } 3507 /* release the path since we're done with it */ 3508 btrfs_release_path(path); 3509 3510 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 3511 3512 if (root->orphan_block_rsv) 3513 btrfs_block_rsv_release(root, root->orphan_block_rsv, 3514 (u64)-1); 3515 3516 if (root->orphan_block_rsv || 3517 test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { 3518 trans = btrfs_join_transaction(root); 3519 if (!IS_ERR(trans)) 3520 btrfs_end_transaction(trans, root); 3521 } 3522 3523 if (nr_unlink) 3524 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink); 3525 if (nr_truncate) 3526 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate); 3527 3528 out: 3529 if (ret) 3530 btrfs_err(root->fs_info, 3531 "could not do orphan cleanup %d", ret); 3532 btrfs_free_path(path); 3533 return ret; 3534 } 3535 3536 /* 3537 * very simple check to peek ahead in the leaf looking for xattrs. If we 3538 * don't find any xattrs, we know there can't be any acls. 
3539 * 3540 * slot is the slot the inode is in, objectid is the objectid of the inode 3541 */ 3542 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 3543 int slot, u64 objectid, 3544 int *first_xattr_slot) 3545 { 3546 u32 nritems = btrfs_header_nritems(leaf); 3547 struct btrfs_key found_key; 3548 static u64 xattr_access = 0; 3549 static u64 xattr_default = 0; 3550 int scanned = 0; 3551 3552 if (!xattr_access) { 3553 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS, 3554 strlen(POSIX_ACL_XATTR_ACCESS)); 3555 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT, 3556 strlen(POSIX_ACL_XATTR_DEFAULT)); 3557 } 3558 3559 slot++; 3560 *first_xattr_slot = -1; 3561 while (slot < nritems) { 3562 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3563 3564 /* we found a different objectid, there must not be acls */ 3565 if (found_key.objectid != objectid) 3566 return 0; 3567 3568 /* we found an xattr, assume we've got an acl */ 3569 if (found_key.type == BTRFS_XATTR_ITEM_KEY) { 3570 if (*first_xattr_slot == -1) 3571 *first_xattr_slot = slot; 3572 if (found_key.offset == xattr_access || 3573 found_key.offset == xattr_default) 3574 return 1; 3575 } 3576 3577 /* 3578 * we found a key greater than an xattr key, there can't 3579 * be any acls later on 3580 */ 3581 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 3582 return 0; 3583 3584 slot++; 3585 scanned++; 3586 3587 /* 3588 * it goes inode, inode backrefs, xattrs, extents, 3589 * so if there are a ton of hard links to an inode there can 3590 * be a lot of backrefs. Don't waste time searching too hard, 3591 * this is just an optimization 3592 */ 3593 if (scanned >= 8) 3594 break; 3595 } 3596 /* we hit the end of the leaf before we found an xattr or 3597 * something larger than an xattr. We have to assume the inode 3598 * has acls 3599 */ 3600 if (*first_xattr_slot == -1) 3601 *first_xattr_slot = slot; 3602 return 1; 3603 } 3604 3605 /* 3606 * read an inode from the btree into the in-memory inode 3607 */ 3608 static void btrfs_read_locked_inode(struct inode *inode) 3609 { 3610 struct btrfs_path *path; 3611 struct extent_buffer *leaf; 3612 struct btrfs_inode_item *inode_item; 3613 struct btrfs_root *root = BTRFS_I(inode)->root; 3614 struct btrfs_key location; 3615 unsigned long ptr; 3616 int maybe_acls; 3617 u32 rdev; 3618 int ret; 3619 bool filled = false; 3620 int first_xattr_slot; 3621 3622 ret = btrfs_fill_inode(inode, &rdev); 3623 if (!ret) 3624 filled = true; 3625 3626 path = btrfs_alloc_path(); 3627 if (!path) 3628 goto make_bad; 3629 3630 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3631 3632 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3633 if (ret) 3634 goto make_bad; 3635 3636 leaf = path->nodes[0]; 3637 3638 if (filled) 3639 goto cache_index; 3640 3641 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3642 struct btrfs_inode_item); 3643 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 3644 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 3645 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); 3646 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); 3647 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 3648 3649 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); 3650 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); 3651 3652 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); 3653 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); 3654 3655 inode->i_ctime.tv_sec = 
btrfs_timespec_sec(leaf, &inode_item->ctime);
3656         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3657 
3658         BTRFS_I(inode)->i_otime.tv_sec =
3659                 btrfs_timespec_sec(leaf, &inode_item->otime);
3660         BTRFS_I(inode)->i_otime.tv_nsec =
3661                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3662 
3663         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3664         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3665         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3666 
3667         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3668         inode->i_generation = BTRFS_I(inode)->generation;
3669         inode->i_rdev = 0;
3670         rdev = btrfs_inode_rdev(leaf, inode_item);
3671 
3672         BTRFS_I(inode)->index_cnt = (u64)-1;
3673         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3674 
3675 cache_index:
3676         /*
3677          * If we were modified in the current generation and evicted from memory
3678          * and then re-read we need to do a full sync since we don't have any
3679          * idea about which extents were modified before we were evicted from
3680          * cache.
3681          *
3682          * This is required for both inode re-read from disk and delayed inode
3683          * in delayed_nodes_tree.
3684          */
3685         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3686                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3687                         &BTRFS_I(inode)->runtime_flags);
3688 
3689         /*
3690          * We don't persist the id of the transaction where an unlink operation
3691          * against the inode was last made. So here we assume the inode might
3692          * have been evicted, and therefore the exact value of last_unlink_trans
3693          * was lost; we set it to last_trans to avoid metadata inconsistencies
3694          * between the inode and its parent if the inode is fsync'ed and the log
3695          * replayed. For example, in the scenario:
3696          *
3697          * touch mydir/foo
3698          * ln mydir/foo mydir/bar
3699          * sync
3700          * unlink mydir/bar
3701          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3702          * xfs_io -c fsync mydir/foo
3703          * <power failure>
3704          * mount fs, triggers fsync log replay
3705          *
3706          * We must make sure that when we fsync our inode foo we also log its
3707          * parent inode, otherwise after log replay the parent still has the
3708          * dentry with the "bar" name but our inode foo has a link count of 1
3709          * and doesn't have an inode ref with the name "bar" anymore.
3710          *
3711          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3712          * but it guarantees correctness at the expense of occasional full
3713          * transaction commits on fsync if our inode is a directory, or if our
3714          * inode is not a directory, logging its parent unnecessarily.
3715 */ 3716 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; 3717 3718 path->slots[0]++; 3719 if (inode->i_nlink != 1 || 3720 path->slots[0] >= btrfs_header_nritems(leaf)) 3721 goto cache_acl; 3722 3723 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); 3724 if (location.objectid != btrfs_ino(inode)) 3725 goto cache_acl; 3726 3727 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 3728 if (location.type == BTRFS_INODE_REF_KEY) { 3729 struct btrfs_inode_ref *ref; 3730 3731 ref = (struct btrfs_inode_ref *)ptr; 3732 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); 3733 } else if (location.type == BTRFS_INODE_EXTREF_KEY) { 3734 struct btrfs_inode_extref *extref; 3735 3736 extref = (struct btrfs_inode_extref *)ptr; 3737 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, 3738 extref); 3739 } 3740 cache_acl: 3741 /* 3742 * try to precache a NULL acl entry for files that don't have 3743 * any xattrs or acls 3744 */ 3745 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 3746 btrfs_ino(inode), &first_xattr_slot); 3747 if (first_xattr_slot != -1) { 3748 path->slots[0] = first_xattr_slot; 3749 ret = btrfs_load_inode_props(inode, path); 3750 if (ret) 3751 btrfs_err(root->fs_info, 3752 "error loading props for ino %llu (root %llu): %d", 3753 btrfs_ino(inode), 3754 root->root_key.objectid, ret); 3755 } 3756 btrfs_free_path(path); 3757 3758 if (!maybe_acls) 3759 cache_no_acl(inode); 3760 3761 switch (inode->i_mode & S_IFMT) { 3762 case S_IFREG: 3763 inode->i_mapping->a_ops = &btrfs_aops; 3764 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 3765 inode->i_fop = &btrfs_file_operations; 3766 inode->i_op = &btrfs_file_inode_operations; 3767 break; 3768 case S_IFDIR: 3769 inode->i_fop = &btrfs_dir_file_operations; 3770 if (root == root->fs_info->tree_root) 3771 inode->i_op = &btrfs_dir_ro_inode_operations; 3772 else 3773 inode->i_op = &btrfs_dir_inode_operations; 3774 break; 3775 case S_IFLNK: 3776 inode->i_op = &btrfs_symlink_inode_operations; 3777 inode->i_mapping->a_ops = &btrfs_symlink_aops; 3778 break; 3779 default: 3780 inode->i_op = &btrfs_special_inode_operations; 3781 init_special_inode(inode, inode->i_mode, rdev); 3782 break; 3783 } 3784 3785 btrfs_update_iflags(inode); 3786 return; 3787 3788 make_bad: 3789 btrfs_free_path(path); 3790 make_bad_inode(inode); 3791 } 3792 3793 /* 3794 * given a leaf and an inode, copy the inode fields into the leaf 3795 */ 3796 static void fill_inode_item(struct btrfs_trans_handle *trans, 3797 struct extent_buffer *leaf, 3798 struct btrfs_inode_item *item, 3799 struct inode *inode) 3800 { 3801 struct btrfs_map_token token; 3802 3803 btrfs_init_map_token(&token); 3804 3805 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3806 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3807 btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, 3808 &token); 3809 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3810 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3811 3812 btrfs_set_token_timespec_sec(leaf, &item->atime, 3813 inode->i_atime.tv_sec, &token); 3814 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3815 inode->i_atime.tv_nsec, &token); 3816 3817 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3818 inode->i_mtime.tv_sec, &token); 3819 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3820 inode->i_mtime.tv_nsec, &token); 3821 3822 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3823 inode->i_ctime.tv_sec, &token); 3824 
btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3825 inode->i_ctime.tv_nsec, &token); 3826 3827 btrfs_set_token_timespec_sec(leaf, &item->otime, 3828 BTRFS_I(inode)->i_otime.tv_sec, &token); 3829 btrfs_set_token_timespec_nsec(leaf, &item->otime, 3830 BTRFS_I(inode)->i_otime.tv_nsec, &token); 3831 3832 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3833 &token); 3834 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, 3835 &token); 3836 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); 3837 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3838 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3839 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3840 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3841 } 3842 3843 /* 3844 * copy everything in the in-memory inode into the btree. 3845 */ 3846 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 3847 struct btrfs_root *root, struct inode *inode) 3848 { 3849 struct btrfs_inode_item *inode_item; 3850 struct btrfs_path *path; 3851 struct extent_buffer *leaf; 3852 int ret; 3853 3854 path = btrfs_alloc_path(); 3855 if (!path) 3856 return -ENOMEM; 3857 3858 path->leave_spinning = 1; 3859 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 3860 1); 3861 if (ret) { 3862 if (ret > 0) 3863 ret = -ENOENT; 3864 goto failed; 3865 } 3866 3867 leaf = path->nodes[0]; 3868 inode_item = btrfs_item_ptr(leaf, path->slots[0], 3869 struct btrfs_inode_item); 3870 3871 fill_inode_item(trans, leaf, inode_item, inode); 3872 btrfs_mark_buffer_dirty(leaf); 3873 btrfs_set_inode_last_trans(trans, inode); 3874 ret = 0; 3875 failed: 3876 btrfs_free_path(path); 3877 return ret; 3878 } 3879 3880 /* 3881 * copy everything in the in-memory inode into the btree. 3882 */ 3883 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 3884 struct btrfs_root *root, struct inode *inode) 3885 { 3886 int ret; 3887 3888 /* 3889 * If the inode is a free space inode, we can deadlock during commit 3890 * if we put it into the delayed code. 3891 * 3892 * The data relocation inode should also be directly updated 3893 * without delay 3894 */ 3895 if (!btrfs_is_free_space_inode(inode) 3896 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID 3897 && !root->fs_info->log_root_recovering) { 3898 btrfs_update_root_times(trans, root); 3899 3900 ret = btrfs_delayed_update_inode(trans, root, inode); 3901 if (!ret) 3902 btrfs_set_inode_last_trans(trans, inode); 3903 return ret; 3904 } 3905 3906 return btrfs_update_inode_item(trans, root, inode); 3907 } 3908 3909 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 3910 struct btrfs_root *root, 3911 struct inode *inode) 3912 { 3913 int ret; 3914 3915 ret = btrfs_update_inode(trans, root, inode); 3916 if (ret == -ENOSPC) 3917 return btrfs_update_inode_item(trans, root, inode); 3918 return ret; 3919 } 3920 3921 /* 3922 * unlink helper that gets used here in inode.c and in the tree logging 3923 * recovery code. 
It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have the cached dir index, we have to look it up in the
	 * inode ref; and since we then have the inode ref at hand anyway, we
	 * remove the ref directly instead of going through delayed deletion.
	 *
	 * But if we do have the cached dir index, there is no need to search
	 * the inode ref to get it. Since the inode ref is close to the inode
	 * item, it is better to delay its deletion and do it when we update
	 * the inode item.
	 */
	if (BTRFS_I(inode)->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = BTRFS_I(inode)->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		btrfs_info(root->fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name_len, name, ino, dir_ino);
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}
skip_backref:
	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir_ino);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
	else if (ret)
		btrfs_abort_transaction(trans, root, ret);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode_inc_iversion(inode);
	inode_inc_iversion(dir);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret;
	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		drop_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	}
	return ret;
}

/*
 * helper to start transaction for unlink and rmdir.
4041 * 4042 * unlink and rmdir are special in btrfs, they do not always free space, so 4043 * if we cannot make our reservations the normal way try and see if there is 4044 * plenty of slack room in the global reserve to migrate, otherwise we cannot 4045 * allow the unlink to occur. 4046 */ 4047 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 4048 { 4049 struct btrfs_root *root = BTRFS_I(dir)->root; 4050 4051 /* 4052 * 1 for the possible orphan item 4053 * 1 for the dir item 4054 * 1 for the dir index 4055 * 1 for the inode ref 4056 * 1 for the inode 4057 */ 4058 return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); 4059 } 4060 4061 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4062 { 4063 struct btrfs_root *root = BTRFS_I(dir)->root; 4064 struct btrfs_trans_handle *trans; 4065 struct inode *inode = d_inode(dentry); 4066 int ret; 4067 4068 trans = __unlink_start_trans(dir); 4069 if (IS_ERR(trans)) 4070 return PTR_ERR(trans); 4071 4072 btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0); 4073 4074 ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), 4075 dentry->d_name.name, dentry->d_name.len); 4076 if (ret) 4077 goto out; 4078 4079 if (inode->i_nlink == 0) { 4080 ret = btrfs_orphan_add(trans, inode); 4081 if (ret) 4082 goto out; 4083 } 4084 4085 out: 4086 btrfs_end_transaction(trans, root); 4087 btrfs_btree_balance_dirty(root); 4088 return ret; 4089 } 4090 4091 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 4092 struct btrfs_root *root, 4093 struct inode *dir, u64 objectid, 4094 const char *name, int name_len) 4095 { 4096 struct btrfs_path *path; 4097 struct extent_buffer *leaf; 4098 struct btrfs_dir_item *di; 4099 struct btrfs_key key; 4100 u64 index; 4101 int ret; 4102 u64 dir_ino = btrfs_ino(dir); 4103 4104 path = btrfs_alloc_path(); 4105 if (!path) 4106 return -ENOMEM; 4107 4108 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 4109 name, name_len, -1); 4110 if (IS_ERR_OR_NULL(di)) { 4111 if (!di) 4112 ret = -ENOENT; 4113 else 4114 ret = PTR_ERR(di); 4115 goto out; 4116 } 4117 4118 leaf = path->nodes[0]; 4119 btrfs_dir_item_key_to_cpu(leaf, di, &key); 4120 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 4121 ret = btrfs_delete_one_dir_name(trans, root, path, di); 4122 if (ret) { 4123 btrfs_abort_transaction(trans, root, ret); 4124 goto out; 4125 } 4126 btrfs_release_path(path); 4127 4128 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 4129 objectid, root->root_key.objectid, 4130 dir_ino, &index, name, name_len); 4131 if (ret < 0) { 4132 if (ret != -ENOENT) { 4133 btrfs_abort_transaction(trans, root, ret); 4134 goto out; 4135 } 4136 di = btrfs_search_dir_index_item(root, path, dir_ino, 4137 name, name_len); 4138 if (IS_ERR_OR_NULL(di)) { 4139 if (!di) 4140 ret = -ENOENT; 4141 else 4142 ret = PTR_ERR(di); 4143 btrfs_abort_transaction(trans, root, ret); 4144 goto out; 4145 } 4146 4147 leaf = path->nodes[0]; 4148 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4149 btrfs_release_path(path); 4150 index = key.offset; 4151 } 4152 btrfs_release_path(path); 4153 4154 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 4155 if (ret) { 4156 btrfs_abort_transaction(trans, root, ret); 4157 goto out; 4158 } 4159 4160 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 4161 inode_inc_iversion(dir); 4162 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 4163 ret = btrfs_update_inode_fallback(trans, root, dir); 4164 if (ret) 4165 btrfs_abort_transaction(trans, root, ret); 4166 
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
		return -EPERM;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto out;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
out:
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);

	return err;
}

static int truncate_space_check(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytes_deleted)
{
	int ret;

	bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
	ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
				  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		trans->bytes_reserved += bytes_deleted;
	return ret;
}

static int truncate_inline_extent(struct inode *inode,
				  struct btrfs_path *path,
				  struct btrfs_key *found_key,
				  const u64 item_end,
				  const u64 new_size)
{
	struct extent_buffer *leaf = path->nodes[0];
	int slot = path->slots[0];
	struct btrfs_file_extent_item *fi;
	u32 size = (u32)(new_size - found_key->offset);
	struct btrfs_root *root = BTRFS_I(inode)->root;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
		loff_t offset = new_size;
		loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);

		/*
		 * Zero out the remainder of the last page of our inline
		 * extent, instead of directly truncating our inline extent
		 * here - that would be much more complex (decompressing all
		 * the data, then compressing the truncated data, which might
		 * be bigger than the size of the inline extent, resizing the
		 * extent, etc).
		 * We release the path because to get the page we might need to
		 * read the extent item from disk (data not in the page cache).
		 */
		btrfs_release_path(path);
		return btrfs_truncate_page(inode, offset, page_end - offset, 0);
	}

	btrfs_set_file_extent_ram_bytes(leaf, fi, size);
	size = btrfs_file_extent_calc_inline_size(size);
	btrfs_truncate_item(root, path, size, 1);

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		inode_sub_bytes(inode, item_end + 1 - new_size);

	return 0;
}

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.
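 * For example, min_type == BTRFS_EXTENT_DATA_KEY removes the file extent
 * items beyond new_size (and trims the higher-sorting csum items), while
 * the inode item, inode refs, xattrs and directory items are kept, since
 * their key types all sort below BTRFS_EXTENT_DATA_KEY.
 *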
 * If set to 0, this will kill all the items on this inode, including the
 * INODE_ITEM_KEY.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 last_size = new_size;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int ret;
	int err = 0;
	u64 ino = btrfs_ino(inode);
	u64 bytes_deleted = 0;
	bool be_nice = 0;
	bool should_throttle = 0;
	bool should_end = 0;

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	/*
	 * for non-free space inodes and ref cows, we want to back off from
	 * time to time
	 */
	if (!btrfs_is_free_space_inode(inode) &&
	    test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		be_nice = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = -1;

	/*
	 * We want to drop from the next block forward in case this new size is
	 * not block aligned since we will be keeping the last block of the
	 * extent just the way it is.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root == root->fs_info->tree_root)
		btrfs_drop_extent_cache(inode, ALIGN(new_size,
					root->sectorsize), (u64)-1, 0);

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items. So we shouldn't kill the
	 * delayed items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(inode);

	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

search_again:
	/*
	 * with a 16K leaf size and 128MB extents, you can actually queue
	 * up a huge file in a single leaf.
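	 * (A file extent item plus its item header is on the order of 80
	 * bytes, so one 16K leaf can hold roughly 200 of them - with 128MB
	 * extents that is about 25GB of file data referenced by a single
	 * leaf.)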
Most of the time that 4349 * bytes_deleted is > 0, it will be huge by the time we get here 4350 */ 4351 if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4352 if (btrfs_should_end_transaction(trans, root)) { 4353 err = -EAGAIN; 4354 goto error; 4355 } 4356 } 4357 4358 4359 path->leave_spinning = 1; 4360 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 4361 if (ret < 0) { 4362 err = ret; 4363 goto out; 4364 } 4365 4366 if (ret > 0) { 4367 /* there are no items in the tree for us to truncate, we're 4368 * done 4369 */ 4370 if (path->slots[0] == 0) 4371 goto out; 4372 path->slots[0]--; 4373 } 4374 4375 while (1) { 4376 fi = NULL; 4377 leaf = path->nodes[0]; 4378 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4379 found_type = found_key.type; 4380 4381 if (found_key.objectid != ino) 4382 break; 4383 4384 if (found_type < min_type) 4385 break; 4386 4387 item_end = found_key.offset; 4388 if (found_type == BTRFS_EXTENT_DATA_KEY) { 4389 fi = btrfs_item_ptr(leaf, path->slots[0], 4390 struct btrfs_file_extent_item); 4391 extent_type = btrfs_file_extent_type(leaf, fi); 4392 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4393 item_end += 4394 btrfs_file_extent_num_bytes(leaf, fi); 4395 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4396 item_end += btrfs_file_extent_inline_len(leaf, 4397 path->slots[0], fi); 4398 } 4399 item_end--; 4400 } 4401 if (found_type > min_type) { 4402 del_item = 1; 4403 } else { 4404 if (item_end < new_size) 4405 break; 4406 if (found_key.offset >= new_size) 4407 del_item = 1; 4408 else 4409 del_item = 0; 4410 } 4411 found_extent = 0; 4412 /* FIXME, shrink the extent if the ref count is only 1 */ 4413 if (found_type != BTRFS_EXTENT_DATA_KEY) 4414 goto delete; 4415 4416 if (del_item) 4417 last_size = found_key.offset; 4418 else 4419 last_size = new_size; 4420 4421 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 4422 u64 num_dec; 4423 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 4424 if (!del_item) { 4425 u64 orig_num_bytes = 4426 btrfs_file_extent_num_bytes(leaf, fi); 4427 extent_num_bytes = ALIGN(new_size - 4428 found_key.offset, 4429 root->sectorsize); 4430 btrfs_set_file_extent_num_bytes(leaf, fi, 4431 extent_num_bytes); 4432 num_dec = (orig_num_bytes - 4433 extent_num_bytes); 4434 if (test_bit(BTRFS_ROOT_REF_COWS, 4435 &root->state) && 4436 extent_start != 0) 4437 inode_sub_bytes(inode, num_dec); 4438 btrfs_mark_buffer_dirty(leaf); 4439 } else { 4440 extent_num_bytes = 4441 btrfs_file_extent_disk_num_bytes(leaf, 4442 fi); 4443 extent_offset = found_key.offset - 4444 btrfs_file_extent_offset(leaf, fi); 4445 4446 /* FIXME blocksize != 4096 */ 4447 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 4448 if (extent_start != 0) { 4449 found_extent = 1; 4450 if (test_bit(BTRFS_ROOT_REF_COWS, 4451 &root->state)) 4452 inode_sub_bytes(inode, num_dec); 4453 } 4454 } 4455 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 4456 /* 4457 * we can't truncate inline items that have had 4458 * special encodings 4459 */ 4460 if (!del_item && 4461 btrfs_file_extent_encryption(leaf, fi) == 0 && 4462 btrfs_file_extent_other_encoding(leaf, fi) == 0) { 4463 4464 /* 4465 * Need to release path in order to truncate a 4466 * compressed extent. So delete any accumulated 4467 * extent items so far. 
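				 * (The deletions batched in pending_del_slot
				 * and pending_del_nr refer to slots in the
				 * currently locked leaf, so they would become
				 * stale once the path is released.)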
4468 */ 4469 if (btrfs_file_extent_compression(leaf, fi) != 4470 BTRFS_COMPRESS_NONE && pending_del_nr) { 4471 err = btrfs_del_items(trans, root, path, 4472 pending_del_slot, 4473 pending_del_nr); 4474 if (err) { 4475 btrfs_abort_transaction(trans, 4476 root, 4477 err); 4478 goto error; 4479 } 4480 pending_del_nr = 0; 4481 } 4482 4483 err = truncate_inline_extent(inode, path, 4484 &found_key, 4485 item_end, 4486 new_size); 4487 if (err) { 4488 btrfs_abort_transaction(trans, 4489 root, err); 4490 goto error; 4491 } 4492 } else if (test_bit(BTRFS_ROOT_REF_COWS, 4493 &root->state)) { 4494 inode_sub_bytes(inode, item_end + 1 - new_size); 4495 } 4496 } 4497 delete: 4498 if (del_item) { 4499 if (!pending_del_nr) { 4500 /* no pending yet, add ourselves */ 4501 pending_del_slot = path->slots[0]; 4502 pending_del_nr = 1; 4503 } else if (pending_del_nr && 4504 path->slots[0] + 1 == pending_del_slot) { 4505 /* hop on the pending chunk */ 4506 pending_del_nr++; 4507 pending_del_slot = path->slots[0]; 4508 } else { 4509 BUG(); 4510 } 4511 } else { 4512 break; 4513 } 4514 should_throttle = 0; 4515 4516 if (found_extent && 4517 (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || 4518 root == root->fs_info->tree_root)) { 4519 btrfs_set_path_blocking(path); 4520 bytes_deleted += extent_num_bytes; 4521 ret = btrfs_free_extent(trans, root, extent_start, 4522 extent_num_bytes, 0, 4523 btrfs_header_owner(leaf), 4524 ino, extent_offset); 4525 BUG_ON(ret); 4526 if (btrfs_should_throttle_delayed_refs(trans, root)) 4527 btrfs_async_run_delayed_refs(root, 4528 trans->delayed_ref_updates * 2, 0); 4529 if (be_nice) { 4530 if (truncate_space_check(trans, root, 4531 extent_num_bytes)) { 4532 should_end = 1; 4533 } 4534 if (btrfs_should_throttle_delayed_refs(trans, 4535 root)) { 4536 should_throttle = 1; 4537 } 4538 } 4539 } 4540 4541 if (found_type == BTRFS_INODE_ITEM_KEY) 4542 break; 4543 4544 if (path->slots[0] == 0 || 4545 path->slots[0] != pending_del_slot || 4546 should_throttle || should_end) { 4547 if (pending_del_nr) { 4548 ret = btrfs_del_items(trans, root, path, 4549 pending_del_slot, 4550 pending_del_nr); 4551 if (ret) { 4552 btrfs_abort_transaction(trans, 4553 root, ret); 4554 goto error; 4555 } 4556 pending_del_nr = 0; 4557 } 4558 btrfs_release_path(path); 4559 if (should_throttle) { 4560 unsigned long updates = trans->delayed_ref_updates; 4561 if (updates) { 4562 trans->delayed_ref_updates = 0; 4563 ret = btrfs_run_delayed_refs(trans, root, updates * 2); 4564 if (ret && !err) 4565 err = ret; 4566 } 4567 } 4568 /* 4569 * if we failed to refill our space rsv, bail out 4570 * and let the transaction restart 4571 */ 4572 if (should_end) { 4573 err = -EAGAIN; 4574 goto error; 4575 } 4576 goto search_again; 4577 } else { 4578 path->slots[0]--; 4579 } 4580 } 4581 out: 4582 if (pending_del_nr) { 4583 ret = btrfs_del_items(trans, root, path, pending_del_slot, 4584 pending_del_nr); 4585 if (ret) 4586 btrfs_abort_transaction(trans, root, ret); 4587 } 4588 error: 4589 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 4590 btrfs_ordered_update_i_size(inode, last_size, NULL); 4591 4592 btrfs_free_path(path); 4593 4594 if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4595 unsigned long updates = trans->delayed_ref_updates; 4596 if (updates) { 4597 trans->delayed_ref_updates = 0; 4598 ret = btrfs_run_delayed_refs(trans, root, updates * 2); 4599 if (ret && !err) 4600 err = ret; 4601 } 4602 } 4603 return err; 4604 } 4605 4606 /* 4607 * btrfs_truncate_page - read, zero a chunk and write a page 4608 * @inode - inode that 
we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero everything from the offset to the
 *	end of the page
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the page for the "from" offset and cow the page and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
			int front)
{
	struct address_space *mapping = inode->i_mapping;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	int ret = 0;
	u64 page_start;
	u64 page_end;

	if ((offset & (blocksize - 1)) == 0 &&
	    (!len || ((len & (blocksize - 1)) == 0)))
		goto out;
	ret = btrfs_delalloc_reserve_space(inode,
			round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
	if (ret)
		goto out;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode,
				round_down(from, PAGE_CACHE_SIZE),
				PAGE_CACHE_SIZE);
		ret = -ENOMEM;
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
	set_page_extent_mapped(page);

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	if (offset != PAGE_CACHE_SIZE) {
		if (!len)
			len = PAGE_CACHE_SIZE - offset;
		kaddr = kmap(page);
		if (front)
			memset(kaddr, 0, offset);
		else
			memset(kaddr + offset, 0, len);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, page_start,
					     PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
			     u64 offset, u64 len)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Still need to make sure the inode looks like it's been updated so
	 * that any holes get logged if we fsync.
	 */
	if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
		BTRFS_I(inode)->last_trans = root->fs_info->generation;
		BTRFS_I(inode)->last_sub_trans = root->log_transid;
		BTRFS_I(inode)->last_log_commit = root->last_log_commit;
		return 0;
	}

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_end_transaction(trans, root);
		return ret;
	}

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, len, 0, len, 0, 0, 0);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	else
		btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	return ret;
}

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size.
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 hole_start = ALIGN(oldsize, root->sectorsize);
	u64 block_end = ALIGN(size, root->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a page we need to zero out the
	 * rest of the page before we expand the i_size, otherwise we could
	 * expose stale data.
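	 * For example, assuming 4K pages and a 4K sectorsize: growing a file
	 * from i_size 2600 to 8192 zeroes bytes 2600-4095 of the first page
	 * here, and the loop below then covers the aligned range [4096, 8192)
	 * with hole extents.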
	 */
	err = btrfs_truncate_page(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, hole_start,
						     block_end - hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset, 0);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, root->sectorsize);
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;
			hole_size = last_byte - cur_offset;

			err = maybe_insert_hole(root, inode, cur_offset,
						hole_size);
			if (err)
				break;
			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + hole_size - 1, 0);
			hole_em = alloc_extent_map();
			if (!hole_em) {
				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&BTRFS_I(inode)->runtime_flags);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = root->fs_info->generation;

			while (1) {
				write_lock(&em_tree->lock);
				err = add_extent_mapping(em_tree, hole_em, 1);
				write_unlock(&em_tree->lock);
				if (err != -EEXIST)
					break;
				btrfs_drop_extent_cache(inode, cur_offset,
							cur_offset +
							hole_size - 1, 0);
			}
			free_extent_map(hole_em);
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}

static int wait_snapshoting_atomic_t(atomic_t *a)
{
	schedule();
	return 0;
}

static void wait_for_snapshot_creation(struct btrfs_root *root)
{
	while (true) {
		int ret;

		ret = btrfs_start_write_no_snapshoting(root);
		if (ret)
			break;
		wait_on_atomic_t(&root->will_be_snapshoted,
				 wait_snapshoting_atomic_t,
				 TASK_UNINTERRUPTIBLE);
	}
}

static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS sets these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
			inode->i_ctime = inode->i_mtime =
				current_fs_time(inode->i_sb);
	}

	if (newsize > oldsize) {
		truncate_pagecache(inode, newsize);
		/*
		 * Don't do an expanding truncate while snapshotting is
		 * ongoing. This is to ensure the snapshot captures a fully
		 * consistent state of this file - if the snapshot captures
		 * this expanding truncation, it must capture all writes that
		 * happened before this truncation.
		 */
		wait_for_snapshot_creation(root);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret) {
			btrfs_end_write_no_snapshoting(root);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_end_write_no_snapshoting(root);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		ret = btrfs_update_inode(trans, root, inode);
		btrfs_end_write_no_snapshoting(root);
		btrfs_end_transaction(trans, root);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		/*
		 * 1 for the orphan item we're going to add
		 * 1 for the orphan item deletion.
		 */
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		/*
		 * We need to do this in case we fail at _any_ point during the
		 * actual truncate. Once we do the truncate_setsize we could
		 * invalidate pages which forces any outstanding ordered io to
		 * be instantly completed which will give us extents that need
		 * to be truncated. If we fail to get an orphan item down we
		 * could have left over extents that were never meant to live,
		 * so we need to guarantee from this point on that everything
		 * will be consistent.
		 */
		ret = btrfs_orphan_add(trans, inode);
		btrfs_end_transaction(trans, root);
		if (ret)
			return ret;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);

		/* Disable nonlocked read DIO to avoid an endless truncate */
		btrfs_inode_block_unlocked_dio(inode);
		inode_dio_wait(inode);
		btrfs_inode_resume_unlocked_dio(inode);

		ret = btrfs_truncate(inode);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Failed to truncate; disk_i_size is only adjusted
			 * down as we remove extents, so it should represent
			 * the true size of the inode. Reset the in-memory
			 * size and delete our orphan entry.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans)) {
				btrfs_orphan_del(NULL, inode);
				return ret;
			}
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
			err = btrfs_orphan_del(trans, inode);
			if (err)
				btrfs_abort_transaction(trans, root, err);
			btrfs_end_transaction(trans, root);
		}
	}

	return ret;
}

static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(inode);

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(inode, inode->i_mode);
	}

	return err;
}

/*
 * While truncating the inode pages during eviction, we get the VFS calling
 * btrfs_invalidatepage() against each page of the inode. This is slow because
 * the calls to btrfs_invalidatepage() result in a huge amount of calls to
 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
 * extent_state structures over and over, wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
 * those expensive operations on a per page basis and do only the ordered io
 * finishing, while we release here the extent_map and extent_state structures,
 * without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	write_lock(&map_tree->lock);
	while (!RB_EMPTY_ROOT(&map_tree->map)) {
		struct extent_map *em;

		node = rb_first(&map_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(map_tree, em);
		free_extent_map(em);
		if (need_resched()) {
			write_unlock(&map_tree->lock);
			cond_resched();
			write_lock(&map_tree->lock);
		}
	}
	write_unlock(&map_tree->lock);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readpages (called from readahead)
	 * that have their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlock the ranges in the io tree). This means some ranges can still
	 * be locked while eviction has already started, because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (references that would otherwise be dropped in the end io callback
	 * of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to a
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		spin_unlock(&io_tree->lock);

		lock_extent_bits(io_tree, start, end, 0, &cached_state);

		/*
		 * If the extent state still has the DELALLOC flag, the data
		 * didn't reach disk, and its reserved space won't be freed by
		 * a delayed ref, so we need to free that reserved space here.
		 * (Refer to the comment in btrfs_invalidatepage, case 2)
		 *
		 * Note, end is the offset of the last byte, so we need + 1
		 * here.
		 */
		if (state->state & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(inode, start, end - start + 1);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_LOCKED | EXTENT_DIRTY |
				 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
				 EXTENT_DEFRAG, 1, 1,
				 &cached_state, GFP_NOFS);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}

void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv, *global_rsv;
	int steal_from_global = 0;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	int ret;

	trace_btrfs_inode_evict(inode);

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(inode)))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	if (!special_file(inode->i_mode))
		btrfs_wait_ordered_range(inode, 0, (u64)-1);

	btrfs_free_io_failure_record(inode, 0, (u64)-1);

	if (root->fs_info->log_root_recovering) {
		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
				 &BTRFS_I(inode)->runtime_flags));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto no_delete;
	}

	ret = btrfs_commit_inode_delayed_inode(inode);
	if (ret) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	rsv->size = min_size;
	rsv->failfast = 1;
	global_rsv = &root->fs_info->global_block_rsv;

	btrfs_i_size_write(inode, 0);

	/*
	 * This is a bit simpler than btrfs_truncate since we've already
	 * reserved our space for our orphan item in the unlink, so we just
	 * need to reserve some slack space in case we add bytes and update
	 * the inode item when doing the truncate.
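	 * (min_size was computed above as btrfs_calc_trunc_metadata_size(root,
	 * 1), i.e. the metadata reservation needed to modify a single item;
	 * the loop below refills rsv to that size on every pass.)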
	 */
	while (1) {
		ret = btrfs_block_rsv_refill(root, rsv, min_size,
					     BTRFS_RESERVE_FLUSH_LIMIT);

		/*
		 * Try and steal from the global reserve since we will
		 * likely not use this space anyway, we want to try as
		 * hard as possible to get this to work.
		 */
		if (ret)
			steal_from_global++;
		else
			steal_from_global = 0;
		ret = 0;

		/*
		 * steal_from_global == 0: we reserved stuff, hooray!
		 * steal_from_global == 1: we didn't reserve stuff, boo!
		 * steal_from_global == 2: we've committed, still not a lot of
		 * room but maybe we'll have room in the global reserve this
		 * time.
		 * steal_from_global == 3: abandon all hope!
		 */
		if (steal_from_global > 2) {
			btrfs_warn(root->fs_info,
				"Could not get space for a delete, will truncate on mount %d",
				ret);
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		/*
		 * We can't just steal from the global reserve, we need to make
		 * sure there is room to do it, if not we need to commit and try
		 * again.
		 */
		if (steal_from_global) {
			if (!btrfs_check_space_for_delayed_refs(trans, root))
				ret = btrfs_block_rsv_migrate(global_rsv, rsv,
							      min_size);
			else
				ret = -ENOSPC;
		}

		/*
		 * Couldn't steal from the global reserve, we have too much
		 * pending stuff built up, commit the transaction and try it
		 * again.
		 */
		if (ret) {
			ret = btrfs_commit_transaction(trans, root);
			if (ret) {
				btrfs_orphan_del(NULL, inode);
				btrfs_free_block_rsv(root, rsv);
				goto no_delete;
			}
			continue;
		} else {
			steal_from_global = 0;
		}

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root);
	}

	btrfs_free_block_rsv(root, rsv);

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items
	 * in the tree. They will be cleaned up on the next mount.
	 */
	if (ret == 0) {
		trans->block_rsv = root->orphan_block_rsv;
		btrfs_orphan_del(trans, inode);
	} else {
		btrfs_orphan_del(NULL, inode);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	if (!(root == root->fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
		btrfs_return_ino(root, btrfs_ino(inode));

	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
no_delete:
	btrfs_remove_delayed_node(inode);
	clear_inode(inode);
	return;
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
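 * (Directory entries are stored as dir items keyed by the directory's
 * inode number, BTRFS_DIR_ITEM_KEY and a 64-bit hash of the name, which
 * is what btrfs_lookup_dir_item() searches for below.)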
5313 */ 5314 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 5315 struct btrfs_key *location) 5316 { 5317 const char *name = dentry->d_name.name; 5318 int namelen = dentry->d_name.len; 5319 struct btrfs_dir_item *di; 5320 struct btrfs_path *path; 5321 struct btrfs_root *root = BTRFS_I(dir)->root; 5322 int ret = 0; 5323 5324 path = btrfs_alloc_path(); 5325 if (!path) 5326 return -ENOMEM; 5327 5328 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, 5329 namelen, 0); 5330 if (IS_ERR(di)) 5331 ret = PTR_ERR(di); 5332 5333 if (IS_ERR_OR_NULL(di)) 5334 goto out_err; 5335 5336 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 5337 out: 5338 btrfs_free_path(path); 5339 return ret; 5340 out_err: 5341 location->objectid = 0; 5342 goto out; 5343 } 5344 5345 /* 5346 * when we hit a tree root in a directory, the btrfs part of the inode 5347 * needs to be changed to reflect the root directory of the tree root. This 5348 * is kind of like crossing a mount point. 5349 */ 5350 static int fixup_tree_root_location(struct btrfs_root *root, 5351 struct inode *dir, 5352 struct dentry *dentry, 5353 struct btrfs_key *location, 5354 struct btrfs_root **sub_root) 5355 { 5356 struct btrfs_path *path; 5357 struct btrfs_root *new_root; 5358 struct btrfs_root_ref *ref; 5359 struct extent_buffer *leaf; 5360 struct btrfs_key key; 5361 int ret; 5362 int err = 0; 5363 5364 path = btrfs_alloc_path(); 5365 if (!path) { 5366 err = -ENOMEM; 5367 goto out; 5368 } 5369 5370 err = -ENOENT; 5371 key.objectid = BTRFS_I(dir)->root->root_key.objectid; 5372 key.type = BTRFS_ROOT_REF_KEY; 5373 key.offset = location->objectid; 5374 5375 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path, 5376 0, 0); 5377 if (ret) { 5378 if (ret < 0) 5379 err = ret; 5380 goto out; 5381 } 5382 5383 leaf = path->nodes[0]; 5384 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 5385 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 5386 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 5387 goto out; 5388 5389 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 5390 (unsigned long)(ref + 1), 5391 dentry->d_name.len); 5392 if (ret) 5393 goto out; 5394 5395 btrfs_release_path(path); 5396 5397 new_root = btrfs_read_fs_root_no_name(root->fs_info, location); 5398 if (IS_ERR(new_root)) { 5399 err = PTR_ERR(new_root); 5400 goto out; 5401 } 5402 5403 *sub_root = new_root; 5404 location->objectid = btrfs_root_dirid(&new_root->root_item); 5405 location->type = BTRFS_INODE_ITEM_KEY; 5406 location->offset = 0; 5407 err = 0; 5408 out: 5409 btrfs_free_path(path); 5410 return err; 5411 } 5412 5413 static void inode_tree_add(struct inode *inode) 5414 { 5415 struct btrfs_root *root = BTRFS_I(inode)->root; 5416 struct btrfs_inode *entry; 5417 struct rb_node **p; 5418 struct rb_node *parent; 5419 struct rb_node *new = &BTRFS_I(inode)->rb_node; 5420 u64 ino = btrfs_ino(inode); 5421 5422 if (inode_unhashed(inode)) 5423 return; 5424 parent = NULL; 5425 spin_lock(&root->inode_lock); 5426 p = &root->inode_tree.rb_node; 5427 while (*p) { 5428 parent = *p; 5429 entry = rb_entry(parent, struct btrfs_inode, rb_node); 5430 5431 if (ino < btrfs_ino(&entry->vfs_inode)) 5432 p = &parent->rb_left; 5433 else if (ino > btrfs_ino(&entry->vfs_inode)) 5434 p = &parent->rb_right; 5435 else { 5436 WARN_ON(!(entry->vfs_inode.i_state & 5437 (I_WILL_FREE | I_FREEING))); 5438 rb_replace_node(parent, new, &root->inode_tree); 5439 RB_CLEAR_NODE(parent); 5440 spin_unlock(&root->inode_lock); 5441 return; 5442 
} 5443 } 5444 rb_link_node(new, parent, p); 5445 rb_insert_color(new, &root->inode_tree); 5446 spin_unlock(&root->inode_lock); 5447 } 5448 5449 static void inode_tree_del(struct inode *inode) 5450 { 5451 struct btrfs_root *root = BTRFS_I(inode)->root; 5452 int empty = 0; 5453 5454 spin_lock(&root->inode_lock); 5455 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 5456 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 5457 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 5458 empty = RB_EMPTY_ROOT(&root->inode_tree); 5459 } 5460 spin_unlock(&root->inode_lock); 5461 5462 if (empty && btrfs_root_refs(&root->root_item) == 0) { 5463 synchronize_srcu(&root->fs_info->subvol_srcu); 5464 spin_lock(&root->inode_lock); 5465 empty = RB_EMPTY_ROOT(&root->inode_tree); 5466 spin_unlock(&root->inode_lock); 5467 if (empty) 5468 btrfs_add_dead_root(root); 5469 } 5470 } 5471 5472 void btrfs_invalidate_inodes(struct btrfs_root *root) 5473 { 5474 struct rb_node *node; 5475 struct rb_node *prev; 5476 struct btrfs_inode *entry; 5477 struct inode *inode; 5478 u64 objectid = 0; 5479 5480 if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) 5481 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 5482 5483 spin_lock(&root->inode_lock); 5484 again: 5485 node = root->inode_tree.rb_node; 5486 prev = NULL; 5487 while (node) { 5488 prev = node; 5489 entry = rb_entry(node, struct btrfs_inode, rb_node); 5490 5491 if (objectid < btrfs_ino(&entry->vfs_inode)) 5492 node = node->rb_left; 5493 else if (objectid > btrfs_ino(&entry->vfs_inode)) 5494 node = node->rb_right; 5495 else 5496 break; 5497 } 5498 if (!node) { 5499 while (prev) { 5500 entry = rb_entry(prev, struct btrfs_inode, rb_node); 5501 if (objectid <= btrfs_ino(&entry->vfs_inode)) { 5502 node = prev; 5503 break; 5504 } 5505 prev = rb_next(prev); 5506 } 5507 } 5508 while (node) { 5509 entry = rb_entry(node, struct btrfs_inode, rb_node); 5510 objectid = btrfs_ino(&entry->vfs_inode) + 1; 5511 inode = igrab(&entry->vfs_inode); 5512 if (inode) { 5513 spin_unlock(&root->inode_lock); 5514 if (atomic_read(&inode->i_count) > 1) 5515 d_prune_aliases(inode); 5516 /* 5517 * btrfs_drop_inode will have it removed from 5518 * the inode cache when its usage count 5519 * hits zero. 
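			 * After the iput() below we retake inode_lock and
			 * restart the rbtree search from the saved objectid,
			 * since the tree may have changed while the lock was
			 * dropped.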
5520 */ 5521 iput(inode); 5522 cond_resched(); 5523 spin_lock(&root->inode_lock); 5524 goto again; 5525 } 5526 5527 if (cond_resched_lock(&root->inode_lock)) 5528 goto again; 5529 5530 node = rb_next(node); 5531 } 5532 spin_unlock(&root->inode_lock); 5533 } 5534 5535 static int btrfs_init_locked_inode(struct inode *inode, void *p) 5536 { 5537 struct btrfs_iget_args *args = p; 5538 inode->i_ino = args->location->objectid; 5539 memcpy(&BTRFS_I(inode)->location, args->location, 5540 sizeof(*args->location)); 5541 BTRFS_I(inode)->root = args->root; 5542 return 0; 5543 } 5544 5545 static int btrfs_find_actor(struct inode *inode, void *opaque) 5546 { 5547 struct btrfs_iget_args *args = opaque; 5548 return args->location->objectid == BTRFS_I(inode)->location.objectid && 5549 args->root == BTRFS_I(inode)->root; 5550 } 5551 5552 static struct inode *btrfs_iget_locked(struct super_block *s, 5553 struct btrfs_key *location, 5554 struct btrfs_root *root) 5555 { 5556 struct inode *inode; 5557 struct btrfs_iget_args args; 5558 unsigned long hashval = btrfs_inode_hash(location->objectid, root); 5559 5560 args.location = location; 5561 args.root = root; 5562 5563 inode = iget5_locked(s, hashval, btrfs_find_actor, 5564 btrfs_init_locked_inode, 5565 (void *)&args); 5566 return inode; 5567 } 5568 5569 /* Get an inode object given its location and corresponding root. 5570 * Returns in *is_new if the inode was read from disk 5571 */ 5572 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 5573 struct btrfs_root *root, int *new) 5574 { 5575 struct inode *inode; 5576 5577 inode = btrfs_iget_locked(s, location, root); 5578 if (!inode) 5579 return ERR_PTR(-ENOMEM); 5580 5581 if (inode->i_state & I_NEW) { 5582 btrfs_read_locked_inode(inode); 5583 if (!is_bad_inode(inode)) { 5584 inode_tree_add(inode); 5585 unlock_new_inode(inode); 5586 if (new) 5587 *new = 1; 5588 } else { 5589 unlock_new_inode(inode); 5590 iput(inode); 5591 inode = ERR_PTR(-ESTALE); 5592 } 5593 } 5594 5595 return inode; 5596 } 5597 5598 static struct inode *new_simple_dir(struct super_block *s, 5599 struct btrfs_key *key, 5600 struct btrfs_root *root) 5601 { 5602 struct inode *inode = new_inode(s); 5603 5604 if (!inode) 5605 return ERR_PTR(-ENOMEM); 5606 5607 BTRFS_I(inode)->root = root; 5608 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 5609 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); 5610 5611 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5612 inode->i_op = &btrfs_dir_ro_inode_operations; 5613 inode->i_fop = &simple_dir_operations; 5614 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5615 inode->i_mtime = CURRENT_TIME; 5616 inode->i_atime = inode->i_mtime; 5617 inode->i_ctime = inode->i_mtime; 5618 BTRFS_I(inode)->i_otime = inode->i_mtime; 5619 5620 return inode; 5621 } 5622 5623 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 5624 { 5625 struct inode *inode; 5626 struct btrfs_root *root = BTRFS_I(dir)->root; 5627 struct btrfs_root *sub_root = root; 5628 struct btrfs_key location; 5629 int index; 5630 int ret = 0; 5631 5632 if (dentry->d_name.len > BTRFS_NAME_LEN) 5633 return ERR_PTR(-ENAMETOOLONG); 5634 5635 ret = btrfs_inode_by_name(dir, dentry, &location); 5636 if (ret < 0) 5637 return ERR_PTR(ret); 5638 5639 if (location.objectid == 0) 5640 return ERR_PTR(-ENOENT); 5641 5642 if (location.type == BTRFS_INODE_ITEM_KEY) { 5643 inode = btrfs_iget(dir->i_sb, &location, root, NULL); 5644 return inode; 5645 } 5646 5647 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); 
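	/*
	 * The name refers to a subvolume root, so hop into the subvolume's
	 * own tree - much like crossing a mount point (see
	 * fixup_tree_root_location() above).
	 */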
5648 5649 index = srcu_read_lock(&root->fs_info->subvol_srcu); 5650 ret = fixup_tree_root_location(root, dir, dentry, 5651 &location, &sub_root); 5652 if (ret < 0) { 5653 if (ret != -ENOENT) 5654 inode = ERR_PTR(ret); 5655 else 5656 inode = new_simple_dir(dir->i_sb, &location, sub_root); 5657 } else { 5658 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); 5659 } 5660 srcu_read_unlock(&root->fs_info->subvol_srcu, index); 5661 5662 if (!IS_ERR(inode) && root != sub_root) { 5663 down_read(&root->fs_info->cleanup_work_sem); 5664 if (!(inode->i_sb->s_flags & MS_RDONLY)) 5665 ret = btrfs_orphan_cleanup(sub_root); 5666 up_read(&root->fs_info->cleanup_work_sem); 5667 if (ret) { 5668 iput(inode); 5669 inode = ERR_PTR(ret); 5670 } 5671 } 5672 5673 return inode; 5674 } 5675 5676 static int btrfs_dentry_delete(const struct dentry *dentry) 5677 { 5678 struct btrfs_root *root; 5679 struct inode *inode = d_inode(dentry); 5680 5681 if (!inode && !IS_ROOT(dentry)) 5682 inode = d_inode(dentry->d_parent); 5683 5684 if (inode) { 5685 root = BTRFS_I(inode)->root; 5686 if (btrfs_root_refs(&root->root_item) == 0) 5687 return 1; 5688 5689 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 5690 return 1; 5691 } 5692 return 0; 5693 } 5694 5695 static void btrfs_dentry_release(struct dentry *dentry) 5696 { 5697 kfree(dentry->d_fsdata); 5698 } 5699 5700 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 5701 unsigned int flags) 5702 { 5703 struct inode *inode; 5704 5705 inode = btrfs_lookup_dentry(dir, dentry); 5706 if (IS_ERR(inode)) { 5707 if (PTR_ERR(inode) == -ENOENT) 5708 inode = NULL; 5709 else 5710 return ERR_CAST(inode); 5711 } 5712 5713 return d_splice_alias(inode, dentry); 5714 } 5715 5716 unsigned char btrfs_filetype_table[] = { 5717 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 5718 }; 5719 5720 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) 5721 { 5722 struct inode *inode = file_inode(file); 5723 struct btrfs_root *root = BTRFS_I(inode)->root; 5724 struct btrfs_item *item; 5725 struct btrfs_dir_item *di; 5726 struct btrfs_key key; 5727 struct btrfs_key found_key; 5728 struct btrfs_path *path; 5729 struct list_head ins_list; 5730 struct list_head del_list; 5731 int ret; 5732 struct extent_buffer *leaf; 5733 int slot; 5734 unsigned char d_type; 5735 int over = 0; 5736 u32 di_cur; 5737 u32 di_total; 5738 u32 di_len; 5739 int key_type = BTRFS_DIR_INDEX_KEY; 5740 char tmp_name[32]; 5741 char *name_ptr; 5742 int name_len; 5743 int is_curr = 0; /* ctx->pos points to the current index? 
*/ 5744 5745 /* FIXME, use a real flag for deciding about the key type */ 5746 if (root->fs_info->tree_root == root) 5747 key_type = BTRFS_DIR_ITEM_KEY; 5748 5749 if (!dir_emit_dots(file, ctx)) 5750 return 0; 5751 5752 path = btrfs_alloc_path(); 5753 if (!path) 5754 return -ENOMEM; 5755 5756 path->reada = 1; 5757 5758 if (key_type == BTRFS_DIR_INDEX_KEY) { 5759 INIT_LIST_HEAD(&ins_list); 5760 INIT_LIST_HEAD(&del_list); 5761 btrfs_get_delayed_items(inode, &ins_list, &del_list); 5762 } 5763 5764 key.type = key_type; 5765 key.offset = ctx->pos; 5766 key.objectid = btrfs_ino(inode); 5767 5768 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5769 if (ret < 0) 5770 goto err; 5771 5772 while (1) { 5773 leaf = path->nodes[0]; 5774 slot = path->slots[0]; 5775 if (slot >= btrfs_header_nritems(leaf)) { 5776 ret = btrfs_next_leaf(root, path); 5777 if (ret < 0) 5778 goto err; 5779 else if (ret > 0) 5780 break; 5781 continue; 5782 } 5783 5784 item = btrfs_item_nr(slot); 5785 btrfs_item_key_to_cpu(leaf, &found_key, slot); 5786 5787 if (found_key.objectid != key.objectid) 5788 break; 5789 if (found_key.type != key_type) 5790 break; 5791 if (found_key.offset < ctx->pos) 5792 goto next; 5793 if (key_type == BTRFS_DIR_INDEX_KEY && 5794 btrfs_should_delete_dir_index(&del_list, 5795 found_key.offset)) 5796 goto next; 5797 5798 ctx->pos = found_key.offset; 5799 is_curr = 1; 5800 5801 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 5802 di_cur = 0; 5803 di_total = btrfs_item_size(leaf, item); 5804 5805 while (di_cur < di_total) { 5806 struct btrfs_key location; 5807 5808 if (verify_dir_item(root, leaf, di)) 5809 break; 5810 5811 name_len = btrfs_dir_name_len(leaf, di); 5812 if (name_len <= sizeof(tmp_name)) { 5813 name_ptr = tmp_name; 5814 } else { 5815 name_ptr = kmalloc(name_len, GFP_NOFS); 5816 if (!name_ptr) { 5817 ret = -ENOMEM; 5818 goto err; 5819 } 5820 } 5821 read_extent_buffer(leaf, name_ptr, 5822 (unsigned long)(di + 1), name_len); 5823 5824 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 5825 btrfs_dir_item_key_to_cpu(leaf, di, &location); 5826 5827 5828 /* is this a reference to our own snapshot? If so 5829 * skip it. 5830 * 5831 * In contrast to old kernels, we insert the snapshot's 5832 * dir item and dir index after it has been created, so 5833 * we won't find a reference to our own snapshot. We 5834 * still keep the following code for backward 5835 * compatibility. 5836 */ 5837 if (location.type == BTRFS_ROOT_ITEM_KEY && 5838 location.objectid == root->root_key.objectid) { 5839 over = 0; 5840 goto skip; 5841 } 5842 over = !dir_emit(ctx, name_ptr, name_len, 5843 location.objectid, d_type); 5844 5845 skip: 5846 if (name_ptr != tmp_name) 5847 kfree(name_ptr); 5848 5849 if (over) 5850 goto nopos; 5851 di_len = btrfs_dir_name_len(leaf, di) + 5852 btrfs_dir_data_len(leaf, di) + sizeof(*di); 5853 di_cur += di_len; 5854 di = (struct btrfs_dir_item *)((char *)di + di_len); 5855 } 5856 next: 5857 path->slots[0]++; 5858 } 5859 5860 if (key_type == BTRFS_DIR_INDEX_KEY) { 5861 if (is_curr) 5862 ctx->pos++; 5863 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5864 if (ret) 5865 goto nopos; 5866 } 5867 5868 /* Reached end of directory/root. Bump pos past the last item. */ 5869 ctx->pos++; 5870 5871 /* 5872 * Stop new entries from being returned after we return the last 5873 * entry. 5874 * 5875 * New directory entries are assigned a strictly increasing 5876 * offset. This means that new entries created during readdir 5877 * are *guaranteed* to be seen in the future by that readdir. 
5878 * This has broken buggy programs which operate on names as 5879 * they're returned by readdir. Until we re-use freed offsets 5880 * we have this hack to stop new entries from being returned 5881 * under the assumption that they'll never reach this huge 5882 * offset. 5883 * 5884 * This is being careful not to overflow 32bit loff_t unless the 5885 * last entry requires it because doing so has broken 32bit apps 5886 * in the past. 5887 */ 5888 if (key_type == BTRFS_DIR_INDEX_KEY) { 5889 if (ctx->pos >= INT_MAX) 5890 ctx->pos = LLONG_MAX; 5891 else 5892 ctx->pos = INT_MAX; 5893 } 5894 nopos: 5895 ret = 0; 5896 err: 5897 if (key_type == BTRFS_DIR_INDEX_KEY) 5898 btrfs_put_delayed_items(&ins_list, &del_list); 5899 btrfs_free_path(path); 5900 return ret; 5901 } 5902 5903 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) 5904 { 5905 struct btrfs_root *root = BTRFS_I(inode)->root; 5906 struct btrfs_trans_handle *trans; 5907 int ret = 0; 5908 bool nolock = false; 5909 5910 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 5911 return 0; 5912 5913 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) 5914 nolock = true; 5915 5916 if (wbc->sync_mode == WB_SYNC_ALL) { 5917 if (nolock) 5918 trans = btrfs_join_transaction_nolock(root); 5919 else 5920 trans = btrfs_join_transaction(root); 5921 if (IS_ERR(trans)) 5922 return PTR_ERR(trans); 5923 ret = btrfs_commit_transaction(trans, root); 5924 } 5925 return ret; 5926 } 5927 5928 /* 5929 * This is somewhat expensive, updating the tree every time the 5930 * inode changes. But, it is most likely to find the inode in cache. 5931 * FIXME, needs more benchmarking...there are no reasons other than performance 5932 * to keep or drop this code. 5933 */ 5934 static int btrfs_dirty_inode(struct inode *inode) 5935 { 5936 struct btrfs_root *root = BTRFS_I(inode)->root; 5937 struct btrfs_trans_handle *trans; 5938 int ret; 5939 5940 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) 5941 return 0; 5942 5943 trans = btrfs_join_transaction(root); 5944 if (IS_ERR(trans)) 5945 return PTR_ERR(trans); 5946 5947 ret = btrfs_update_inode(trans, root, inode); 5948 if (ret && ret == -ENOSPC) { 5949 /* whoops, lets try again with the full transaction */ 5950 btrfs_end_transaction(trans, root); 5951 trans = btrfs_start_transaction(root, 1); 5952 if (IS_ERR(trans)) 5953 return PTR_ERR(trans); 5954 5955 ret = btrfs_update_inode(trans, root, inode); 5956 } 5957 btrfs_end_transaction(trans, root); 5958 if (BTRFS_I(inode)->delayed_node) 5959 btrfs_balance_delayed_items(root); 5960 5961 return ret; 5962 } 5963 5964 /* 5965 * This is a copy of file_update_time. We need this so we can return error on 5966 * ENOSPC for updating the inode in the case of file write and mmap writes. 
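 *
 * Illustrative caller-side sketch (an assumption for illustration,
 * not code from this file), using the usual VFS update_time hook:
 *
 *	err = inode->i_op->update_time(inode, &now, S_CTIME | S_MTIME);
 *	if (err)
 *		return err;	// -ENOSPC from btrfs_dirty_inode() surfaces here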
5967 */ 5968 static int btrfs_update_time(struct inode *inode, struct timespec *now, 5969 int flags) 5970 { 5971 struct btrfs_root *root = BTRFS_I(inode)->root; 5972 5973 if (btrfs_root_readonly(root)) 5974 return -EROFS; 5975 5976 if (flags & S_VERSION) 5977 inode_inc_iversion(inode); 5978 if (flags & S_CTIME) 5979 inode->i_ctime = *now; 5980 if (flags & S_MTIME) 5981 inode->i_mtime = *now; 5982 if (flags & S_ATIME) 5983 inode->i_atime = *now; 5984 return btrfs_dirty_inode(inode); 5985 } 5986 5987 /* 5988 * find the highest existing sequence number in a directory 5989 * and then set the in-memory index_cnt variable to reflect 5990 * free sequence numbers 5991 */ 5992 static int btrfs_set_inode_index_count(struct inode *inode) 5993 { 5994 struct btrfs_root *root = BTRFS_I(inode)->root; 5995 struct btrfs_key key, found_key; 5996 struct btrfs_path *path; 5997 struct extent_buffer *leaf; 5998 int ret; 5999 6000 key.objectid = btrfs_ino(inode); 6001 key.type = BTRFS_DIR_INDEX_KEY; 6002 key.offset = (u64)-1; 6003 6004 path = btrfs_alloc_path(); 6005 if (!path) 6006 return -ENOMEM; 6007 6008 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6009 if (ret < 0) 6010 goto out; 6011 /* FIXME: we should be able to handle this */ 6012 if (ret == 0) 6013 goto out; 6014 ret = 0; 6015 6016 /* 6017 * MAGIC NUMBER EXPLANATION: 6018 * since we search a directory based on f_pos we have to start at 2 6019 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody 6020 * else has to start at 2 6021 */ 6022 if (path->slots[0] == 0) { 6023 BTRFS_I(inode)->index_cnt = 2; 6024 goto out; 6025 } 6026 6027 path->slots[0]--; 6028 6029 leaf = path->nodes[0]; 6030 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6031 6032 if (found_key.objectid != btrfs_ino(inode) || 6033 found_key.type != BTRFS_DIR_INDEX_KEY) { 6034 BTRFS_I(inode)->index_cnt = 2; 6035 goto out; 6036 } 6037 6038 BTRFS_I(inode)->index_cnt = found_key.offset + 1; 6039 out: 6040 btrfs_free_path(path); 6041 return ret; 6042 } 6043 6044 /* 6045 * helper to find a free sequence number in a given directory. This current 6046 * code is very simple, later versions will do smarter things in the btree 6047 */ 6048 int btrfs_set_inode_index(struct inode *dir, u64 *index) 6049 { 6050 int ret = 0; 6051 6052 if (BTRFS_I(dir)->index_cnt == (u64)-1) { 6053 ret = btrfs_inode_delayed_dir_index_count(dir); 6054 if (ret) { 6055 ret = btrfs_set_inode_index_count(dir); 6056 if (ret) 6057 return ret; 6058 } 6059 } 6060 6061 *index = BTRFS_I(dir)->index_cnt; 6062 BTRFS_I(dir)->index_cnt++; 6063 6064 return ret; 6065 } 6066 6067 static int btrfs_insert_inode_locked(struct inode *inode) 6068 { 6069 struct btrfs_iget_args args; 6070 args.location = &BTRFS_I(inode)->location; 6071 args.root = BTRFS_I(inode)->root; 6072 6073 return insert_inode_locked4(inode, 6074 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), 6075 btrfs_find_actor, &args); 6076 } 6077 6078 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, 6079 struct btrfs_root *root, 6080 struct inode *dir, 6081 const char *name, int name_len, 6082 u64 ref_objectid, u64 objectid, 6083 umode_t mode, u64 *index) 6084 { 6085 struct inode *inode; 6086 struct btrfs_inode_item *inode_item; 6087 struct btrfs_key *location; 6088 struct btrfs_path *path; 6089 struct btrfs_inode_ref *ref; 6090 struct btrfs_key key[2]; 6091 u32 sizes[2]; 6092 int nitems = name ? 
2 : 1; 6093 unsigned long ptr; 6094 int ret; 6095 6096 path = btrfs_alloc_path(); 6097 if (!path) 6098 return ERR_PTR(-ENOMEM); 6099 6100 inode = new_inode(root->fs_info->sb); 6101 if (!inode) { 6102 btrfs_free_path(path); 6103 return ERR_PTR(-ENOMEM); 6104 } 6105 6106 /* 6107 * O_TMPFILE, set link count to 0, so that after this point, 6108 * we fill in an inode item with the correct link count. 6109 */ 6110 if (!name) 6111 set_nlink(inode, 0); 6112 6113 /* 6114 * we have to initialize this early, so we can reclaim the inode 6115 * number if we fail afterwards in this function. 6116 */ 6117 inode->i_ino = objectid; 6118 6119 if (dir && name) { 6120 trace_btrfs_inode_request(dir); 6121 6122 ret = btrfs_set_inode_index(dir, index); 6123 if (ret) { 6124 btrfs_free_path(path); 6125 iput(inode); 6126 return ERR_PTR(ret); 6127 } 6128 } else if (dir) { 6129 *index = 0; 6130 } 6131 /* 6132 * index_cnt is ignored for everything but a dir, 6133 * btrfs_get_inode_index_count has an explanation for the magic 6134 * number 6135 */ 6136 BTRFS_I(inode)->index_cnt = 2; 6137 BTRFS_I(inode)->dir_index = *index; 6138 BTRFS_I(inode)->root = root; 6139 BTRFS_I(inode)->generation = trans->transid; 6140 inode->i_generation = BTRFS_I(inode)->generation; 6141 6142 /* 6143 * We could have gotten an inode number from somebody who was fsynced 6144 * and then removed in this same transaction, so let's just set full 6145 * sync since it will be a full sync anyway and this will blow away the 6146 * old info in the log. 6147 */ 6148 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 6149 6150 key[0].objectid = objectid; 6151 key[0].type = BTRFS_INODE_ITEM_KEY; 6152 key[0].offset = 0; 6153 6154 sizes[0] = sizeof(struct btrfs_inode_item); 6155 6156 if (name) { 6157 /* 6158 * Start new inodes with an inode_ref. This is slightly more 6159 * efficient for small numbers of hard links since they will 6160 * be packed into one item. Extended refs will kick in if we 6161 * add more hard links than can fit in the ref item. 
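 *
 * Illustrative layout for a 3-byte name (example values only):
 * key[1] = (objectid, INODE_REF_KEY, ref_objectid) with
 * sizes[1] = sizeof(struct btrfs_inode_ref) + 3, so the name bytes
 * land right behind the ref header inside the same leaf item, next
 * to the inode item described by key[0].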
6162 */ 6163 key[1].objectid = objectid; 6164 key[1].type = BTRFS_INODE_REF_KEY; 6165 key[1].offset = ref_objectid; 6166 6167 sizes[1] = name_len + sizeof(*ref); 6168 } 6169 6170 location = &BTRFS_I(inode)->location; 6171 location->objectid = objectid; 6172 location->offset = 0; 6173 location->type = BTRFS_INODE_ITEM_KEY; 6174 6175 ret = btrfs_insert_inode_locked(inode); 6176 if (ret < 0) 6177 goto fail; 6178 6179 path->leave_spinning = 1; 6180 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); 6181 if (ret != 0) 6182 goto fail_unlock; 6183 6184 inode_init_owner(inode, dir, mode); 6185 inode_set_bytes(inode, 0); 6186 6187 inode->i_mtime = CURRENT_TIME; 6188 inode->i_atime = inode->i_mtime; 6189 inode->i_ctime = inode->i_mtime; 6190 BTRFS_I(inode)->i_otime = inode->i_mtime; 6191 6192 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 6193 struct btrfs_inode_item); 6194 memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, 6195 sizeof(*inode_item)); 6196 fill_inode_item(trans, path->nodes[0], inode_item, inode); 6197 6198 if (name) { 6199 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, 6200 struct btrfs_inode_ref); 6201 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); 6202 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); 6203 ptr = (unsigned long)(ref + 1); 6204 write_extent_buffer(path->nodes[0], name, ptr, name_len); 6205 } 6206 6207 btrfs_mark_buffer_dirty(path->nodes[0]); 6208 btrfs_free_path(path); 6209 6210 btrfs_inherit_iflags(inode, dir); 6211 6212 if (S_ISREG(mode)) { 6213 if (btrfs_test_opt(root, NODATASUM)) 6214 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; 6215 if (btrfs_test_opt(root, NODATACOW)) 6216 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | 6217 BTRFS_INODE_NODATASUM; 6218 } 6219 6220 inode_tree_add(inode); 6221 6222 trace_btrfs_inode_new(inode); 6223 btrfs_set_inode_last_trans(trans, inode); 6224 6225 btrfs_update_root_times(trans, root); 6226 6227 ret = btrfs_inode_inherit_props(trans, inode, dir); 6228 if (ret) 6229 btrfs_err(root->fs_info, 6230 "error inheriting props for ino %llu (root %llu): %d", 6231 btrfs_ino(inode), root->root_key.objectid, ret); 6232 6233 return inode; 6234 6235 fail_unlock: 6236 unlock_new_inode(inode); 6237 fail: 6238 if (dir && name) 6239 BTRFS_I(dir)->index_cnt--; 6240 btrfs_free_path(path); 6241 iput(inode); 6242 return ERR_PTR(ret); 6243 } 6244 6245 static inline u8 btrfs_inode_type(struct inode *inode) 6246 { 6247 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; 6248 } 6249 6250 /* 6251 * utility function to add 'inode' into 'parent_inode' with 6252 * a give name and a given sequence number. 6253 * if 'add_backref' is true, also insert a backref from the 6254 * inode to the parent directory. 
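 *
 * For example, linking the 3-byte name "foo" stores the name twice
 * (once in a dir item keyed by name hash, once in a dir index keyed
 * by the sequence number), which is why the parent directory's
 * i_size is grown by name_len * 2 further down.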
6255 */ 6256 int btrfs_add_link(struct btrfs_trans_handle *trans, 6257 struct inode *parent_inode, struct inode *inode, 6258 const char *name, int name_len, int add_backref, u64 index) 6259 { 6260 int ret = 0; 6261 struct btrfs_key key; 6262 struct btrfs_root *root = BTRFS_I(parent_inode)->root; 6263 u64 ino = btrfs_ino(inode); 6264 u64 parent_ino = btrfs_ino(parent_inode); 6265 6266 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6267 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); 6268 } else { 6269 key.objectid = ino; 6270 key.type = BTRFS_INODE_ITEM_KEY; 6271 key.offset = 0; 6272 } 6273 6274 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6275 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 6276 key.objectid, root->root_key.objectid, 6277 parent_ino, index, name, name_len); 6278 } else if (add_backref) { 6279 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 6280 parent_ino, index); 6281 } 6282 6283 /* Nothing to clean up yet */ 6284 if (ret) 6285 return ret; 6286 6287 ret = btrfs_insert_dir_item(trans, root, name, name_len, 6288 parent_inode, &key, 6289 btrfs_inode_type(inode), index); 6290 if (ret == -EEXIST || ret == -EOVERFLOW) 6291 goto fail_dir_item; 6292 else if (ret) { 6293 btrfs_abort_transaction(trans, root, ret); 6294 return ret; 6295 } 6296 6297 btrfs_i_size_write(parent_inode, parent_inode->i_size + 6298 name_len * 2); 6299 inode_inc_iversion(parent_inode); 6300 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 6301 ret = btrfs_update_inode(trans, root, parent_inode); 6302 if (ret) 6303 btrfs_abort_transaction(trans, root, ret); 6304 return ret; 6305 6306 fail_dir_item: 6307 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 6308 u64 local_index; 6309 int err; 6310 err = btrfs_del_root_ref(trans, root->fs_info->tree_root, 6311 key.objectid, root->root_key.objectid, 6312 parent_ino, &local_index, name, name_len); 6313 6314 } else if (add_backref) { 6315 u64 local_index; 6316 int err; 6317 6318 err = btrfs_del_inode_ref(trans, root, name, name_len, 6319 ino, parent_ino, &local_index); 6320 } 6321 return ret; 6322 } 6323 6324 static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 6325 struct inode *dir, struct dentry *dentry, 6326 struct inode *inode, int backref, u64 index) 6327 { 6328 int err = btrfs_add_link(trans, dir, inode, 6329 dentry->d_name.name, dentry->d_name.len, 6330 backref, index); 6331 if (err > 0) 6332 err = -EEXIST; 6333 return err; 6334 } 6335 6336 static int btrfs_mknod(struct inode *dir, struct dentry *dentry, 6337 umode_t mode, dev_t rdev) 6338 { 6339 struct btrfs_trans_handle *trans; 6340 struct btrfs_root *root = BTRFS_I(dir)->root; 6341 struct inode *inode = NULL; 6342 int err; 6343 int drop_inode = 0; 6344 u64 objectid; 6345 u64 index = 0; 6346 6347 /* 6348 * 2 for inode item and ref 6349 * 2 for dir items 6350 * 1 for xattr if selinux is on 6351 */ 6352 trans = btrfs_start_transaction(root, 5); 6353 if (IS_ERR(trans)) 6354 return PTR_ERR(trans); 6355 6356 err = btrfs_find_free_ino(root, &objectid); 6357 if (err) 6358 goto out_unlock; 6359 6360 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6361 dentry->d_name.len, btrfs_ino(dir), objectid, 6362 mode, &index); 6363 if (IS_ERR(inode)) { 6364 err = PTR_ERR(inode); 6365 goto out_unlock; 6366 } 6367 6368 /* 6369 * If the active LSM wants to access the inode during 6370 * d_instantiate it needs these. Smack checks to see 6371 * if the filesystem supports xattrs by looking at the 6372 * ops vector. 
6373 */ 6374 inode->i_op = &btrfs_special_inode_operations; 6375 init_special_inode(inode, inode->i_mode, rdev); 6376 6377 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6378 if (err) 6379 goto out_unlock_inode; 6380 6381 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 6382 if (err) { 6383 goto out_unlock_inode; 6384 } else { 6385 btrfs_update_inode(trans, root, inode); 6386 unlock_new_inode(inode); 6387 d_instantiate(dentry, inode); 6388 } 6389 6390 out_unlock: 6391 btrfs_end_transaction(trans, root); 6392 btrfs_balance_delayed_items(root); 6393 btrfs_btree_balance_dirty(root); 6394 if (drop_inode) { 6395 inode_dec_link_count(inode); 6396 iput(inode); 6397 } 6398 return err; 6399 6400 out_unlock_inode: 6401 drop_inode = 1; 6402 unlock_new_inode(inode); 6403 goto out_unlock; 6404 6405 } 6406 6407 static int btrfs_create(struct inode *dir, struct dentry *dentry, 6408 umode_t mode, bool excl) 6409 { 6410 struct btrfs_trans_handle *trans; 6411 struct btrfs_root *root = BTRFS_I(dir)->root; 6412 struct inode *inode = NULL; 6413 int drop_inode_on_err = 0; 6414 int err; 6415 u64 objectid; 6416 u64 index = 0; 6417 6418 /* 6419 * 2 for inode item and ref 6420 * 2 for dir items 6421 * 1 for xattr if selinux is on 6422 */ 6423 trans = btrfs_start_transaction(root, 5); 6424 if (IS_ERR(trans)) 6425 return PTR_ERR(trans); 6426 6427 err = btrfs_find_free_ino(root, &objectid); 6428 if (err) 6429 goto out_unlock; 6430 6431 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6432 dentry->d_name.len, btrfs_ino(dir), objectid, 6433 mode, &index); 6434 if (IS_ERR(inode)) { 6435 err = PTR_ERR(inode); 6436 goto out_unlock; 6437 } 6438 drop_inode_on_err = 1; 6439 /* 6440 * If the active LSM wants to access the inode during 6441 * d_instantiate it needs these. Smack checks to see 6442 * if the filesystem supports xattrs by looking at the 6443 * ops vector. 
6444 */ 6445 inode->i_fop = &btrfs_file_operations; 6446 inode->i_op = &btrfs_file_inode_operations; 6447 inode->i_mapping->a_ops = &btrfs_aops; 6448 6449 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6450 if (err) 6451 goto out_unlock_inode; 6452 6453 err = btrfs_update_inode(trans, root, inode); 6454 if (err) 6455 goto out_unlock_inode; 6456 6457 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 6458 if (err) 6459 goto out_unlock_inode; 6460 6461 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 6462 unlock_new_inode(inode); 6463 d_instantiate(dentry, inode); 6464 6465 out_unlock: 6466 btrfs_end_transaction(trans, root); 6467 if (err && drop_inode_on_err) { 6468 inode_dec_link_count(inode); 6469 iput(inode); 6470 } 6471 btrfs_balance_delayed_items(root); 6472 btrfs_btree_balance_dirty(root); 6473 return err; 6474 6475 out_unlock_inode: 6476 unlock_new_inode(inode); 6477 goto out_unlock; 6478 6479 } 6480 6481 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 6482 struct dentry *dentry) 6483 { 6484 struct btrfs_trans_handle *trans; 6485 struct btrfs_root *root = BTRFS_I(dir)->root; 6486 struct inode *inode = d_inode(old_dentry); 6487 u64 index; 6488 int err; 6489 int drop_inode = 0; 6490 6491 /* do not allow sys_link's with other subvols of the same device */ 6492 if (root->objectid != BTRFS_I(inode)->root->objectid) 6493 return -EXDEV; 6494 6495 if (inode->i_nlink >= BTRFS_LINK_MAX) 6496 return -EMLINK; 6497 6498 err = btrfs_set_inode_index(dir, &index); 6499 if (err) 6500 goto fail; 6501 6502 /* 6503 * 2 items for inode and inode ref 6504 * 2 items for dir items 6505 * 1 item for parent inode 6506 */ 6507 trans = btrfs_start_transaction(root, 5); 6508 if (IS_ERR(trans)) { 6509 err = PTR_ERR(trans); 6510 goto fail; 6511 } 6512 6513 /* There are several dir indexes for this inode, clear the cache. */ 6514 BTRFS_I(inode)->dir_index = 0ULL; 6515 inc_nlink(inode); 6516 inode_inc_iversion(inode); 6517 inode->i_ctime = CURRENT_TIME; 6518 ihold(inode); 6519 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); 6520 6521 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 6522 6523 if (err) { 6524 drop_inode = 1; 6525 } else { 6526 struct dentry *parent = dentry->d_parent; 6527 err = btrfs_update_inode(trans, root, inode); 6528 if (err) 6529 goto fail; 6530 if (inode->i_nlink == 1) { 6531 /* 6532 * If new hard link count is 1, it's a file created 6533 * with open(2) O_TMPFILE flag. 
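 *
 * Illustrative userspace flow (an assumption, not from this file):
 *
 *	char path[64];
 *	int fd = open("/mnt", O_TMPFILE | O_WRONLY, 0600);
 *	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, path, AT_FDCWD, "/mnt/file", AT_SYMLINK_FOLLOW);
 *
 * The tmpfile starts with nlink 0 and an orphan item; the link
 * raises nlink to 1, so the orphan item is deleted here.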
6534 */ 6535 err = btrfs_orphan_del(trans, inode); 6536 if (err) 6537 goto fail; 6538 } 6539 d_instantiate(dentry, inode); 6540 btrfs_log_new_name(trans, inode, NULL, parent); 6541 } 6542 6543 btrfs_end_transaction(trans, root); 6544 btrfs_balance_delayed_items(root); 6545 fail: 6546 if (drop_inode) { 6547 inode_dec_link_count(inode); 6548 iput(inode); 6549 } 6550 btrfs_btree_balance_dirty(root); 6551 return err; 6552 } 6553 6554 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 6555 { 6556 struct inode *inode = NULL; 6557 struct btrfs_trans_handle *trans; 6558 struct btrfs_root *root = BTRFS_I(dir)->root; 6559 int err = 0; 6560 int drop_on_err = 0; 6561 u64 objectid = 0; 6562 u64 index = 0; 6563 6564 /* 6565 * 2 items for inode and ref 6566 * 2 items for dir items 6567 * 1 for xattr if selinux is on 6568 */ 6569 trans = btrfs_start_transaction(root, 5); 6570 if (IS_ERR(trans)) 6571 return PTR_ERR(trans); 6572 6573 err = btrfs_find_free_ino(root, &objectid); 6574 if (err) 6575 goto out_fail; 6576 6577 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6578 dentry->d_name.len, btrfs_ino(dir), objectid, 6579 S_IFDIR | mode, &index); 6580 if (IS_ERR(inode)) { 6581 err = PTR_ERR(inode); 6582 goto out_fail; 6583 } 6584 6585 drop_on_err = 1; 6586 /* these must be set before we unlock the inode */ 6587 inode->i_op = &btrfs_dir_inode_operations; 6588 inode->i_fop = &btrfs_dir_file_operations; 6589 6590 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 6591 if (err) 6592 goto out_fail_inode; 6593 6594 btrfs_i_size_write(inode, 0); 6595 err = btrfs_update_inode(trans, root, inode); 6596 if (err) 6597 goto out_fail_inode; 6598 6599 err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, 6600 dentry->d_name.len, 0, index); 6601 if (err) 6602 goto out_fail_inode; 6603 6604 d_instantiate(dentry, inode); 6605 /* 6606 * mkdir is special. We're unlocking after we call d_instantiate 6607 * to avoid a race with nfsd calling d_instantiate. 6608 */ 6609 unlock_new_inode(inode); 6610 drop_on_err = 0; 6611 6612 out_fail: 6613 btrfs_end_transaction(trans, root); 6614 if (drop_on_err) { 6615 inode_dec_link_count(inode); 6616 iput(inode); 6617 } 6618 btrfs_balance_delayed_items(root); 6619 btrfs_btree_balance_dirty(root); 6620 return err; 6621 6622 out_fail_inode: 6623 unlock_new_inode(inode); 6624 goto out_fail; 6625 } 6626 6627 /* Find the next extent map after a given extent map; the caller needs to ensure locking */ 6628 static struct extent_map *next_extent_map(struct extent_map *em) 6629 { 6630 struct rb_node *next; 6631 6632 next = rb_next(&em->rb_node); 6633 if (!next) 6634 return NULL; 6635 return container_of(next, struct extent_map, rb_node); 6636 } 6637 6638 static struct extent_map *prev_extent_map(struct extent_map *em) 6639 { 6640 struct rb_node *prev; 6641 6642 prev = rb_prev(&em->rb_node); 6643 if (!prev) 6644 return NULL; 6645 return container_of(prev, struct extent_map, rb_node); 6646 } 6647 6648 /* helper for btrfs_get_extent. Given an existing extent in the tree (the 6649 * nearest extent to map_start) and a new extent that we want to insert, 6650 * deal with the overlap and insert 6651 * the best-fitting trimmed extent into the tree.
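 *
 * Worked example (made-up numbers): em spans [0, 16K) but the tree
 * already holds [4K, 8K) and map_start is 8K. 'existing' becomes
 * prev, so start = 8K and end = 16K, and em is trimmed to [8K, 16K)
 * with block_start advanced by the same 8K before insertion.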
6652 */ 6653 static int merge_extent_mapping(struct extent_map_tree *em_tree, 6654 struct extent_map *existing, 6655 struct extent_map *em, 6656 u64 map_start) 6657 { 6658 struct extent_map *prev; 6659 struct extent_map *next; 6660 u64 start; 6661 u64 end; 6662 u64 start_diff; 6663 6664 BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); 6665 6666 if (existing->start > map_start) { 6667 next = existing; 6668 prev = prev_extent_map(next); 6669 } else { 6670 prev = existing; 6671 next = next_extent_map(prev); 6672 } 6673 6674 start = prev ? extent_map_end(prev) : em->start; 6675 start = max_t(u64, start, em->start); 6676 end = next ? next->start : extent_map_end(em); 6677 end = min_t(u64, end, extent_map_end(em)); 6678 start_diff = start - em->start; 6679 em->start = start; 6680 em->len = end - start; 6681 if (em->block_start < EXTENT_MAP_LAST_BYTE && 6682 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { 6683 em->block_start += start_diff; 6684 em->block_len -= start_diff; 6685 } 6686 return add_extent_mapping(em_tree, em, 0); 6687 } 6688 6689 static noinline int uncompress_inline(struct btrfs_path *path, 6690 struct inode *inode, struct page *page, 6691 size_t pg_offset, u64 extent_offset, 6692 struct btrfs_file_extent_item *item) 6693 { 6694 int ret; 6695 struct extent_buffer *leaf = path->nodes[0]; 6696 char *tmp; 6697 size_t max_size; 6698 unsigned long inline_size; 6699 unsigned long ptr; 6700 int compress_type; 6701 6702 WARN_ON(pg_offset != 0); 6703 compress_type = btrfs_file_extent_compression(leaf, item); 6704 max_size = btrfs_file_extent_ram_bytes(leaf, item); 6705 inline_size = btrfs_file_extent_inline_item_len(leaf, 6706 btrfs_item_nr(path->slots[0])); 6707 tmp = kmalloc(inline_size, GFP_NOFS); 6708 if (!tmp) 6709 return -ENOMEM; 6710 ptr = btrfs_file_extent_inline_start(item); 6711 6712 read_extent_buffer(leaf, tmp, ptr, inline_size); 6713 6714 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); 6715 ret = btrfs_decompress(compress_type, tmp, page, 6716 extent_offset, inline_size, max_size); 6717 kfree(tmp); 6718 return ret; 6719 } 6720 6721 /* 6722 * a bit scary, this does extent mapping from logical file offset to the disk. 6723 * the ugly parts come from merging extents from the disk with the in-ram 6724 * representation. This gets more complex because of the data=ordered code, 6725 * where the in-ram extents might be locked pending data=ordered completion. 6726 * 6727 * This also copies inline extents directly into the page. 
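 *
 * Minimal read-side usage sketch (illustrative only):
 *
 *	em = btrfs_get_extent(inode, page, 0, start, PAGE_CACHE_SIZE, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	// on success em always covers 'start'; em->block_start is a
 *	// disk bytenr, EXTENT_MAP_HOLE or EXTENT_MAP_INLINE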
6728 */ 6729 6730 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 6731 size_t pg_offset, u64 start, u64 len, 6732 int create) 6733 { 6734 int ret; 6735 int err = 0; 6736 u64 extent_start = 0; 6737 u64 extent_end = 0; 6738 u64 objectid = btrfs_ino(inode); 6739 u32 found_type; 6740 struct btrfs_path *path = NULL; 6741 struct btrfs_root *root = BTRFS_I(inode)->root; 6742 struct btrfs_file_extent_item *item; 6743 struct extent_buffer *leaf; 6744 struct btrfs_key found_key; 6745 struct extent_map *em = NULL; 6746 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 6747 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 6748 struct btrfs_trans_handle *trans = NULL; 6749 const bool new_inline = !page || create; 6750 6751 again: 6752 read_lock(&em_tree->lock); 6753 em = lookup_extent_mapping(em_tree, start, len); 6754 if (em) 6755 em->bdev = root->fs_info->fs_devices->latest_bdev; 6756 read_unlock(&em_tree->lock); 6757 6758 if (em) { 6759 if (em->start > start || em->start + em->len <= start) 6760 free_extent_map(em); 6761 else if (em->block_start == EXTENT_MAP_INLINE && page) 6762 free_extent_map(em); 6763 else 6764 goto out; 6765 } 6766 em = alloc_extent_map(); 6767 if (!em) { 6768 err = -ENOMEM; 6769 goto out; 6770 } 6771 em->bdev = root->fs_info->fs_devices->latest_bdev; 6772 em->start = EXTENT_MAP_HOLE; 6773 em->orig_start = EXTENT_MAP_HOLE; 6774 em->len = (u64)-1; 6775 em->block_len = (u64)-1; 6776 6777 if (!path) { 6778 path = btrfs_alloc_path(); 6779 if (!path) { 6780 err = -ENOMEM; 6781 goto out; 6782 } 6783 /* 6784 * Chances are we'll be called again, so go ahead and do 6785 * readahead 6786 */ 6787 path->reada = 1; 6788 } 6789 6790 ret = btrfs_lookup_file_extent(trans, root, path, 6791 objectid, start, trans != NULL); 6792 if (ret < 0) { 6793 err = ret; 6794 goto out; 6795 } 6796 6797 if (ret != 0) { 6798 if (path->slots[0] == 0) 6799 goto not_found; 6800 path->slots[0]--; 6801 } 6802 6803 leaf = path->nodes[0]; 6804 item = btrfs_item_ptr(leaf, path->slots[0], 6805 struct btrfs_file_extent_item); 6806 /* are we inside the extent that was found? */ 6807 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6808 found_type = found_key.type; 6809 if (found_key.objectid != objectid || 6810 found_type != BTRFS_EXTENT_DATA_KEY) { 6811 /* 6812 * If we backup past the first extent we want to move forward 6813 * and see if there is an extent in front of us, otherwise we'll 6814 * say there is a hole for our whole search range which can 6815 * cause problems. 
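 * For example: searching for offset 4096 in a file whose only extent
 * starts at 8192 backs the path up onto an item that is not one of
 * our extent items; we then set extent_end = start and step forward,
 * reporting the hole [4096, 8192) instead of a hole across the whole
 * search range.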
6816 */ 6817 extent_end = start; 6818 goto next; 6819 } 6820 6821 found_type = btrfs_file_extent_type(leaf, item); 6822 extent_start = found_key.offset; 6823 if (found_type == BTRFS_FILE_EXTENT_REG || 6824 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 6825 extent_end = extent_start + 6826 btrfs_file_extent_num_bytes(leaf, item); 6827 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 6828 size_t size; 6829 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6830 extent_end = ALIGN(extent_start + size, root->sectorsize); 6831 } 6832 next: 6833 if (start >= extent_end) { 6834 path->slots[0]++; 6835 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 6836 ret = btrfs_next_leaf(root, path); 6837 if (ret < 0) { 6838 err = ret; 6839 goto out; 6840 } 6841 if (ret > 0) 6842 goto not_found; 6843 leaf = path->nodes[0]; 6844 } 6845 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6846 if (found_key.objectid != objectid || 6847 found_key.type != BTRFS_EXTENT_DATA_KEY) 6848 goto not_found; 6849 if (start + len <= found_key.offset) 6850 goto not_found; 6851 if (start > found_key.offset) 6852 goto next; 6853 em->start = start; 6854 em->orig_start = start; 6855 em->len = found_key.offset - start; 6856 goto not_found_em; 6857 } 6858 6859 btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em); 6860 6861 if (found_type == BTRFS_FILE_EXTENT_REG || 6862 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 6863 goto insert; 6864 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 6865 unsigned long ptr; 6866 char *map; 6867 size_t size; 6868 size_t extent_offset; 6869 size_t copy_size; 6870 6871 if (new_inline) 6872 goto out; 6873 6874 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); 6875 extent_offset = page_offset(page) + pg_offset - extent_start; 6876 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 6877 size - extent_offset); 6878 em->start = extent_start + extent_offset; 6879 em->len = ALIGN(copy_size, root->sectorsize); 6880 em->orig_block_len = em->len; 6881 em->orig_start = em->start; 6882 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 6883 if (create == 0 && !PageUptodate(page)) { 6884 if (btrfs_file_extent_compression(leaf, item) != 6885 BTRFS_COMPRESS_NONE) { 6886 ret = uncompress_inline(path, inode, page, 6887 pg_offset, 6888 extent_offset, item); 6889 if (ret) { 6890 err = ret; 6891 goto out; 6892 } 6893 } else { 6894 map = kmap(page); 6895 read_extent_buffer(leaf, map + pg_offset, ptr, 6896 copy_size); 6897 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 6898 memset(map + pg_offset + copy_size, 0, 6899 PAGE_CACHE_SIZE - pg_offset - 6900 copy_size); 6901 } 6902 kunmap(page); 6903 } 6904 flush_dcache_page(page); 6905 } else if (create && PageUptodate(page)) { 6906 BUG(); 6907 if (!trans) { 6908 kunmap(page); 6909 free_extent_map(em); 6910 em = NULL; 6911 6912 btrfs_release_path(path); 6913 trans = btrfs_join_transaction(root); 6914 6915 if (IS_ERR(trans)) 6916 return ERR_CAST(trans); 6917 goto again; 6918 } 6919 map = kmap(page); 6920 write_extent_buffer(leaf, map + pg_offset, ptr, 6921 copy_size); 6922 kunmap(page); 6923 btrfs_mark_buffer_dirty(leaf); 6924 } 6925 set_extent_uptodate(io_tree, em->start, 6926 extent_map_end(em) - 1, NULL, GFP_NOFS); 6927 goto insert; 6928 } 6929 not_found: 6930 em->start = start; 6931 em->orig_start = start; 6932 em->len = len; 6933 not_found_em: 6934 em->block_start = EXTENT_MAP_HOLE; 6935 set_bit(EXTENT_FLAG_VACANCY, &em->flags); 6936 insert: 6937 btrfs_release_path(path); 6938 if (em->start > start || 
extent_map_end(em) <= start) { 6939 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]", 6940 em->start, em->len, start, len); 6941 err = -EIO; 6942 goto out; 6943 } 6944 6945 err = 0; 6946 write_lock(&em_tree->lock); 6947 ret = add_extent_mapping(em_tree, em, 0); 6948 /* it is possible that someone inserted the extent into the tree 6949 * while we had the lock dropped. It is also possible that 6950 * an overlapping map exists in the tree 6951 */ 6952 if (ret == -EEXIST) { 6953 struct extent_map *existing; 6954 6955 ret = 0; 6956 6957 existing = search_extent_mapping(em_tree, start, len); 6958 /* 6959 * existing will always be non-NULL, since there must be 6960 * extent causing the -EEXIST. 6961 */ 6962 if (start >= extent_map_end(existing) || 6963 start <= existing->start) { 6964 /* 6965 * The existing extent map is the one nearest to 6966 * the [start, start + len) range which overlaps 6967 */ 6968 err = merge_extent_mapping(em_tree, existing, 6969 em, start); 6970 free_extent_map(existing); 6971 if (err) { 6972 free_extent_map(em); 6973 em = NULL; 6974 } 6975 } else { 6976 free_extent_map(em); 6977 em = existing; 6978 err = 0; 6979 } 6980 } 6981 write_unlock(&em_tree->lock); 6982 out: 6983 6984 trace_btrfs_get_extent(root, em); 6985 6986 btrfs_free_path(path); 6987 if (trans) { 6988 ret = btrfs_end_transaction(trans, root); 6989 if (!err) 6990 err = ret; 6991 } 6992 if (err) { 6993 free_extent_map(em); 6994 return ERR_PTR(err); 6995 } 6996 BUG_ON(!em); /* Error is always set */ 6997 return em; 6998 } 6999 7000 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 7001 size_t pg_offset, u64 start, u64 len, 7002 int create) 7003 { 7004 struct extent_map *em; 7005 struct extent_map *hole_em = NULL; 7006 u64 range_start = start; 7007 u64 end; 7008 u64 found; 7009 u64 found_end; 7010 int err = 0; 7011 7012 em = btrfs_get_extent(inode, page, pg_offset, start, len, create); 7013 if (IS_ERR(em)) 7014 return em; 7015 if (em) { 7016 /* 7017 * if our em maps to 7018 * - a hole or 7019 * - a pre-alloc extent, 7020 * there might actually be delalloc bytes behind it. 
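 * Delalloc lives only in the io_tree until writeback allocates real
 * extents, so e.g. a buffered write into a hole shows up here as
 * EXTENT_DELALLOC bits rather than as a file extent item, and fiemap
 * would otherwise misreport that range as a plain hole.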
7021 */ 7022 if (em->block_start != EXTENT_MAP_HOLE && 7023 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7024 return em; 7025 else 7026 hole_em = em; 7027 } 7028 7029 /* check to see if we've wrapped (len == -1 or similar) */ 7030 end = start + len; 7031 if (end < start) 7032 end = (u64)-1; 7033 else 7034 end -= 1; 7035 7036 em = NULL; 7037 7038 /* ok, we didn't find anything, lets look for delalloc */ 7039 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, 7040 end, len, EXTENT_DELALLOC, 1); 7041 found_end = range_start + found; 7042 if (found_end < range_start) 7043 found_end = (u64)-1; 7044 7045 /* 7046 * we didn't find anything useful, return 7047 * the original results from get_extent() 7048 */ 7049 if (range_start > end || found_end <= start) { 7050 em = hole_em; 7051 hole_em = NULL; 7052 goto out; 7053 } 7054 7055 /* adjust the range_start to make sure it doesn't 7056 * go backwards from the start they passed in 7057 */ 7058 range_start = max(start, range_start); 7059 found = found_end - range_start; 7060 7061 if (found > 0) { 7062 u64 hole_start = start; 7063 u64 hole_len = len; 7064 7065 em = alloc_extent_map(); 7066 if (!em) { 7067 err = -ENOMEM; 7068 goto out; 7069 } 7070 /* 7071 * when btrfs_get_extent can't find anything it 7072 * returns one huge hole 7073 * 7074 * make sure what it found really fits our range, and 7075 * adjust to make sure it is based on the start from 7076 * the caller 7077 */ 7078 if (hole_em) { 7079 u64 calc_end = extent_map_end(hole_em); 7080 7081 if (calc_end <= start || (hole_em->start > end)) { 7082 free_extent_map(hole_em); 7083 hole_em = NULL; 7084 } else { 7085 hole_start = max(hole_em->start, start); 7086 hole_len = calc_end - hole_start; 7087 } 7088 } 7089 em->bdev = NULL; 7090 if (hole_em && range_start > hole_start) { 7091 /* our hole starts before our delalloc, so we 7092 * have to return just the parts of the hole 7093 * that go until the delalloc starts 7094 */ 7095 em->len = min(hole_len, 7096 range_start - hole_start); 7097 em->start = hole_start; 7098 em->orig_start = hole_start; 7099 /* 7100 * don't adjust block start at all, 7101 * it is fixed at EXTENT_MAP_HOLE 7102 */ 7103 em->block_start = hole_em->block_start; 7104 em->block_len = hole_len; 7105 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 7106 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 7107 } else { 7108 em->start = range_start; 7109 em->len = found; 7110 em->orig_start = range_start; 7111 em->block_start = EXTENT_MAP_DELALLOC; 7112 em->block_len = found; 7113 } 7114 } else if (hole_em) { 7115 return hole_em; 7116 } 7117 out: 7118 7119 free_extent_map(hole_em); 7120 if (err) { 7121 free_extent_map(em); 7122 return ERR_PTR(err); 7123 } 7124 return em; 7125 } 7126 7127 static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 7128 u64 start, u64 len) 7129 { 7130 struct btrfs_root *root = BTRFS_I(inode)->root; 7131 struct extent_map *em; 7132 struct btrfs_key ins; 7133 u64 alloc_hint; 7134 int ret; 7135 7136 alloc_hint = get_extent_allocation_hint(inode, start, len); 7137 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0, 7138 alloc_hint, &ins, 1, 1); 7139 if (ret) 7140 return ERR_PTR(ret); 7141 7142 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, 7143 ins.offset, ins.offset, ins.offset, 0); 7144 if (IS_ERR(em)) { 7145 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 7146 return em; 7147 } 7148 7149 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, 7150 ins.offset, ins.offset, 0); 7151 if (ret) 
{ 7152 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 7153 free_extent_map(em); 7154 return ERR_PTR(ret); 7155 } 7156 7157 return em; 7158 } 7159 7160 /* 7161 * returns 1 when the nocow is safe, < 1 on error, 0 if the 7162 * block must be cow'd 7163 */ 7164 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 7165 u64 *orig_start, u64 *orig_block_len, 7166 u64 *ram_bytes) 7167 { 7168 struct btrfs_trans_handle *trans; 7169 struct btrfs_path *path; 7170 int ret; 7171 struct extent_buffer *leaf; 7172 struct btrfs_root *root = BTRFS_I(inode)->root; 7173 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 7174 struct btrfs_file_extent_item *fi; 7175 struct btrfs_key key; 7176 u64 disk_bytenr; 7177 u64 backref_offset; 7178 u64 extent_end; 7179 u64 num_bytes; 7180 int slot; 7181 int found_type; 7182 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); 7183 7184 path = btrfs_alloc_path(); 7185 if (!path) 7186 return -ENOMEM; 7187 7188 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), 7189 offset, 0); 7190 if (ret < 0) 7191 goto out; 7192 7193 slot = path->slots[0]; 7194 if (ret == 1) { 7195 if (slot == 0) { 7196 /* can't find the item, must cow */ 7197 ret = 0; 7198 goto out; 7199 } 7200 slot--; 7201 } 7202 ret = 0; 7203 leaf = path->nodes[0]; 7204 btrfs_item_key_to_cpu(leaf, &key, slot); 7205 if (key.objectid != btrfs_ino(inode) || 7206 key.type != BTRFS_EXTENT_DATA_KEY) { 7207 /* not our file or wrong item type, must cow */ 7208 goto out; 7209 } 7210 7211 if (key.offset > offset) { 7212 /* Wrong offset, must cow */ 7213 goto out; 7214 } 7215 7216 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 7217 found_type = btrfs_file_extent_type(leaf, fi); 7218 if (found_type != BTRFS_FILE_EXTENT_REG && 7219 found_type != BTRFS_FILE_EXTENT_PREALLOC) { 7220 /* not a regular extent, must cow */ 7221 goto out; 7222 } 7223 7224 if (!nocow && found_type == BTRFS_FILE_EXTENT_REG) 7225 goto out; 7226 7227 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); 7228 if (extent_end <= offset) 7229 goto out; 7230 7231 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); 7232 if (disk_bytenr == 0) 7233 goto out; 7234 7235 if (btrfs_file_extent_compression(leaf, fi) || 7236 btrfs_file_extent_encryption(leaf, fi) || 7237 btrfs_file_extent_other_encoding(leaf, fi)) 7238 goto out; 7239 7240 backref_offset = btrfs_file_extent_offset(leaf, fi); 7241 7242 if (orig_start) { 7243 *orig_start = key.offset - backref_offset; 7244 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); 7245 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); 7246 } 7247 7248 if (btrfs_extent_readonly(root, disk_bytenr)) 7249 goto out; 7250 7251 num_bytes = min(offset + *len, extent_end) - offset; 7252 if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) { 7253 u64 range_end; 7254 7255 range_end = round_up(offset + num_bytes, root->sectorsize) - 1; 7256 ret = test_range_bit(io_tree, offset, range_end, 7257 EXTENT_DELALLOC, 0, NULL); 7258 if (ret) { 7259 ret = -EAGAIN; 7260 goto out; 7261 } 7262 } 7263 7264 btrfs_release_path(path); 7265 7266 /* 7267 * look for other files referencing this extent, if we 7268 * find any we must cow 7269 */ 7270 trans = btrfs_join_transaction(root); 7271 if (IS_ERR(trans)) { 7272 ret = 0; 7273 goto out; 7274 } 7275 7276 ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), 7277 key.offset - backref_offset, disk_bytenr); 7278 btrfs_end_transaction(trans, root); 7279 if (ret) { 7280 ret = 0; 7281 goto out; 7282 
} 7283 7284 /* 7285 * adjust disk_bytenr and num_bytes to cover just the bytes 7286 * in this extent we are about to write. If there 7287 * are any csums in that range we have to cow in order 7288 * to keep the csums correct 7289 */ 7290 disk_bytenr += backref_offset; 7291 disk_bytenr += offset - key.offset; 7292 if (csum_exist_in_range(root, disk_bytenr, num_bytes)) 7293 goto out; 7294 /* 7295 * all of the above have passed, it is safe to overwrite this extent 7296 * without cow 7297 */ 7298 *len = num_bytes; 7299 ret = 1; 7300 out: 7301 btrfs_free_path(path); 7302 return ret; 7303 } 7304 7305 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) 7306 { 7307 struct radix_tree_root *root = &inode->i_mapping->page_tree; 7308 int found = false; 7309 void **pagep = NULL; 7310 struct page *page = NULL; 7311 int start_idx; 7312 int end_idx; 7313 7314 start_idx = start >> PAGE_CACHE_SHIFT; 7315 7316 /* 7317 * end is the last byte in the last page. end == start is legal 7318 */ 7319 end_idx = end >> PAGE_CACHE_SHIFT; 7320 7321 rcu_read_lock(); 7322 7323 /* Most of the code in this while loop is lifted from 7324 * find_get_page. It's been modified to begin searching from a 7325 * page and return just the first page found in that range. If the 7326 * found idx is less than or equal to the end idx then we know that 7327 * a page exists. If no pages are found or if those pages are 7328 * outside of the range then we're fine (yay!) */ 7329 while (page == NULL && 7330 radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) { 7331 page = radix_tree_deref_slot(pagep); 7332 if (unlikely(!page)) 7333 break; 7334 7335 if (radix_tree_exception(page)) { 7336 if (radix_tree_deref_retry(page)) { 7337 page = NULL; 7338 continue; 7339 } 7340 /* 7341 * Otherwise, shmem/tmpfs must be storing a swap entry 7342 * here as an exceptional entry: so return it without 7343 * attempting to raise page count. 7344 */ 7345 page = NULL; 7346 break; /* TODO: Is this relevant for this use case? */ 7347 } 7348 7349 if (!page_cache_get_speculative(page)) { 7350 page = NULL; 7351 continue; 7352 } 7353 7354 /* 7355 * Has the page moved? 7356 * This is part of the lockless pagecache protocol. See 7357 * include/linux/pagemap.h for details. 7358 */ 7359 if (unlikely(page != *pagep)) { 7360 page_cache_release(page); 7361 page = NULL; 7362 } 7363 } 7364 7365 if (page) { 7366 if (page->index <= end_idx) 7367 found = true; 7368 page_cache_release(page); 7369 } 7370 7371 rcu_read_unlock(); 7372 return found; 7373 } 7374 7375 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, 7376 struct extent_state **cached_state, int writing) 7377 { 7378 struct btrfs_ordered_extent *ordered; 7379 int ret = 0; 7380 7381 while (1) { 7382 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7383 0, cached_state); 7384 /* 7385 * We're concerned with the entire range that we're going to be 7386 * doing DIO to, so we need to make sure theres no ordered 7387 * extents in this range. 7388 */ 7389 ordered = btrfs_lookup_ordered_range(inode, lockstart, 7390 lockend - lockstart + 1); 7391 7392 /* 7393 * We need to make sure there are no buffered pages in this 7394 * range either, we could have raced between the invalidate in 7395 * generic_file_direct_write and locking the extent. The 7396 * invalidate needs to happen so that reads after a write do not 7397 * get stale data. 
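 *
 * Illustrative interleaving (T1 = DIO writer, T2 = concurrent mmap):
 *
 *	T1: invalidate_inode_pages2_range()
 *	T2: fault the page back in and dirty it
 *	T1: lock_extent_bits()
 *
 * hence the flush/wait/invalidate retry below once we hold the
 * extent lock and still see pages in the range.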
7398 */ 7399 if (!ordered && 7400 (!writing || 7401 !btrfs_page_exists_in_range(inode, lockstart, lockend))) 7402 break; 7403 7404 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7405 cached_state, GFP_NOFS); 7406 7407 if (ordered) { 7408 btrfs_start_ordered_extent(inode, ordered, 1); 7409 btrfs_put_ordered_extent(ordered); 7410 } else { 7411 /* Screw you mmap */ 7412 ret = btrfs_fdatawrite_range(inode, lockstart, lockend); 7413 if (ret) 7414 break; 7415 ret = filemap_fdatawait_range(inode->i_mapping, 7416 lockstart, 7417 lockend); 7418 if (ret) 7419 break; 7420 7421 /* 7422 * If we found a page that couldn't be invalidated just 7423 * fall back to buffered. 7424 */ 7425 ret = invalidate_inode_pages2_range(inode->i_mapping, 7426 lockstart >> PAGE_CACHE_SHIFT, 7427 lockend >> PAGE_CACHE_SHIFT); 7428 if (ret) 7429 break; 7430 } 7431 7432 cond_resched(); 7433 } 7434 7435 return ret; 7436 } 7437 7438 static struct extent_map *create_pinned_em(struct inode *inode, u64 start, 7439 u64 len, u64 orig_start, 7440 u64 block_start, u64 block_len, 7441 u64 orig_block_len, u64 ram_bytes, 7442 int type) 7443 { 7444 struct extent_map_tree *em_tree; 7445 struct extent_map *em; 7446 struct btrfs_root *root = BTRFS_I(inode)->root; 7447 int ret; 7448 7449 em_tree = &BTRFS_I(inode)->extent_tree; 7450 em = alloc_extent_map(); 7451 if (!em) 7452 return ERR_PTR(-ENOMEM); 7453 7454 em->start = start; 7455 em->orig_start = orig_start; 7456 em->mod_start = start; 7457 em->mod_len = len; 7458 em->len = len; 7459 em->block_len = block_len; 7460 em->block_start = block_start; 7461 em->bdev = root->fs_info->fs_devices->latest_bdev; 7462 em->orig_block_len = orig_block_len; 7463 em->ram_bytes = ram_bytes; 7464 em->generation = -1; 7465 set_bit(EXTENT_FLAG_PINNED, &em->flags); 7466 if (type == BTRFS_ORDERED_PREALLOC) 7467 set_bit(EXTENT_FLAG_FILLING, &em->flags); 7468 7469 do { 7470 btrfs_drop_extent_cache(inode, em->start, 7471 em->start + em->len - 1, 0); 7472 write_lock(&em_tree->lock); 7473 ret = add_extent_mapping(em_tree, em, 1); 7474 write_unlock(&em_tree->lock); 7475 } while (ret == -EEXIST); 7476 7477 if (ret) { 7478 free_extent_map(em); 7479 return ERR_PTR(ret); 7480 } 7481 7482 return em; 7483 } 7484 7485 struct btrfs_dio_data { 7486 u64 outstanding_extents; 7487 u64 reserve; 7488 }; 7489 7490 static void adjust_dio_outstanding_extents(struct inode *inode, 7491 struct btrfs_dio_data *dio_data, 7492 const u64 len) 7493 { 7494 unsigned num_extents; 7495 7496 num_extents = (unsigned) div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1, 7497 BTRFS_MAX_EXTENT_SIZE); 7498 /* 7499 * If we have an outstanding_extents count still set then we're 7500 * within our reservation, otherwise we need to adjust our inode 7501 * counter appropriately. 
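 *
 * Worked example, assuming BTRFS_MAX_EXTENT_SIZE is 128M: a 200M DIO
 * write yields num_extents = (200M + 128M - 1) / 128M = 2; either we
 * consume 2 from the reservation's outstanding_extents or we add 2
 * to the inode's counter.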
7502 */ 7503 if (dio_data->outstanding_extents) { 7504 dio_data->outstanding_extents -= num_extents; 7505 } else { 7506 spin_lock(&BTRFS_I(inode)->lock); 7507 BTRFS_I(inode)->outstanding_extents += num_extents; 7508 spin_unlock(&BTRFS_I(inode)->lock); 7509 } 7510 } 7511 7512 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, 7513 struct buffer_head *bh_result, int create) 7514 { 7515 struct extent_map *em; 7516 struct btrfs_root *root = BTRFS_I(inode)->root; 7517 struct extent_state *cached_state = NULL; 7518 struct btrfs_dio_data *dio_data = NULL; 7519 u64 start = iblock << inode->i_blkbits; 7520 u64 lockstart, lockend; 7521 u64 len = bh_result->b_size; 7522 int unlock_bits = EXTENT_LOCKED; 7523 int ret = 0; 7524 7525 if (create) 7526 unlock_bits |= EXTENT_DIRTY; 7527 else 7528 len = min_t(u64, len, root->sectorsize); 7529 7530 lockstart = start; 7531 lockend = start + len - 1; 7532 7533 if (current->journal_info) { 7534 /* 7535 * Need to pull our outstanding extents and set journal_info to NULL so 7536 * that anything that needs to check if there's a transaction doesn't get 7537 * confused. 7538 */ 7539 dio_data = current->journal_info; 7540 current->journal_info = NULL; 7541 } 7542 7543 /* 7544 * If this errors out it's because we couldn't invalidate pagecache for 7545 * this range and we need to fall back to buffered. 7546 */ 7547 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, 7548 create)) { 7549 ret = -ENOTBLK; 7550 goto err; 7551 } 7552 7553 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 7554 if (IS_ERR(em)) { 7555 ret = PTR_ERR(em); 7556 goto unlock_err; 7557 } 7558 7559 /* 7560 * Ok, for INLINE and COMPRESSED extents we need to fall back on buffered 7561 * io. INLINE is special, and we could probably kludge it in here, but 7562 * it's still buffered so for safety let's just fall back to the generic 7563 * buffered path. 7564 * 7565 * For COMPRESSED we _have_ to read the entire extent in so we can 7566 * decompress it, so there will be buffering required no matter what we 7567 * do, so go ahead and fall back to buffered. 7568 * 7569 * We return -ENOTBLK because that's what makes DIO go ahead and go back 7570 * to buffered IO. Don't blame me, this is the price we pay for using 7571 * the generic code. 7572 */ 7573 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || 7574 em->block_start == EXTENT_MAP_INLINE) { 7575 free_extent_map(em); 7576 ret = -ENOTBLK; 7577 goto unlock_err; 7578 } 7579 7580 /* Just a good old-fashioned hole, return */ 7581 if (!create && (em->block_start == EXTENT_MAP_HOLE || 7582 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 7583 free_extent_map(em); 7584 goto unlock_err; 7585 } 7586 7587 /* 7588 * We don't allocate a new extent in the following cases 7589 * 7590 * 1) The inode is marked as NODATACOW. In this case we'll just use the 7591 * existing extent. 7592 * 2) The extent is marked as PREALLOC. We're good to go here and can 7593 * just use the extent.
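 *
 * Either way can_nocow_extent() below still has to agree: if it
 * finds csums for the range, a reference from another file, or a
 * read-only block group, we fall through and cow a new extent
 * after all.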
7594 * 7595 */ 7596 if (!create) { 7597 len = min(len, em->len - (start - em->start)); 7598 lockstart = start + len; 7599 goto unlock; 7600 } 7601 7602 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 7603 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 7604 em->block_start != EXTENT_MAP_HOLE)) { 7605 int type; 7606 u64 block_start, orig_start, orig_block_len, ram_bytes; 7607 7608 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7609 type = BTRFS_ORDERED_PREALLOC; 7610 else 7611 type = BTRFS_ORDERED_NOCOW; 7612 len = min(len, em->len - (start - em->start)); 7613 block_start = em->block_start + (start - em->start); 7614 7615 if (can_nocow_extent(inode, start, &len, &orig_start, 7616 &orig_block_len, &ram_bytes) == 1) { 7617 if (type == BTRFS_ORDERED_PREALLOC) { 7618 free_extent_map(em); 7619 em = create_pinned_em(inode, start, len, 7620 orig_start, 7621 block_start, len, 7622 orig_block_len, 7623 ram_bytes, type); 7624 if (IS_ERR(em)) { 7625 ret = PTR_ERR(em); 7626 goto unlock_err; 7627 } 7628 } 7629 7630 ret = btrfs_add_ordered_extent_dio(inode, start, 7631 block_start, len, len, type); 7632 if (ret) { 7633 free_extent_map(em); 7634 goto unlock_err; 7635 } 7636 goto unlock; 7637 } 7638 } 7639 7640 /* 7641 * this will cow the extent, reset the len in case we changed 7642 * it above 7643 */ 7644 len = bh_result->b_size; 7645 free_extent_map(em); 7646 em = btrfs_new_extent_direct(inode, start, len); 7647 if (IS_ERR(em)) { 7648 ret = PTR_ERR(em); 7649 goto unlock_err; 7650 } 7651 len = min(len, em->len - (start - em->start)); 7652 unlock: 7653 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 7654 inode->i_blkbits; 7655 bh_result->b_size = len; 7656 bh_result->b_bdev = em->bdev; 7657 set_buffer_mapped(bh_result); 7658 if (create) { 7659 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 7660 set_buffer_new(bh_result); 7661 7662 /* 7663 * Need to update the i_size under the extent lock so buffered 7664 * readers will get the updated i_size when we unlock. 7665 */ 7666 if (start + len > i_size_read(inode)) 7667 i_size_write(inode, start + len); 7668 7669 adjust_dio_outstanding_extents(inode, dio_data, len); 7670 btrfs_free_reserved_data_space(inode, start, len); 7671 WARN_ON(dio_data->reserve < len); 7672 dio_data->reserve -= len; 7673 current->journal_info = dio_data; 7674 } 7675 7676 /* 7677 * In the case of write we need to clear and unlock the entire range, 7678 * in the case of read we need to unlock only the end area that we 7679 * aren't using if there is any left over space. 7680 */ 7681 if (lockstart < lockend) { 7682 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 7683 lockend, unlock_bits, 1, 0, 7684 &cached_state, GFP_NOFS); 7685 } else { 7686 free_extent_state(cached_state); 7687 } 7688 7689 free_extent_map(em); 7690 7691 return 0; 7692 7693 unlock_err: 7694 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 7695 unlock_bits, 1, 0, &cached_state, GFP_NOFS); 7696 err: 7697 if (dio_data) 7698 current->journal_info = dio_data; 7699 /* 7700 * Compensate the delalloc release we do in btrfs_direct_IO() when we 7701 * write less data then expected, so that we don't underflow our inode's 7702 * outstanding extents counter. 
7703 */ 7704 if (create && dio_data) 7705 adjust_dio_outstanding_extents(inode, dio_data, len); 7706 7707 return ret; 7708 } 7709 7710 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, 7711 int rw, int mirror_num) 7712 { 7713 struct btrfs_root *root = BTRFS_I(inode)->root; 7714 int ret; 7715 7716 BUG_ON(rw & REQ_WRITE); 7717 7718 bio_get(bio); 7719 7720 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 7721 BTRFS_WQ_ENDIO_DIO_REPAIR); 7722 if (ret) 7723 goto err; 7724 7725 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); 7726 err: 7727 bio_put(bio); 7728 return ret; 7729 } 7730 7731 static int btrfs_check_dio_repairable(struct inode *inode, 7732 struct bio *failed_bio, 7733 struct io_failure_record *failrec, 7734 int failed_mirror) 7735 { 7736 int num_copies; 7737 7738 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info, 7739 failrec->logical, failrec->len); 7740 if (num_copies == 1) { 7741 /* 7742 * we only have a single copy of the data, so don't bother with 7743 * all the retry and error correction code that follows. no 7744 * matter what the error is, it is very likely to persist. 7745 */ 7746 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n", 7747 num_copies, failrec->this_mirror, failed_mirror); 7748 return 0; 7749 } 7750 7751 failrec->failed_mirror = failed_mirror; 7752 failrec->this_mirror++; 7753 if (failrec->this_mirror == failed_mirror) 7754 failrec->this_mirror++; 7755 7756 if (failrec->this_mirror > num_copies) { 7757 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n", 7758 num_copies, failrec->this_mirror, failed_mirror); 7759 return 0; 7760 } 7761 7762 return 1; 7763 } 7764 7765 static int dio_read_error(struct inode *inode, struct bio *failed_bio, 7766 struct page *page, u64 start, u64 end, 7767 int failed_mirror, bio_end_io_t *repair_endio, 7768 void *repair_arg) 7769 { 7770 struct io_failure_record *failrec; 7771 struct bio *bio; 7772 int isector; 7773 int read_mode; 7774 int ret; 7775 7776 BUG_ON(failed_bio->bi_rw & REQ_WRITE); 7777 7778 ret = btrfs_get_io_failure_record(inode, start, end, &failrec); 7779 if (ret) 7780 return ret; 7781 7782 ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, 7783 failed_mirror); 7784 if (!ret) { 7785 free_io_failure(inode, failrec); 7786 return -EIO; 7787 } 7788 7789 if (failed_bio->bi_vcnt > 1) 7790 read_mode = READ_SYNC | REQ_FAILFAST_DEV; 7791 else 7792 read_mode = READ_SYNC; 7793 7794 isector = start - btrfs_io_bio(failed_bio)->logical; 7795 isector >>= inode->i_sb->s_blocksize_bits; 7796 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, 7797 0, isector, repair_endio, repair_arg); 7798 if (!bio) { 7799 free_io_failure(inode, failrec); 7800 return -EIO; 7801 } 7802 7803 btrfs_debug(BTRFS_I(inode)->root->fs_info, 7804 "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n", 7805 read_mode, failrec->this_mirror, failrec->in_validation); 7806 7807 ret = submit_dio_repair_bio(inode, bio, read_mode, 7808 failrec->this_mirror); 7809 if (ret) { 7810 free_io_failure(inode, failrec); 7811 bio_put(bio); 7812 } 7813 7814 return ret; 7815 } 7816 7817 struct btrfs_retry_complete { 7818 struct completion done; 7819 struct inode *inode; 7820 u64 start; 7821 int uptodate; 7822 }; 7823 7824 static void btrfs_retry_endio_nocsum(struct bio *bio) 7825 { 7826 struct btrfs_retry_complete *done = bio->bi_private; 7827 struct bio_vec *bvec; 7828 int i; 7829 7830 if (bio->bi_error) 
7831 goto end; 7832 7833 done->uptodate = 1; 7834 bio_for_each_segment_all(bvec, bio, i) 7835 clean_io_failure(done->inode, done->start, bvec->bv_page, 0); 7836 end: 7837 complete(&done->done); 7838 bio_put(bio); 7839 } 7840 7841 static int __btrfs_correct_data_nocsum(struct inode *inode, 7842 struct btrfs_io_bio *io_bio) 7843 { 7844 struct bio_vec *bvec; 7845 struct btrfs_retry_complete done; 7846 u64 start; 7847 int i; 7848 int ret; 7849 7850 start = io_bio->logical; 7851 done.inode = inode; 7852 7853 bio_for_each_segment_all(bvec, &io_bio->bio, i) { 7854 try_again: 7855 done.uptodate = 0; 7856 done.start = start; 7857 init_completion(&done.done); 7858 7859 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, 7860 start + bvec->bv_len - 1, 7861 io_bio->mirror_num, 7862 btrfs_retry_endio_nocsum, &done); 7863 if (ret) 7864 return ret; 7865 7866 wait_for_completion(&done.done); 7867 7868 if (!done.uptodate) { 7869 /* We might have another mirror, so try again */ 7870 goto try_again; 7871 } 7872 7873 start += bvec->bv_len; 7874 } 7875 7876 return 0; 7877 } 7878 7879 static void btrfs_retry_endio(struct bio *bio) 7880 { 7881 struct btrfs_retry_complete *done = bio->bi_private; 7882 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7883 struct bio_vec *bvec; 7884 int uptodate; 7885 int ret; 7886 int i; 7887 7888 if (bio->bi_error) 7889 goto end; 7890 7891 uptodate = 1; 7892 bio_for_each_segment_all(bvec, bio, i) { 7893 ret = __readpage_endio_check(done->inode, io_bio, i, 7894 bvec->bv_page, 0, 7895 done->start, bvec->bv_len); 7896 if (!ret) 7897 clean_io_failure(done->inode, done->start, 7898 bvec->bv_page, 0); 7899 else 7900 uptodate = 0; 7901 } 7902 7903 done->uptodate = uptodate; 7904 end: 7905 complete(&done->done); 7906 bio_put(bio); 7907 } 7908 7909 static int __btrfs_subio_endio_read(struct inode *inode, 7910 struct btrfs_io_bio *io_bio, int err) 7911 { 7912 struct bio_vec *bvec; 7913 struct btrfs_retry_complete done; 7914 u64 start; 7915 u64 offset = 0; 7916 int i; 7917 int ret; 7918 7919 err = 0; 7920 start = io_bio->logical; 7921 done.inode = inode; 7922 7923 bio_for_each_segment_all(bvec, &io_bio->bio, i) { 7924 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, 7925 0, start, bvec->bv_len); 7926 if (likely(!ret)) 7927 goto next; 7928 try_again: 7929 done.uptodate = 0; 7930 done.start = start; 7931 init_completion(&done.done); 7932 7933 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, 7934 start + bvec->bv_len - 1, 7935 io_bio->mirror_num, 7936 btrfs_retry_endio, &done); 7937 if (ret) { 7938 err = ret; 7939 goto next; 7940 } 7941 7942 wait_for_completion(&done.done); 7943 7944 if (!done.uptodate) { 7945 /* We might have another mirror, so try again */ 7946 goto try_again; 7947 } 7948 next: 7949 offset += bvec->bv_len; 7950 start += bvec->bv_len; 7951 } 7952 7953 return err; 7954 } 7955 7956 static int btrfs_subio_endio_read(struct inode *inode, 7957 struct btrfs_io_bio *io_bio, int err) 7958 { 7959 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 7960 7961 if (skip_csum) { 7962 if (unlikely(err)) 7963 return __btrfs_correct_data_nocsum(inode, io_bio); 7964 else 7965 return 0; 7966 } else { 7967 return __btrfs_subio_endio_read(inode, io_bio, err); 7968 } 7969 } 7970 7971 static void btrfs_endio_direct_read(struct bio *bio) 7972 { 7973 struct btrfs_dio_private *dip = bio->bi_private; 7974 struct inode *inode = dip->inode; 7975 struct bio *dio_bio; 7976 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7977 int err = bio->bi_error; 7978 
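/*
 * Roughly: if the original, unsplit bio was the one submitted
 * (BTRFS_DIO_ORIG_BIO_SUBMITTED), csum verification and any
 * per-sector repair have not run in a sub-bio completion yet, so
 * they are done below via btrfs_subio_endio_read().
 */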
7979 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) 7980 err = btrfs_subio_endio_read(inode, io_bio, err); 7981 7982 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 7983 dip->logical_offset + dip->bytes - 1); 7984 dio_bio = dip->dio_bio; 7985 7986 kfree(dip); 7987 7988 dio_end_io(dio_bio, bio->bi_error); 7989 7990 if (io_bio->end_io) 7991 io_bio->end_io(io_bio, err); 7992 bio_put(bio); 7993 } 7994 7995 static void btrfs_endio_direct_write(struct bio *bio) 7996 { 7997 struct btrfs_dio_private *dip = bio->bi_private; 7998 struct inode *inode = dip->inode; 7999 struct btrfs_root *root = BTRFS_I(inode)->root; 8000 struct btrfs_ordered_extent *ordered = NULL; 8001 u64 ordered_offset = dip->logical_offset; 8002 u64 ordered_bytes = dip->bytes; 8003 struct bio *dio_bio; 8004 int ret; 8005 8006 again: 8007 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 8008 &ordered_offset, 8009 ordered_bytes, 8010 !bio->bi_error); 8011 if (!ret) 8012 goto out_test; 8013 8014 btrfs_init_work(&ordered->work, btrfs_endio_write_helper, 8015 finish_ordered_fn, NULL, NULL); 8016 btrfs_queue_work(root->fs_info->endio_write_workers, 8017 &ordered->work); 8018 out_test: 8019 /* 8020 * our bio might span multiple ordered extents. If we haven't 8021 * completed the accounting for the whole dio, go back and try again 8022 */ 8023 if (ordered_offset < dip->logical_offset + dip->bytes) { 8024 ordered_bytes = dip->logical_offset + dip->bytes - 8025 ordered_offset; 8026 ordered = NULL; 8027 goto again; 8028 } 8029 dio_bio = dip->dio_bio; 8030 8031 kfree(dip); 8032 8033 dio_end_io(dio_bio, bio->bi_error); 8034 bio_put(bio); 8035 } 8036 8037 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, 8038 struct bio *bio, int mirror_num, 8039 unsigned long bio_flags, u64 offset) 8040 { 8041 int ret; 8042 struct btrfs_root *root = BTRFS_I(inode)->root; 8043 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); 8044 BUG_ON(ret); /* -ENOMEM */ 8045 return 0; 8046 } 8047 8048 static void btrfs_end_dio_bio(struct bio *bio) 8049 { 8050 struct btrfs_dio_private *dip = bio->bi_private; 8051 int err = bio->bi_error; 8052 8053 if (err) 8054 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, 8055 "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d", 8056 btrfs_ino(dip->inode), bio->bi_rw, 8057 (unsigned long long)bio->bi_iter.bi_sector, 8058 bio->bi_iter.bi_size, err); 8059 8060 if (dip->subio_endio) 8061 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err); 8062 8063 if (err) { 8064 dip->errors = 1; 8065 8066 /* 8067 * before the atomic variable goes to zero, we must make sure 8068 * dip->errors is perceived to be set.
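	 *
	 * A minimal sketch of the intended pairing, assuming the usual
	 * barrier semantics (atomic_dec_and_test() is fully ordered, so
	 * the task that takes the counter to zero also observes the
	 * store):
	 *
	 *	failing bio			last bio to complete
	 *	--------------------------	----------------------------
	 *	dip->errors = 1;
	 *	smp_mb__before_atomic();
	 *	atomic_dec(&pending_bios);	atomic_dec_and_test() == true
	 *					sees dip->errors == 1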
8069 */ 8070 smp_mb__before_atomic(); 8071 } 8072 8073 /* if there are more bios still pending for this dio, just exit */ 8074 if (!atomic_dec_and_test(&dip->pending_bios)) 8075 goto out; 8076 8077 if (dip->errors) { 8078 bio_io_error(dip->orig_bio); 8079 } else { 8080 dip->dio_bio->bi_error = 0; 8081 bio_endio(dip->orig_bio); 8082 } 8083 out: 8084 bio_put(bio); 8085 } 8086 8087 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, 8088 u64 first_sector, gfp_t gfp_flags) 8089 { 8090 struct bio *bio; 8091 bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags); 8092 if (bio) 8093 bio_associate_current(bio); 8094 return bio; 8095 } 8096 8097 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root, 8098 struct inode *inode, 8099 struct btrfs_dio_private *dip, 8100 struct bio *bio, 8101 u64 file_offset) 8102 { 8103 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8104 struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); 8105 int ret; 8106 8107 /* 8108 * We load all the csum data we need when we submit 8109 * the first bio to reduce the csum tree search and 8110 * contention. 8111 */ 8112 if (dip->logical_offset == file_offset) { 8113 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio, 8114 file_offset); 8115 if (ret) 8116 return ret; 8117 } 8118 8119 if (bio == dip->orig_bio) 8120 return 0; 8121 8122 file_offset -= dip->logical_offset; 8123 file_offset >>= inode->i_sb->s_blocksize_bits; 8124 io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset); 8125 8126 return 0; 8127 } 8128 8129 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 8130 int rw, u64 file_offset, int skip_sum, 8131 int async_submit) 8132 { 8133 struct btrfs_dio_private *dip = bio->bi_private; 8134 int write = rw & REQ_WRITE; 8135 struct btrfs_root *root = BTRFS_I(inode)->root; 8136 int ret; 8137 8138 if (async_submit) 8139 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); 8140 8141 bio_get(bio); 8142 8143 if (!write) { 8144 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 8145 BTRFS_WQ_ENDIO_DATA); 8146 if (ret) 8147 goto err; 8148 } 8149 8150 if (skip_sum) 8151 goto map; 8152 8153 if (write && async_submit) { 8154 ret = btrfs_wq_submit_bio(root->fs_info, 8155 inode, rw, bio, 0, 0, 8156 file_offset, 8157 __btrfs_submit_bio_start_direct_io, 8158 __btrfs_submit_bio_done); 8159 goto err; 8160 } else if (write) { 8161 /* 8162 * If we aren't doing async submit, calculate the csum of the 8163 * bio now. 
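	 *
	 * For reference, the three submit paths that funnel through here,
	 * as laid out in the code below:
	 *
	 *	write + async	-> btrfs_wq_submit_bio(), csum done later
	 *			   in __btrfs_submit_bio_start_direct_io()
	 *	write + sync	-> btrfs_csum_one_bio() right here
	 *	read		-> btrfs_lookup_and_bind_dio_csum() so the
	 *			   endio handlers can verify each sector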
8164 */ 8165 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); 8166 if (ret) 8167 goto err; 8168 } else { 8169 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio, 8170 file_offset); 8171 if (ret) 8172 goto err; 8173 } 8174 map: 8175 ret = btrfs_map_bio(root, rw, bio, 0, async_submit); 8176 err: 8177 bio_put(bio); 8178 return ret; 8179 } 8180 8181 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, 8182 int skip_sum) 8183 { 8184 struct inode *inode = dip->inode; 8185 struct btrfs_root *root = BTRFS_I(inode)->root; 8186 struct bio *bio; 8187 struct bio *orig_bio = dip->orig_bio; 8188 struct bio_vec *bvec = orig_bio->bi_io_vec; 8189 u64 start_sector = orig_bio->bi_iter.bi_sector; 8190 u64 file_offset = dip->logical_offset; 8191 u64 submit_len = 0; 8192 u64 map_length; 8193 int nr_pages = 0; 8194 int ret; 8195 int async_submit = 0; 8196 8197 map_length = orig_bio->bi_iter.bi_size; 8198 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, 8199 &map_length, NULL, 0); 8200 if (ret) 8201 return -EIO; 8202 8203 if (map_length >= orig_bio->bi_iter.bi_size) { 8204 bio = orig_bio; 8205 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED; 8206 goto submit; 8207 } 8208 8209 /* async crcs make it difficult to collect full stripe writes. */ 8210 if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK) 8211 async_submit = 0; 8212 else 8213 async_submit = 1; 8214 8215 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); 8216 if (!bio) 8217 return -ENOMEM; 8218 8219 bio->bi_private = dip; 8220 bio->bi_end_io = btrfs_end_dio_bio; 8221 btrfs_io_bio(bio)->logical = file_offset; 8222 atomic_inc(&dip->pending_bios); 8223 8224 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { 8225 if (map_length < submit_len + bvec->bv_len || 8226 bio_add_page(bio, bvec->bv_page, bvec->bv_len, 8227 bvec->bv_offset) < bvec->bv_len) { 8228 /* 8229 * inc the count before we submit the bio so we know 8230 * the end IO handler won't run before the count has 8231 * been raised. Otherwise, the dip might get freed 8232 * before we're done setting it up 8233 */ 8234 atomic_inc(&dip->pending_bios); 8235 ret = __btrfs_submit_dio_bio(bio, inode, rw, 8236 file_offset, skip_sum, 8237 async_submit); 8238 if (ret) { 8239 bio_put(bio); 8240 atomic_dec(&dip->pending_bios); 8241 goto out_err; 8242 } 8243 8244 start_sector += submit_len >> 9; 8245 file_offset += submit_len; 8246 8247 submit_len = 0; 8248 nr_pages = 0; 8249 8250 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, 8251 start_sector, GFP_NOFS); 8252 if (!bio) 8253 goto out_err; 8254 bio->bi_private = dip; 8255 bio->bi_end_io = btrfs_end_dio_bio; 8256 btrfs_io_bio(bio)->logical = file_offset; 8257 8258 map_length = orig_bio->bi_iter.bi_size; 8259 ret = btrfs_map_block(root->fs_info, rw, 8260 start_sector << 9, 8261 &map_length, NULL, 0); 8262 if (ret) { 8263 bio_put(bio); 8264 goto out_err; 8265 } 8266 } else { 8267 submit_len += bvec->bv_len; 8268 nr_pages++; 8269 bvec++; 8270 } 8271 } 8272 8273 submit: 8274 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 8275 async_submit); 8276 if (!ret) 8277 return 0; 8278 8279 bio_put(bio); 8280 out_err: 8281 dip->errors = 1; 8282 /* 8283 * before the atomic variable goes to zero, we must 8284 * make sure dip->errors is perceived to be set.
8285 */ 8286 smp_mb__before_atomic(); 8287 if (atomic_dec_and_test(&dip->pending_bios)) 8288 bio_io_error(dip->orig_bio); 8289 8290 /* bio_end_io() will handle error, so we needn't return it */ 8291 return 0; 8292 } 8293 8294 static void btrfs_submit_direct(int rw, struct bio *dio_bio, 8295 struct inode *inode, loff_t file_offset) 8296 { 8297 struct btrfs_dio_private *dip = NULL; 8298 struct bio *io_bio = NULL; 8299 struct btrfs_io_bio *btrfs_bio; 8300 int skip_sum; 8301 int write = rw & REQ_WRITE; 8302 int ret = 0; 8303 8304 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 8305 8306 io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS); 8307 if (!io_bio) { 8308 ret = -ENOMEM; 8309 goto free_ordered; 8310 } 8311 8312 dip = kzalloc(sizeof(*dip), GFP_NOFS); 8313 if (!dip) { 8314 ret = -ENOMEM; 8315 goto free_ordered; 8316 } 8317 8318 dip->private = dio_bio->bi_private; 8319 dip->inode = inode; 8320 dip->logical_offset = file_offset; 8321 dip->bytes = dio_bio->bi_iter.bi_size; 8322 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; 8323 io_bio->bi_private = dip; 8324 dip->orig_bio = io_bio; 8325 dip->dio_bio = dio_bio; 8326 atomic_set(&dip->pending_bios, 0); 8327 btrfs_bio = btrfs_io_bio(io_bio); 8328 btrfs_bio->logical = file_offset; 8329 8330 if (write) { 8331 io_bio->bi_end_io = btrfs_endio_direct_write; 8332 } else { 8333 io_bio->bi_end_io = btrfs_endio_direct_read; 8334 dip->subio_endio = btrfs_subio_endio_read; 8335 } 8336 8337 ret = btrfs_submit_direct_hook(rw, dip, skip_sum); 8338 if (!ret) 8339 return; 8340 8341 if (btrfs_bio->end_io) 8342 btrfs_bio->end_io(btrfs_bio, ret); 8343 8344 free_ordered: 8345 /* 8346 * If we arrived here it means either we failed to submit the dip, 8347 * or we failed to clone the dio_bio, or we failed to allocate the 8348 * dip. If we cloned the dio_bio and allocated the dip, we can just 8349 * call bio_endio against our io_bio so that we get proper resource 8350 * cleanup if we fail to submit the dip, otherwise, we must do the 8351 * same as btrfs_endio_direct_[write|read] because we can't call these 8352 * callbacks - they require an allocated dip and a clone of dio_bio. 8353 */ 8354 if (io_bio && dip) { 8355 io_bio->bi_error = -EIO; 8356 bio_endio(io_bio); 8357 /* 8358 * The end io callbacks free our dip, do the final put on io_bio 8359 * and all the cleanup and final put for dio_bio (through 8360 * dio_end_io()). 8361 */ 8362 dip = NULL; 8363 io_bio = NULL; 8364 } else { 8365 if (write) { 8366 struct btrfs_ordered_extent *ordered; 8367 8368 ordered = btrfs_lookup_ordered_extent(inode, 8369 file_offset); 8370 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); 8371 /* 8372 * Decrements our ref on the ordered extent and removes 8373 * the ordered extent from the inode's ordered tree, 8374 * doing all the proper resource cleanup such as for the 8375 * reserved space and waking up any waiters for this 8376 * ordered extent (through btrfs_remove_ordered_extent). 8377 */ 8378 btrfs_finish_ordered_io(ordered); 8379 } else { 8380 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 8381 file_offset + dio_bio->bi_iter.bi_size - 1); 8382 } 8383 dio_bio->bi_error = -EIO; 8384 /* 8385 * Releases and cleans up our dio_bio, no need to bio_put() 8386 * nor bio_endio()/bio_io_error() against dio_bio.
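	 * (dio_end_io() hands dio_bio back to the generic direct-io
	 *  layer, which completes the iocb and drops the final reference,
	 *  so after the call below dio_bio may already be freed - a
	 *  sketch of what must NOT be done:
	 *
	 *	dio_end_io(dio_bio, ret);
	 *	dio_bio->bi_error = 0;		<- use-after-free
	 *  )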
8387 */ 8388 dio_end_io(dio_bio, ret); 8389 } 8390 if (io_bio) 8391 bio_put(io_bio); 8392 kfree(dip); 8393 } 8394 8395 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, 8396 const struct iov_iter *iter, loff_t offset) 8397 { 8398 int seg; 8399 int i; 8400 unsigned blocksize_mask = root->sectorsize - 1; 8401 ssize_t retval = -EINVAL; 8402 8403 if (offset & blocksize_mask) 8404 goto out; 8405 8406 if (iov_iter_alignment(iter) & blocksize_mask) 8407 goto out; 8408 8409 /* If this is a write we don't need to check any further */ 8410 if (iov_iter_rw(iter) == WRITE) 8411 return 0; 8412 /* 8413 * Check to make sure we don't have duplicate iov_base's in this 8414 * iovec, if so return EINVAL, otherwise we'll get csum errors 8415 * when reading back. 8416 */ 8417 for (seg = 0; seg < iter->nr_segs; seg++) { 8418 for (i = seg + 1; i < iter->nr_segs; i++) { 8419 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) 8420 goto out; 8421 } 8422 } 8423 retval = 0; 8424 out: 8425 return retval; 8426 } 8427 8428 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, 8429 loff_t offset) 8430 { 8431 struct file *file = iocb->ki_filp; 8432 struct inode *inode = file->f_mapping->host; 8433 struct btrfs_root *root = BTRFS_I(inode)->root; 8434 struct btrfs_dio_data dio_data = { 0 }; 8435 size_t count = 0; 8436 int flags = 0; 8437 bool wakeup = true; 8438 bool relock = false; 8439 ssize_t ret; 8440 8441 if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset)) 8442 return 0; 8443 8444 inode_dio_begin(inode); 8445 smp_mb__after_atomic(); 8446 8447 /* 8448 * The generic stuff only does filemap_write_and_wait_range, which 8449 * isn't enough if we've written compressed pages to this area, so 8450 * we need to flush the dirty pages again to make absolutely sure 8451 * that any outstanding dirty pages are on disk. 8452 */ 8453 count = iov_iter_count(iter); 8454 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 8455 &BTRFS_I(inode)->runtime_flags)) 8456 filemap_fdatawrite_range(inode->i_mapping, offset, 8457 offset + count - 1); 8458 8459 if (iov_iter_rw(iter) == WRITE) { 8460 /* 8461 * If the write DIO is beyond the EOF, we need to update 8462 * i_size, and that is protected by the i_mutex, so we cannot 8463 * unlock the i_mutex in this case. 8464 */ 8465 if (offset + count <= inode->i_size) { 8466 mutex_unlock(&inode->i_mutex); 8467 relock = true; 8468 } 8469 ret = btrfs_delalloc_reserve_space(inode, offset, count); 8470 if (ret) 8471 goto out; 8472 dio_data.outstanding_extents = div64_u64(count + 8473 BTRFS_MAX_EXTENT_SIZE - 1, 8474 BTRFS_MAX_EXTENT_SIZE); 8475 8476 /* 8477 * We need to know how many extents we reserved so that we can 8478 * do the accounting properly if we go over the number we 8479 * originally calculated. Abuse current->journal_info for this.
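	 *
	 * The consumer side is btrfs_get_blocks_direct(), which fishes
	 * the pointer back out, roughly (a sketch, not the exact code):
	 *
	 *	struct btrfs_dio_data *dio_data = current->journal_info;
	 *	...
	 *	dio_data->outstanding_extents -= num_extents;
	 *
	 * This only works because a DIO write never runs inside a
	 * transaction, which would otherwise own journal_info.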
8480 */ 8481 dio_data.reserve = round_up(count, root->sectorsize); 8482 current->journal_info = &dio_data; 8483 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8484 &BTRFS_I(inode)->runtime_flags)) { 8485 inode_dio_end(inode); 8486 flags = DIO_LOCKING | DIO_SKIP_HOLES; 8487 wakeup = false; 8488 } 8489 8490 ret = __blockdev_direct_IO(iocb, inode, 8491 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 8492 iter, offset, btrfs_get_blocks_direct, NULL, 8493 btrfs_submit_direct, flags); 8494 if (iov_iter_rw(iter) == WRITE) { 8495 current->journal_info = NULL; 8496 if (ret < 0 && ret != -EIOCBQUEUED) { 8497 if (dio_data.reserve) 8498 btrfs_delalloc_release_space(inode, offset, 8499 dio_data.reserve); 8500 } else if (ret >= 0 && (size_t)ret < count) 8501 btrfs_delalloc_release_space(inode, offset, 8502 count - (size_t)ret); 8503 } 8504 out: 8505 if (wakeup) 8506 inode_dio_end(inode); 8507 if (relock) 8508 mutex_lock(&inode->i_mutex); 8509 8510 return ret; 8511 } 8512 8513 #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) 8514 8515 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 8516 __u64 start, __u64 len) 8517 { 8518 int ret; 8519 8520 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); 8521 if (ret) 8522 return ret; 8523 8524 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); 8525 } 8526 8527 int btrfs_readpage(struct file *file, struct page *page) 8528 { 8529 struct extent_io_tree *tree; 8530 tree = &BTRFS_I(page->mapping->host)->io_tree; 8531 return extent_read_full_page(tree, page, btrfs_get_extent, 0); 8532 } 8533 8534 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 8535 { 8536 struct extent_io_tree *tree; 8537 8538 8539 if (current->flags & PF_MEMALLOC) { 8540 redirty_page_for_writepage(wbc, page); 8541 unlock_page(page); 8542 return 0; 8543 } 8544 tree = &BTRFS_I(page->mapping->host)->io_tree; 8545 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 8546 } 8547 8548 static int btrfs_writepages(struct address_space *mapping, 8549 struct writeback_control *wbc) 8550 { 8551 struct extent_io_tree *tree; 8552 8553 tree = &BTRFS_I(mapping->host)->io_tree; 8554 return extent_writepages(tree, mapping, btrfs_get_extent, wbc); 8555 } 8556 8557 static int 8558 btrfs_readpages(struct file *file, struct address_space *mapping, 8559 struct list_head *pages, unsigned nr_pages) 8560 { 8561 struct extent_io_tree *tree; 8562 tree = &BTRFS_I(mapping->host)->io_tree; 8563 return extent_readpages(tree, mapping, pages, nr_pages, 8564 btrfs_get_extent); 8565 } 8566 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8567 { 8568 struct extent_io_tree *tree; 8569 struct extent_map_tree *map; 8570 int ret; 8571 8572 tree = &BTRFS_I(page->mapping->host)->io_tree; 8573 map = &BTRFS_I(page->mapping->host)->extent_tree; 8574 ret = try_release_extent_mapping(map, tree, page, gfp_flags); 8575 if (ret == 1) { 8576 ClearPagePrivate(page); 8577 set_page_private(page, 0); 8578 page_cache_release(page); 8579 } 8580 return ret; 8581 } 8582 8583 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 8584 { 8585 if (PageWriteback(page) || PageDirty(page)) 8586 return 0; 8587 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); 8588 } 8589 8590 static void btrfs_invalidatepage(struct page *page, unsigned int offset, 8591 unsigned int length) 8592 { 8593 struct inode *inode = page->mapping->host; 8594 struct extent_io_tree *tree; 8595 struct btrfs_ordered_extent *ordered; 8596 struct extent_state 
*cached_state = NULL; 8597 u64 page_start = page_offset(page); 8598 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 8599 int inode_evicting = inode->i_state & I_FREEING; 8600 8601 /* 8602 * we have the page locked, so new writeback can't start, 8603 * and the dirty bit won't be cleared while we are here. 8604 * 8605 * Wait for IO on this page so that we can safely clear 8606 * the PagePrivate2 bit and do ordered accounting 8607 */ 8608 wait_on_page_writeback(page); 8609 8610 tree = &BTRFS_I(inode)->io_tree; 8611 if (offset) { 8612 btrfs_releasepage(page, GFP_NOFS); 8613 return; 8614 } 8615 8616 if (!inode_evicting) 8617 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 8618 ordered = btrfs_lookup_ordered_extent(inode, page_start); 8619 if (ordered) { 8620 /* 8621 * IO on this page will never be started, so we need 8622 * to account for any ordered extents now 8623 */ 8624 if (!inode_evicting) 8625 clear_extent_bit(tree, page_start, page_end, 8626 EXTENT_DIRTY | EXTENT_DELALLOC | 8627 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | 8628 EXTENT_DEFRAG, 1, 0, &cached_state, 8629 GFP_NOFS); 8630 /* 8631 * whoever cleared the private bit is responsible 8632 * for the finish_ordered_io 8633 */ 8634 if (TestClearPagePrivate2(page)) { 8635 struct btrfs_ordered_inode_tree *tree; 8636 u64 new_len; 8637 8638 tree = &BTRFS_I(inode)->ordered_tree; 8639 8640 spin_lock_irq(&tree->lock); 8641 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); 8642 new_len = page_start - ordered->file_offset; 8643 if (new_len < ordered->truncated_len) 8644 ordered->truncated_len = new_len; 8645 spin_unlock_irq(&tree->lock); 8646 8647 if (btrfs_dec_test_ordered_pending(inode, &ordered, 8648 page_start, 8649 PAGE_CACHE_SIZE, 1)) 8650 btrfs_finish_ordered_io(ordered); 8651 } 8652 btrfs_put_ordered_extent(ordered); 8653 if (!inode_evicting) { 8654 cached_state = NULL; 8655 lock_extent_bits(tree, page_start, page_end, 0, 8656 &cached_state); 8657 } 8658 } 8659 8660 /* 8661 * Qgroup reserved space handler 8662 * The page here will be in one of two states: 8663 * 1) Already written to disk 8664 * In this case, its reserved space is released from the data rsv 8665 * map and will eventually be freed by the delayed_ref handler. 8666 * So even if we call qgroup_free_data(), it won't decrease the 8667 * reserved space. 8668 * 2) Not written to disk 8669 * This means the reserved space should be freed here. 8670 */ 8671 btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); 8672 if (!inode_evicting) { 8673 clear_extent_bit(tree, page_start, page_end, 8674 EXTENT_LOCKED | EXTENT_DIRTY | 8675 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | 8676 EXTENT_DEFRAG, 1, 1, 8677 &cached_state, GFP_NOFS); 8678 8679 __btrfs_releasepage(page, GFP_NOFS); 8680 } 8681 8682 ClearPageChecked(page); 8683 if (PagePrivate(page)) { 8684 ClearPagePrivate(page); 8685 set_page_private(page, 0); 8686 page_cache_release(page); 8687 } 8688 } 8689 8690 /* 8691 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 8692 * called from a page fault handler when a page is first dirtied. Hence we must 8693 * be careful to check for EOF conditions here. We set the page up correctly 8694 * for a written page which means we get ENOSPC checking when writing into 8695 * holes and correct delalloc and unwritten extent mapping on filesystems that 8696 * support these features. 8697 * 8698 * We are not allowed to take the i_mutex here so we have to play games to 8699 * protect against truncate races as the page could now be beyond EOF.
Because 8700 * vmtruncate() writes the inode size before removing pages, once we have the 8701 * page lock we can determine safely if the page is beyond EOF. If it is not 8702 * beyond EOF, then the page is guaranteed safe against truncation until we 8703 * unlock the page. 8704 */ 8705 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 8706 { 8707 struct page *page = vmf->page; 8708 struct inode *inode = file_inode(vma->vm_file); 8709 struct btrfs_root *root = BTRFS_I(inode)->root; 8710 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 8711 struct btrfs_ordered_extent *ordered; 8712 struct extent_state *cached_state = NULL; 8713 char *kaddr; 8714 unsigned long zero_start; 8715 loff_t size; 8716 int ret; 8717 int reserved = 0; 8718 u64 page_start; 8719 u64 page_end; 8720 8721 sb_start_pagefault(inode->i_sb); 8722 page_start = page_offset(page); 8723 page_end = page_start + PAGE_CACHE_SIZE - 1; 8724 8725 ret = btrfs_delalloc_reserve_space(inode, page_start, 8726 PAGE_CACHE_SIZE); 8727 if (!ret) { 8728 ret = file_update_time(vma->vm_file); 8729 reserved = 1; 8730 } 8731 if (ret) { 8732 if (ret == -ENOMEM) 8733 ret = VM_FAULT_OOM; 8734 else /* -ENOSPC, -EIO, etc */ 8735 ret = VM_FAULT_SIGBUS; 8736 if (reserved) 8737 goto out; 8738 goto out_noreserve; 8739 } 8740 8741 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 8742 again: 8743 lock_page(page); 8744 size = i_size_read(inode); 8745 8746 if ((page->mapping != inode->i_mapping) || 8747 (page_start >= size)) { 8748 /* page got truncated out from underneath us */ 8749 goto out_unlock; 8750 } 8751 wait_on_page_writeback(page); 8752 8753 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 8754 set_page_extent_mapped(page); 8755 8756 /* 8757 * we can't set the delalloc bits if there are pending ordered 8758 * extents. Drop our locks and wait for them to finish 8759 */ 8760 ordered = btrfs_lookup_ordered_extent(inode, page_start); 8761 if (ordered) { 8762 unlock_extent_cached(io_tree, page_start, page_end, 8763 &cached_state, GFP_NOFS); 8764 unlock_page(page); 8765 btrfs_start_ordered_extent(inode, ordered, 1); 8766 btrfs_put_ordered_extent(ordered); 8767 goto again; 8768 } 8769 8770 /* 8771 * XXX - page_mkwrite gets called every time the page is dirtied, even 8772 * if it was already dirty, so for space accounting reasons we need to 8773 * clear any delalloc bits for the range we are fixing to save. There 8774 * is probably a better way to do this, but for now keep consistent with 8775 * prepare_pages in the normal write path. 
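	 *
	 * A worked trace of the double-dirty case this guards against:
	 *
	 *	fault 1: set_extent_delalloc()	-> range counted once
	 *	fault 2: (page still dirty)
	 *		 clear_extent_bit()	-> drop the stale bits first
	 *		 set_extent_delalloc()	-> still counted exactly once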
8776 */ 8777 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 8778 EXTENT_DIRTY | EXTENT_DELALLOC | 8779 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 8780 0, 0, &cached_state, GFP_NOFS); 8781 8782 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 8783 &cached_state); 8784 if (ret) { 8785 unlock_extent_cached(io_tree, page_start, page_end, 8786 &cached_state, GFP_NOFS); 8787 ret = VM_FAULT_SIGBUS; 8788 goto out_unlock; 8789 } 8790 ret = 0; 8791 8792 /* page is wholly or partially inside EOF */ 8793 if (page_start + PAGE_CACHE_SIZE > size) 8794 zero_start = size & ~PAGE_CACHE_MASK; 8795 else 8796 zero_start = PAGE_CACHE_SIZE; 8797 8798 if (zero_start != PAGE_CACHE_SIZE) { 8799 kaddr = kmap(page); 8800 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); 8801 flush_dcache_page(page); 8802 kunmap(page); 8803 } 8804 ClearPageChecked(page); 8805 set_page_dirty(page); 8806 SetPageUptodate(page); 8807 8808 BTRFS_I(inode)->last_trans = root->fs_info->generation; 8809 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 8810 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; 8811 8812 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); 8813 8814 out_unlock: 8815 if (!ret) { 8816 sb_end_pagefault(inode->i_sb); 8817 return VM_FAULT_LOCKED; 8818 } 8819 unlock_page(page); 8820 out: 8821 btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE); 8822 out_noreserve: 8823 sb_end_pagefault(inode->i_sb); 8824 return ret; 8825 } 8826 8827 static int btrfs_truncate(struct inode *inode) 8828 { 8829 struct btrfs_root *root = BTRFS_I(inode)->root; 8830 struct btrfs_block_rsv *rsv; 8831 int ret = 0; 8832 int err = 0; 8833 struct btrfs_trans_handle *trans; 8834 u64 mask = root->sectorsize - 1; 8835 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 8836 8837 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), 8838 (u64)-1); 8839 if (ret) 8840 return ret; 8841 8842 /* 8843 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have 8844 * 3 things going on here 8845 * 8846 * 1) We need to reserve space for our orphan item and the space to 8847 * delete our orphan item. Lord knows we don't want to have a dangling 8848 * orphan item because we didn't reserve space to remove it. 8849 * 8850 * 2) We need to reserve space to update our inode. 8851 * 8852 * 3) We need to have something to cache all the space that is going to 8853 * be freed up by the truncate operation, but also have some slack 8854 * space reserved in case it uses space during the truncate (thank you 8855 * very much snapshotting). 8856 * 8857 * And we need these to all be separate. The fact is we can use a lot 8858 * of space doing the truncate, and we have no earthly idea how much 8859 * space we will use, so we need the truncate reservation to be 8860 * separate so it doesn't end up using space reserved for updating the 8861 * inode or removing the orphan item. We also need to be able to stop 8862 * the transaction and start a new one, which means we need to be able 8863 * to update the inode several times, and we have no way of knowing how 8864 * many times that will be, so we can't just reserve 1 item for the 8865 * entirety of the operation, so that has to be done separately as 8866 * well. Then there is the orphan item, which does indeed need to be 8867 * held on to for the whole operation, and we need nobody to touch this 8868 * reserved space except the orphan code.
8869 * 8870 * So that leaves us with 8871 * 8872 * 1) root->orphan_block_rsv - for the orphan deletion. 8873 * 2) rsv - for the truncate reservation, which we will steal from the 8874 * transaction reservation. 8875 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for 8876 * updating the inode. 8877 */ 8878 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); 8879 if (!rsv) 8880 return -ENOMEM; 8881 rsv->size = min_size; 8882 rsv->failfast = 1; 8883 8884 /* 8885 * 1 for the truncate slack space 8886 * 1 for updating the inode. 8887 */ 8888 trans = btrfs_start_transaction(root, 2); 8889 if (IS_ERR(trans)) { 8890 err = PTR_ERR(trans); 8891 goto out; 8892 } 8893 8894 /* Migrate the slack space for the truncate to our reserve */ 8895 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, 8896 min_size); 8897 BUG_ON(ret); 8898 8899 /* 8900 * So if we truncate and then write and fsync we normally would just 8901 * write the extents that changed, which is a problem if we need to 8902 * first truncate that entire inode. So set this flag so we write out 8903 * all of the extents in the inode to the sync log so we're completely 8904 * safe. 8905 */ 8906 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); 8907 trans->block_rsv = rsv; 8908 8909 while (1) { 8910 ret = btrfs_truncate_inode_items(trans, root, inode, 8911 inode->i_size, 8912 BTRFS_EXTENT_DATA_KEY); 8913 if (ret != -ENOSPC && ret != -EAGAIN) { 8914 err = ret; 8915 break; 8916 } 8917 8918 trans->block_rsv = &root->fs_info->trans_block_rsv; 8919 ret = btrfs_update_inode(trans, root, inode); 8920 if (ret) { 8921 err = ret; 8922 break; 8923 } 8924 8925 btrfs_end_transaction(trans, root); 8926 btrfs_btree_balance_dirty(root); 8927 8928 trans = btrfs_start_transaction(root, 2); 8929 if (IS_ERR(trans)) { 8930 ret = err = PTR_ERR(trans); 8931 trans = NULL; 8932 break; 8933 } 8934 8935 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, 8936 rsv, min_size); 8937 BUG_ON(ret); /* shouldn't happen */ 8938 trans->block_rsv = rsv; 8939 } 8940 8941 if (ret == 0 && inode->i_nlink > 0) { 8942 trans->block_rsv = root->orphan_block_rsv; 8943 ret = btrfs_orphan_del(trans, inode); 8944 if (ret) 8945 err = ret; 8946 } 8947 8948 if (trans) { 8949 trans->block_rsv = &root->fs_info->trans_block_rsv; 8950 ret = btrfs_update_inode(trans, root, inode); 8951 if (ret && !err) 8952 err = ret; 8953 8954 ret = btrfs_end_transaction(trans, root); 8955 btrfs_btree_balance_dirty(root); 8956 } 8957 8958 out: 8959 btrfs_free_block_rsv(root, rsv); 8960 8961 if (ret && !err) 8962 err = ret; 8963 8964 return err; 8965 } 8966 8967 /* 8968 * create a new subvolume directory/inode (helper for the ioctl).
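 * The expected caller is create_subvol() in ioctl.c; a rough usage sketch
 * (simplified from that path):
 *
 *	new_root = btrfs_read_fs_root_no_name(fs_info, &key);
 *	ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
 *
 * where "root" is the parent subvolume whose properties get inherited.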
8969 */ 8970 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 8971 struct btrfs_root *new_root, 8972 struct btrfs_root *parent_root, 8973 u64 new_dirid) 8974 { 8975 struct inode *inode; 8976 int err; 8977 u64 index = 0; 8978 8979 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 8980 new_dirid, new_dirid, 8981 S_IFDIR | (~current_umask() & S_IRWXUGO), 8982 &index); 8983 if (IS_ERR(inode)) 8984 return PTR_ERR(inode); 8985 inode->i_op = &btrfs_dir_inode_operations; 8986 inode->i_fop = &btrfs_dir_file_operations; 8987 8988 set_nlink(inode, 1); 8989 btrfs_i_size_write(inode, 0); 8990 unlock_new_inode(inode); 8991 8992 err = btrfs_subvol_inherit_props(trans, new_root, parent_root); 8993 if (err) 8994 btrfs_err(new_root->fs_info, 8995 "error inheriting subvolume %llu properties: %d", 8996 new_root->root_key.objectid, err); 8997 8998 err = btrfs_update_inode(trans, new_root, inode); 8999 9000 iput(inode); 9001 return err; 9002 } 9003 9004 struct inode *btrfs_alloc_inode(struct super_block *sb) 9005 { 9006 struct btrfs_inode *ei; 9007 struct inode *inode; 9008 9009 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); 9010 if (!ei) 9011 return NULL; 9012 9013 ei->root = NULL; 9014 ei->generation = 0; 9015 ei->last_trans = 0; 9016 ei->last_sub_trans = 0; 9017 ei->logged_trans = 0; 9018 ei->delalloc_bytes = 0; 9019 ei->defrag_bytes = 0; 9020 ei->disk_i_size = 0; 9021 ei->flags = 0; 9022 ei->csum_bytes = 0; 9023 ei->index_cnt = (u64)-1; 9024 ei->dir_index = 0; 9025 ei->last_unlink_trans = 0; 9026 ei->last_log_commit = 0; 9027 9028 spin_lock_init(&ei->lock); 9029 ei->outstanding_extents = 0; 9030 ei->reserved_extents = 0; 9031 9032 ei->runtime_flags = 0; 9033 ei->force_compress = BTRFS_COMPRESS_NONE; 9034 9035 ei->delayed_node = NULL; 9036 9037 ei->i_otime.tv_sec = 0; 9038 ei->i_otime.tv_nsec = 0; 9039 9040 inode = &ei->vfs_inode; 9041 extent_map_tree_init(&ei->extent_tree); 9042 extent_io_tree_init(&ei->io_tree, &inode->i_data); 9043 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); 9044 ei->io_tree.track_uptodate = 1; 9045 ei->io_failure_tree.track_uptodate = 1; 9046 atomic_set(&ei->sync_writers, 0); 9047 mutex_init(&ei->log_mutex); 9048 mutex_init(&ei->delalloc_mutex); 9049 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 9050 INIT_LIST_HEAD(&ei->delalloc_inodes); 9051 RB_CLEAR_NODE(&ei->rb_node); 9052 9053 return inode; 9054 } 9055 9056 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 9057 void btrfs_test_destroy_inode(struct inode *inode) 9058 { 9059 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 9060 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 9061 } 9062 #endif 9063 9064 static void btrfs_i_callback(struct rcu_head *head) 9065 { 9066 struct inode *inode = container_of(head, struct inode, i_rcu); 9067 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 9068 } 9069 9070 void btrfs_destroy_inode(struct inode *inode) 9071 { 9072 struct btrfs_ordered_extent *ordered; 9073 struct btrfs_root *root = BTRFS_I(inode)->root; 9074 9075 WARN_ON(!hlist_empty(&inode->i_dentry)); 9076 WARN_ON(inode->i_data.nrpages); 9077 WARN_ON(BTRFS_I(inode)->outstanding_extents); 9078 WARN_ON(BTRFS_I(inode)->reserved_extents); 9079 WARN_ON(BTRFS_I(inode)->delalloc_bytes); 9080 WARN_ON(BTRFS_I(inode)->csum_bytes); 9081 WARN_ON(BTRFS_I(inode)->defrag_bytes); 9082 9083 /* 9084 * This can happen where we create an inode, but somebody else also 9085 * created the same inode and we need to destroy the one we already 9086 * created. 
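	 * (presumably a lost inode-allocation race: the loser's in-memory
	 *  inode is dropped before BTRFS_I(inode)->root was ever filled in,
	 *  which is exactly the case the NULL check below short-circuits
	 *  straight to the RCU free.)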
9087 */ 9088 if (!root) 9089 goto free; 9090 9091 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, 9092 &BTRFS_I(inode)->runtime_flags)) { 9093 btrfs_info(root->fs_info, "inode %llu still on the orphan list", 9094 btrfs_ino(inode)); 9095 atomic_dec(&root->orphan_inodes); 9096 } 9097 9098 while (1) { 9099 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 9100 if (!ordered) 9101 break; 9102 else { 9103 btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup", 9104 ordered->file_offset, ordered->len); 9105 btrfs_remove_ordered_extent(inode, ordered); /* put twice: once for our lookup ref, once for the tree's ref, which btrfs_remove_ordered_extent() does not drop */ 9106 btrfs_put_ordered_extent(ordered); 9107 btrfs_put_ordered_extent(ordered); 9108 } 9109 } 9110 btrfs_qgroup_check_reserved_leak(inode); 9111 inode_tree_del(inode); 9112 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 9113 free: 9114 call_rcu(&inode->i_rcu, btrfs_i_callback); 9115 } 9116 9117 int btrfs_drop_inode(struct inode *inode) 9118 { 9119 struct btrfs_root *root = BTRFS_I(inode)->root; 9120 9121 if (root == NULL) 9122 return 1; 9123 9124 /* the snap/subvol tree is being deleted */ 9125 if (btrfs_root_refs(&root->root_item) == 0) 9126 return 1; 9127 else 9128 return generic_drop_inode(inode); 9129 } 9130 9131 static void init_once(void *foo) 9132 { 9133 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 9134 9135 inode_init_once(&ei->vfs_inode); 9136 } 9137 9138 void btrfs_destroy_cachep(void) 9139 { 9140 /* 9141 * Make sure all delayed rcu free inodes are flushed before we 9142 * destroy the cache. 9143 */ 9144 rcu_barrier(); 9145 if (btrfs_inode_cachep) 9146 kmem_cache_destroy(btrfs_inode_cachep); 9147 if (btrfs_trans_handle_cachep) 9148 kmem_cache_destroy(btrfs_trans_handle_cachep); 9149 if (btrfs_transaction_cachep) 9150 kmem_cache_destroy(btrfs_transaction_cachep); 9151 if (btrfs_path_cachep) 9152 kmem_cache_destroy(btrfs_path_cachep); 9153 if (btrfs_free_space_cachep) 9154 kmem_cache_destroy(btrfs_free_space_cachep); 9155 if (btrfs_delalloc_work_cachep) 9156 kmem_cache_destroy(btrfs_delalloc_work_cachep); 9157 } 9158 9159 int btrfs_init_cachep(void) 9160 { 9161 btrfs_inode_cachep = kmem_cache_create("btrfs_inode", 9162 sizeof(struct btrfs_inode), 0, 9163 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); 9164 if (!btrfs_inode_cachep) 9165 goto fail; 9166 9167 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", 9168 sizeof(struct btrfs_trans_handle), 0, 9169 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9170 if (!btrfs_trans_handle_cachep) 9171 goto fail; 9172 9173 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction", 9174 sizeof(struct btrfs_transaction), 0, 9175 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9176 if (!btrfs_transaction_cachep) 9177 goto fail; 9178 9179 btrfs_path_cachep = kmem_cache_create("btrfs_path", 9180 sizeof(struct btrfs_path), 0, 9181 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9182 if (!btrfs_path_cachep) 9183 goto fail; 9184 9185 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", 9186 sizeof(struct btrfs_free_space), 0, 9187 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 9188 if (!btrfs_free_space_cachep) 9189 goto fail; 9190 9191 btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work", 9192 sizeof(struct btrfs_delalloc_work), 0, 9193 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, 9194 NULL); 9195 if (!btrfs_delalloc_work_cachep) 9196 goto fail; 9197 9198 return 0; 9199 fail: 9200 btrfs_destroy_cachep(); 9201 return -ENOMEM; 9202 } 9203 9204 static int btrfs_getattr(struct vfsmount *mnt, 9205 struct dentry *dentry, struct kstat *stat)
9206 { 9207 u64 delalloc_bytes; 9208 struct inode *inode = d_inode(dentry); 9209 u32 blocksize = inode->i_sb->s_blocksize; 9210 9211 generic_fillattr(inode, stat); 9212 stat->dev = BTRFS_I(inode)->root->anon_dev; 9213 stat->blksize = PAGE_CACHE_SIZE; 9214 9215 spin_lock(&BTRFS_I(inode)->lock); 9216 delalloc_bytes = BTRFS_I(inode)->delalloc_bytes; 9217 spin_unlock(&BTRFS_I(inode)->lock); 9218 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + 9219 ALIGN(delalloc_bytes, blocksize)) >> 9; 9220 return 0; 9221 } 9222 9223 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 9224 struct inode *new_dir, struct dentry *new_dentry) 9225 { 9226 struct btrfs_trans_handle *trans; 9227 struct btrfs_root *root = BTRFS_I(old_dir)->root; 9228 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 9229 struct inode *new_inode = d_inode(new_dentry); 9230 struct inode *old_inode = d_inode(old_dentry); 9231 struct timespec ctime = CURRENT_TIME; 9232 u64 index = 0; 9233 u64 root_objectid; 9234 int ret; 9235 u64 old_ino = btrfs_ino(old_inode); 9236 9237 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9238 return -EPERM; 9239 9240 /* we only allow renaming a subvolume link between subvolumes */ 9241 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9242 return -EXDEV; 9243 9244 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 9245 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) 9246 return -ENOTEMPTY; 9247 9248 if (S_ISDIR(old_inode->i_mode) && new_inode && 9249 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 9250 return -ENOTEMPTY; 9251 9252 9253 /* check for collisions, even if the name isn't there */ 9254 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, 9255 new_dentry->d_name.name, 9256 new_dentry->d_name.len); 9257 9258 if (ret) { 9259 if (ret == -EEXIST) { 9260 /* we shouldn't get 9261 * -EEXIST without a new_inode */ 9262 if (WARN_ON(!new_inode)) { 9263 return ret; 9264 } 9265 } else { 9266 /* maybe -EOVERFLOW */ 9267 return ret; 9268 } 9269 } 9270 ret = 0; 9271 9272 /* 9273 * we're using rename to replace one file with another. Start IO on it 9274 * now so we don't add too much work to the end of the transaction 9275 */ 9276 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) 9277 filemap_flush(old_inode->i_mapping); 9278 9279 /* close the racy window with snapshot create/destroy ioctl */ 9280 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9281 down_read(&root->fs_info->subvol_sem); 9282 /* 9283 * We want to reserve the absolute worst-case number of items. So if 9284 * both inodes are subvols and we need to unlink them then that would 9285 * require 4 item modifications, but if they are both normal inodes it 9286 * would require 5 item modifications, so we'll assume they're normal 9287 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items 9288 * should cover the worst case number of items we'll modify. 9289 */ 9290 trans = btrfs_start_transaction(root, 11); 9291 if (IS_ERR(trans)) { 9292 ret = PTR_ERR(trans); 9293 goto out_notrans; 9294 } 9295 9296 if (dest != root) 9297 btrfs_record_root_in_trans(trans, dest); 9298 9299 ret = btrfs_set_inode_index(new_dir, &index); 9300 if (ret) 9301 goto out_fail; 9302 9303 BTRFS_I(old_inode)->dir_index = 0ULL; 9304 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { /* force full log commit if subvolume involved.
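		 * (A rename of a subvolume root can't be replayed from the
		 *  log tree, so flag the transaction; the log code later
		 *  checks this, roughly via btrfs_need_log_full_commit(),
		 *  and falls back to a full transaction commit.)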
*/ 9306 btrfs_set_log_full_commit(root->fs_info, trans); 9307 } else { 9308 ret = btrfs_insert_inode_ref(trans, dest, 9309 new_dentry->d_name.name, 9310 new_dentry->d_name.len, 9311 old_ino, 9312 btrfs_ino(new_dir), index); 9313 if (ret) 9314 goto out_fail; 9315 /* 9316 * this is an ugly little race, but the rename is required 9317 * to make sure that if we crash, the inode is either at the 9318 * old name or the new one. pinning the log transaction lets 9319 * us make sure we don't allow a log commit to come in after 9320 * we unlink the name but before we add the new name back in. 9321 */ 9322 btrfs_pin_log_trans(root); 9323 } 9324 9325 inode_inc_iversion(old_dir); 9326 inode_inc_iversion(new_dir); 9327 inode_inc_iversion(old_inode); 9328 old_dir->i_ctime = old_dir->i_mtime = ctime; 9329 new_dir->i_ctime = new_dir->i_mtime = ctime; 9330 old_inode->i_ctime = ctime; 9331 9332 if (old_dentry->d_parent != new_dentry->d_parent) 9333 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); 9334 9335 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 9336 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 9337 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, 9338 old_dentry->d_name.name, 9339 old_dentry->d_name.len); 9340 } else { 9341 ret = __btrfs_unlink_inode(trans, root, old_dir, 9342 d_inode(old_dentry), 9343 old_dentry->d_name.name, 9344 old_dentry->d_name.len); 9345 if (!ret) 9346 ret = btrfs_update_inode(trans, root, old_inode); 9347 } 9348 if (ret) { 9349 btrfs_abort_transaction(trans, root, ret); 9350 goto out_fail; 9351 } 9352 9353 if (new_inode) { 9354 inode_inc_iversion(new_inode); 9355 new_inode->i_ctime = CURRENT_TIME; 9356 if (unlikely(btrfs_ino(new_inode) == 9357 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 9358 root_objectid = BTRFS_I(new_inode)->location.objectid; 9359 ret = btrfs_unlink_subvol(trans, dest, new_dir, 9360 root_objectid, 9361 new_dentry->d_name.name, 9362 new_dentry->d_name.len); 9363 BUG_ON(new_inode->i_nlink == 0); 9364 } else { 9365 ret = btrfs_unlink_inode(trans, dest, new_dir, 9366 d_inode(new_dentry), 9367 new_dentry->d_name.name, 9368 new_dentry->d_name.len); 9369 } 9370 if (!ret && new_inode->i_nlink == 0) 9371 ret = btrfs_orphan_add(trans, d_inode(new_dentry)); 9372 if (ret) { 9373 btrfs_abort_transaction(trans, root, ret); 9374 goto out_fail; 9375 } 9376 } 9377 9378 ret = btrfs_add_link(trans, new_dir, old_inode, 9379 new_dentry->d_name.name, 9380 new_dentry->d_name.len, 0, index); 9381 if (ret) { 9382 btrfs_abort_transaction(trans, root, ret); 9383 goto out_fail; 9384 } 9385 9386 if (old_inode->i_nlink == 1) 9387 BTRFS_I(old_inode)->dir_index = index; 9388 9389 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 9390 struct dentry *parent = new_dentry->d_parent; 9391 btrfs_log_new_name(trans, old_inode, old_dir, parent); 9392 btrfs_end_log_trans(root); 9393 } 9394 out_fail: 9395 btrfs_end_transaction(trans, root); 9396 out_notrans: 9397 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9398 up_read(&root->fs_info->subvol_sem); 9399 9400 return ret; 9401 } 9402 9403 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, 9404 struct inode *new_dir, struct dentry *new_dentry, 9405 unsigned int flags) 9406 { 9407 if (flags & ~RENAME_NOREPLACE) 9408 return -EINVAL; 9409 9410 return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry); 9411 } 9412 9413 static void btrfs_run_delalloc_work(struct btrfs_work *work) 9414 { 9415 struct btrfs_delalloc_work *delalloc_work; 9416 struct inode *inode; 9417 9418 delalloc_work = 
container_of(work, struct btrfs_delalloc_work, 9419 work); 9420 inode = delalloc_work->inode; 9421 if (delalloc_work->wait) { 9422 btrfs_wait_ordered_range(inode, 0, (u64)-1); 9423 } else { 9424 filemap_flush(inode->i_mapping); 9425 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 9426 &BTRFS_I(inode)->runtime_flags)) 9427 filemap_flush(inode->i_mapping); 9428 } 9429 9430 if (delalloc_work->delay_iput) 9431 btrfs_add_delayed_iput(inode); 9432 else 9433 iput(inode); 9434 complete(&delalloc_work->completion); 9435 } 9436 9437 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, 9438 int wait, int delay_iput) 9439 { 9440 struct btrfs_delalloc_work *work; 9441 9442 work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS); 9443 if (!work) 9444 return NULL; 9445 9446 init_completion(&work->completion); 9447 INIT_LIST_HEAD(&work->list); 9448 work->inode = inode; 9449 work->wait = wait; 9450 work->delay_iput = delay_iput; 9451 WARN_ON_ONCE(!inode); 9452 btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, 9453 btrfs_run_delalloc_work, NULL, NULL); 9454 9455 return work; 9456 } 9457 9458 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work) 9459 { 9460 wait_for_completion(&work->completion); 9461 kmem_cache_free(btrfs_delalloc_work_cachep, work); 9462 } 9463 9464 /* 9465 * some fairly slow code that needs optimization. This walks the list 9466 * of all the inodes with pending delalloc and forces them to disk. 9467 */ 9468 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput, 9469 int nr) 9470 { 9471 struct btrfs_inode *binode; 9472 struct inode *inode; 9473 struct btrfs_delalloc_work *work, *next; 9474 struct list_head works; 9475 struct list_head splice; 9476 int ret = 0; 9477 9478 INIT_LIST_HEAD(&works); 9479 INIT_LIST_HEAD(&splice); 9480 9481 mutex_lock(&root->delalloc_mutex); 9482 spin_lock(&root->delalloc_lock); 9483 list_splice_init(&root->delalloc_inodes, &splice); 9484 while (!list_empty(&splice)) { 9485 binode = list_entry(splice.next, struct btrfs_inode, 9486 delalloc_inodes); 9487 9488 list_move_tail(&binode->delalloc_inodes, 9489 &root->delalloc_inodes); 9490 inode = igrab(&binode->vfs_inode); 9491 if (!inode) { 9492 cond_resched_lock(&root->delalloc_lock); 9493 continue; 9494 } 9495 spin_unlock(&root->delalloc_lock); 9496 9497 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 9498 if (!work) { 9499 if (delay_iput) 9500 btrfs_add_delayed_iput(inode); 9501 else 9502 iput(inode); 9503 ret = -ENOMEM; 9504 goto out; 9505 } 9506 list_add_tail(&work->list, &works); 9507 btrfs_queue_work(root->fs_info->flush_workers, 9508 &work->work); 9509 ret++; 9510 if (nr != -1 && ret >= nr) 9511 goto out; 9512 cond_resched(); 9513 spin_lock(&root->delalloc_lock); 9514 } 9515 spin_unlock(&root->delalloc_lock); 9516 9517 out: 9518 list_for_each_entry_safe(work, next, &works, list) { 9519 list_del_init(&work->list); 9520 btrfs_wait_and_free_delalloc_work(work); 9521 } 9522 9523 if (!list_empty_careful(&splice)) { 9524 spin_lock(&root->delalloc_lock); 9525 list_splice_tail(&splice, &root->delalloc_inodes); 9526 spin_unlock(&root->delalloc_lock); 9527 } 9528 mutex_unlock(&root->delalloc_mutex); 9529 return ret; 9530 } 9531 9532 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 9533 { 9534 int ret; 9535 9536 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) 9537 return -EROFS; 9538 9539 ret = __start_delalloc_inodes(root, delay_iput, -1); 9540 if (ret > 0) 9541 ret = 0; 9542 /* 9543 * the filemap_flush 
will queue IO into the worker threads, but 9544 * we have to make sure the IO is actually started and that 9545 * ordered extents get created before we return 9546 */ 9547 atomic_inc(&root->fs_info->async_submit_draining); 9548 while (atomic_read(&root->fs_info->nr_async_submits) || 9549 atomic_read(&root->fs_info->async_delalloc_pages)) { 9550 wait_event(root->fs_info->async_submit_wait, 9551 (atomic_read(&root->fs_info->nr_async_submits) == 0 && 9552 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 9553 } 9554 atomic_dec(&root->fs_info->async_submit_draining); 9555 return ret; 9556 } 9557 9558 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, 9559 int nr) 9560 { 9561 struct btrfs_root *root; 9562 struct list_head splice; 9563 int ret; 9564 9565 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 9566 return -EROFS; 9567 9568 INIT_LIST_HEAD(&splice); 9569 9570 mutex_lock(&fs_info->delalloc_root_mutex); 9571 spin_lock(&fs_info->delalloc_root_lock); 9572 list_splice_init(&fs_info->delalloc_roots, &splice); 9573 while (!list_empty(&splice) && nr) { 9574 root = list_first_entry(&splice, struct btrfs_root, 9575 delalloc_root); 9576 root = btrfs_grab_fs_root(root); 9577 BUG_ON(!root); 9578 list_move_tail(&root->delalloc_root, 9579 &fs_info->delalloc_roots); 9580 spin_unlock(&fs_info->delalloc_root_lock); 9581 9582 ret = __start_delalloc_inodes(root, delay_iput, nr); 9583 btrfs_put_fs_root(root); 9584 if (ret < 0) 9585 goto out; 9586 9587 if (nr != -1) { 9588 nr -= ret; 9589 WARN_ON(nr < 0); 9590 } 9591 spin_lock(&fs_info->delalloc_root_lock); 9592 } 9593 spin_unlock(&fs_info->delalloc_root_lock); 9594 9595 ret = 0; 9596 atomic_inc(&fs_info->async_submit_draining); 9597 while (atomic_read(&fs_info->nr_async_submits) || 9598 atomic_read(&fs_info->async_delalloc_pages)) { 9599 wait_event(fs_info->async_submit_wait, 9600 (atomic_read(&fs_info->nr_async_submits) == 0 && 9601 atomic_read(&fs_info->async_delalloc_pages) == 0)); 9602 } 9603 atomic_dec(&fs_info->async_submit_draining); 9604 out: 9605 if (!list_empty_careful(&splice)) { 9606 spin_lock(&fs_info->delalloc_root_lock); 9607 list_splice_tail(&splice, &fs_info->delalloc_roots); 9608 spin_unlock(&fs_info->delalloc_root_lock); 9609 } 9610 mutex_unlock(&fs_info->delalloc_root_mutex); 9611 return ret; 9612 } 9613 9614 static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 9615 const char *symname) 9616 { 9617 struct btrfs_trans_handle *trans; 9618 struct btrfs_root *root = BTRFS_I(dir)->root; 9619 struct btrfs_path *path; 9620 struct btrfs_key key; 9621 struct inode *inode = NULL; 9622 int err; 9623 int drop_inode = 0; 9624 u64 objectid; 9625 u64 index = 0; 9626 int name_len; 9627 int datasize; 9628 unsigned long ptr; 9629 struct btrfs_file_extent_item *ei; 9630 struct extent_buffer *leaf; 9631 9632 name_len = strlen(symname); 9633 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 9634 return -ENAMETOOLONG; 9635 9636 /* 9637 * 2 items for inode item and ref 9638 * 2 items for dir items 9639 * 1 item for xattr if selinux is on 9640 */ 9641 trans = btrfs_start_transaction(root, 5); 9642 if (IS_ERR(trans)) 9643 return PTR_ERR(trans); 9644 9645 err = btrfs_find_free_ino(root, &objectid); 9646 if (err) 9647 goto out_unlock; 9648 9649 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 9650 dentry->d_name.len, btrfs_ino(dir), objectid, 9651 S_IFLNK|S_IRWXUGO, &index); 9652 if (IS_ERR(inode)) { 9653 err = PTR_ERR(inode); 9654 goto out_unlock; 9655 } 9656 9657 /* 9658 * If the active LSM wants 
to access the inode during 9659 * d_instantiate it needs these. Smack checks to see 9660 * if the filesystem supports xattrs by looking at the 9661 * ops vector. 9662 */ 9663 inode->i_fop = &btrfs_file_operations; 9664 inode->i_op = &btrfs_file_inode_operations; 9665 inode->i_mapping->a_ops = &btrfs_aops; 9666 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 9667 9668 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 9669 if (err) 9670 goto out_unlock_inode; 9671 9672 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 9673 if (err) 9674 goto out_unlock_inode; 9675 9676 path = btrfs_alloc_path(); 9677 if (!path) { 9678 err = -ENOMEM; 9679 goto out_unlock_inode; 9680 } 9681 key.objectid = btrfs_ino(inode); 9682 key.offset = 0; 9683 key.type = BTRFS_EXTENT_DATA_KEY; 9684 datasize = btrfs_file_extent_calc_inline_size(name_len); 9685 err = btrfs_insert_empty_item(trans, root, path, &key, 9686 datasize); 9687 if (err) { 9688 btrfs_free_path(path); 9689 goto out_unlock_inode; 9690 } 9691 leaf = path->nodes[0]; 9692 ei = btrfs_item_ptr(leaf, path->slots[0], 9693 struct btrfs_file_extent_item); 9694 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 9695 btrfs_set_file_extent_type(leaf, ei, 9696 BTRFS_FILE_EXTENT_INLINE); 9697 btrfs_set_file_extent_encryption(leaf, ei, 0); 9698 btrfs_set_file_extent_compression(leaf, ei, 0); 9699 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 9700 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 9701 9702 ptr = btrfs_file_extent_inline_start(ei); 9703 write_extent_buffer(leaf, symname, ptr, name_len); 9704 btrfs_mark_buffer_dirty(leaf); 9705 btrfs_free_path(path); 9706 9707 inode->i_op = &btrfs_symlink_inode_operations; 9708 inode->i_mapping->a_ops = &btrfs_symlink_aops; 9709 inode_set_bytes(inode, name_len); 9710 btrfs_i_size_write(inode, name_len); 9711 err = btrfs_update_inode(trans, root, inode); 9712 if (err) { 9713 drop_inode = 1; 9714 goto out_unlock_inode; 9715 } 9716 9717 unlock_new_inode(inode); 9718 d_instantiate(dentry, inode); 9719 9720 out_unlock: 9721 btrfs_end_transaction(trans, root); 9722 if (drop_inode) { 9723 inode_dec_link_count(inode); 9724 iput(inode); 9725 } 9726 btrfs_btree_balance_dirty(root); 9727 return err; 9728 9729 out_unlock_inode: 9730 drop_inode = 1; 9731 unlock_new_inode(inode); 9732 goto out_unlock; 9733 } 9734 9735 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 9736 u64 start, u64 num_bytes, u64 min_size, 9737 loff_t actual_len, u64 *alloc_hint, 9738 struct btrfs_trans_handle *trans) 9739 { 9740 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 9741 struct extent_map *em; 9742 struct btrfs_root *root = BTRFS_I(inode)->root; 9743 struct btrfs_key ins; 9744 u64 cur_offset = start; 9745 u64 i_size; 9746 u64 cur_bytes; 9747 u64 last_alloc = (u64)-1; 9748 int ret = 0; 9749 bool own_trans = true; 9750 9751 if (trans) 9752 own_trans = false; 9753 while (num_bytes > 0) { 9754 if (own_trans) { 9755 trans = btrfs_start_transaction(root, 3); 9756 if (IS_ERR(trans)) { 9757 ret = PTR_ERR(trans); 9758 break; 9759 } 9760 } 9761 9762 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024); 9763 cur_bytes = max(cur_bytes, min_size); 9764 /* 9765 * If we are severely fragmented we could end up with really 9766 * small allocations, so if the allocator is returning small 9767 * chunks, let's make its job easier by only searching for 9768 * those sized chunks.
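	 *
	 * A worked example: ask for 256M but the allocator can only find
	 * an 8M hole; last_alloc becomes 8M, so every following pass asks
	 * for min(cur_bytes, 8M) (never below min_size) instead of
	 * repeatedly searching for a 256M chunk that doesn't exist.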
9769 */ 9770 cur_bytes = min(cur_bytes, last_alloc); 9771 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0, 9772 *alloc_hint, &ins, 1, 0); 9773 if (ret) { 9774 if (own_trans) 9775 btrfs_end_transaction(trans, root); 9776 break; 9777 } 9778 9779 last_alloc = ins.offset; 9780 ret = insert_reserved_file_extent(trans, inode, 9781 cur_offset, ins.objectid, 9782 ins.offset, ins.offset, 9783 ins.offset, 0, 0, 0, 9784 BTRFS_FILE_EXTENT_PREALLOC); 9785 if (ret) { 9786 btrfs_free_reserved_extent(root, ins.objectid, 9787 ins.offset, 0); 9788 btrfs_abort_transaction(trans, root, ret); 9789 if (own_trans) 9790 btrfs_end_transaction(trans, root); 9791 break; 9792 } 9793 9794 btrfs_drop_extent_cache(inode, cur_offset, 9795 cur_offset + ins.offset -1, 0); 9796 9797 em = alloc_extent_map(); 9798 if (!em) { 9799 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 9800 &BTRFS_I(inode)->runtime_flags); 9801 goto next; 9802 } 9803 9804 em->start = cur_offset; 9805 em->orig_start = cur_offset; 9806 em->len = ins.offset; 9807 em->block_start = ins.objectid; 9808 em->block_len = ins.offset; 9809 em->orig_block_len = ins.offset; 9810 em->ram_bytes = ins.offset; 9811 em->bdev = root->fs_info->fs_devices->latest_bdev; 9812 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 9813 em->generation = trans->transid; 9814 9815 while (1) { 9816 write_lock(&em_tree->lock); 9817 ret = add_extent_mapping(em_tree, em, 1); 9818 write_unlock(&em_tree->lock); 9819 if (ret != -EEXIST) 9820 break; 9821 btrfs_drop_extent_cache(inode, cur_offset, 9822 cur_offset + ins.offset - 1, 9823 0); 9824 } 9825 free_extent_map(em); 9826 next: 9827 num_bytes -= ins.offset; 9828 cur_offset += ins.offset; 9829 *alloc_hint = ins.objectid + ins.offset; 9830 9831 inode_inc_iversion(inode); 9832 inode->i_ctime = CURRENT_TIME; 9833 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 9834 if (!(mode & FALLOC_FL_KEEP_SIZE) && 9835 (actual_len > inode->i_size) && 9836 (cur_offset > inode->i_size)) { 9837 if (cur_offset > actual_len) 9838 i_size = actual_len; 9839 else 9840 i_size = cur_offset; 9841 i_size_write(inode, i_size); 9842 btrfs_ordered_update_i_size(inode, i_size, NULL); 9843 } 9844 9845 ret = btrfs_update_inode(trans, root, inode); 9846 9847 if (ret) { 9848 btrfs_abort_transaction(trans, root, ret); 9849 if (own_trans) 9850 btrfs_end_transaction(trans, root); 9851 break; 9852 } 9853 9854 if (own_trans) 9855 btrfs_end_transaction(trans, root); 9856 } 9857 return ret; 9858 } 9859 9860 int btrfs_prealloc_file_range(struct inode *inode, int mode, 9861 u64 start, u64 num_bytes, u64 min_size, 9862 loff_t actual_len, u64 *alloc_hint) 9863 { 9864 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9865 min_size, actual_len, alloc_hint, 9866 NULL); 9867 } 9868 9869 int btrfs_prealloc_file_range_trans(struct inode *inode, 9870 struct btrfs_trans_handle *trans, int mode, 9871 u64 start, u64 num_bytes, u64 min_size, 9872 loff_t actual_len, u64 *alloc_hint) 9873 { 9874 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, 9875 min_size, actual_len, alloc_hint, trans); 9876 } 9877 9878 static int btrfs_set_page_dirty(struct page *page) 9879 { 9880 return __set_page_dirty_nobuffers(page); 9881 } 9882 9883 static int btrfs_permission(struct inode *inode, int mask) 9884 { 9885 struct btrfs_root *root = BTRFS_I(inode)->root; 9886 umode_t mode = inode->i_mode; 9887 9888 if (mask & MAY_WRITE && 9889 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { 9890 if (btrfs_root_readonly(root)) 9891 return -EROFS; 9892 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) 
static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}

static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	u64 objectid;
	u64 index;
	int ret = 0;

	/*
	 * 5 units required for adding orphan entry
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		goto out;

	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
				btrfs_ino(dir), objectid, mode, &index);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}

	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	inode->i_mapping->a_ops = &btrfs_aops;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
	if (ret)
		goto out_inode;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		goto out_inode;
	ret = btrfs_orphan_add(trans, inode);
	if (ret)
		goto out_inode;

	/*
	 * We set number of links to 0 in btrfs_new_inode(), and here we set
	 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
	 * through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);
	unlock_new_inode(inode);
	d_tmpfile(dentry, inode);
	mark_inode_dirty(inode);

out:
	btrfs_end_transaction(trans, root);
	if (ret)
		iput(inode);
	btrfs_balance_delayed_items(root);
	btrfs_btree_balance_dirty(root);
	return ret;

out_inode:
	unlock_new_inode(inode);
	goto out;
}

/* Inspired by filemap_check_errors() */
int btrfs_inode_check_errors(struct inode *inode)
{
	int ret = 0;

	if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
		ret = -EIO;

	return ret;
}
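
/*
 * Usage sketch (userspace, illustrative only; the mount point and name
 * are hypothetical): btrfs_tmpfile() above is reached through the
 * O_TMPFILE open path.  The new inode is put on the orphan list first,
 * so it is reclaimed after a crash unless the caller later gives it a
 * name:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/btrfs", O_TMPFILE | O_RDWR, 0600);
 *	// ... write scratch data through fd; no name exists yet ...
 *
 *	// Optionally make the inode permanent by linking it in:
 *	char path[64];
 *	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, path, AT_FDCWD, "/mnt/btrfs/name",
 *	       AT_SYMLINK_FOLLOW);
 */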
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename2	= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc		= run_delalloc_range,
	.submit_bio_hook	= btrfs_submit_bio_hook,
	.merge_bio_hook		= btrfs_merge_bio_hook,
	.readpage_end_io_hook	= btrfs_readpage_end_io_hook,
	.writepage_end_io_hook	= btrfs_writepage_end_io_hook,
	.writepage_start_hook	= btrfs_writepage_start_hook,
	.set_bit_hook		= btrfs_set_bit_hook,
	.clear_bit_hook		= btrfs_clear_bit_hook,
	.merge_extent_hook	= btrfs_merge_extent_hook,
	.split_extent_hook	= btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.update_time	= btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
	.d_release	= btrfs_dentry_release,
};
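
/*
 * A hedged note on the absent .bmap above: in kernels of this era the
 * generic VFS helper returns block 0 when an address space provides no
 * ->bmap, roughly:
 *
 *	// Sketch of fs/inode.c's bmap() helper, for illustration only:
 *	sector_t bmap(struct inode *inode, sector_t block)
 *	{
 *		sector_t res = 0;
 *		if (inode->i_mapping->a_ops->bmap)
 *			res = inode->i_mapping->a_ops->bmap(inode->i_mapping,
 *							    block);
 *		return res;
 *	}
 *
 * The swapfile setup code treats a zero block as a hole, so swapon(2)
 * on a btrfs file is expected to fail cleanly with -EINVAL rather than
 * risk the corruption described above btrfs_aops.
 */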