/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
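
/*
 * Illustrative example (not in the original source): the table above is
 * indexed by the mode's format bits shifted down by S_SHIFT.  For a
 * directory, S_IFDIR is 0040000, so (S_IFDIR >> S_SHIFT) == 4 and slot 4
 * holds BTRFS_FT_DIR.  A lookup for an arbitrary mode would read:
 *
 *	u8 ftype = btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 */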

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}
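
/*
 * Illustrative note (not in the original source): an inline extent stores
 * the file data directly in the btree leaf, so the item size is the file
 * extent header plus the (possibly compressed) payload.  Roughly:
 *
 *	datasize = btrfs_file_extent_calc_inline_size(cur_size)
 *	         = header bytes before the inline data + cur_size
 *
 * e.g. a 100 byte file costs one leaf item of header + 100 bytes.
 */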

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	     (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
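
/*
 * Illustrative overview (not in the original source) of how one async_cow
 * and its async_extents flow through the pipeline implemented below:
 *
 *	writepages -> cow_file_range_async()
 *	  phase 1: compress_file_range()        queues async_extents
 *	  phase 2: submit_compressed_extents()  allocates and submits them
 *
 * Phase 1 may run concurrently on many cpus; phase 2 runs in queue order.
 */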

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure the amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;
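
	/*
	 * Illustrative example (not in the original source): the rounding
	 * above aligns the byte count up to a block boundary.  With a 4K
	 * blocksize, start == 0 and end == 4999 (inclusive) gives
	 *
	 *	num_bytes = (4999 - 0 + 4096) & ~4095 = 8192
	 *
	 * i.e. two full blocks cover the 5000 dirty bytes.
	 */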

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent.  Round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  Redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}
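
/*
 * Illustrative sizing note (not in the original source): compression works
 * on units of at most 128K of ram.  With 4K pages that caps nr_pages at
 *
 *	(128 * 1024) / 4096 = 32 pages per unit,
 *
 * so a 1MiB dirty range is carved into eight such units, each queued as
 * its own async_extent by the "goto again" loop above.
 */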

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1);
			if (ret == -ENOSPC)
				goto retry;
			goto out_free; /* JDM: Requeue? */
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret); /* -ENOMEM */

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret); /* -ENOMEM */
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free:
	kfree(async_extent);
	goto out;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
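
/*
 * Illustrative note (not in the original source): the hint biases the
 * allocator toward contiguity.  If a neighboring extent of this file ends
 * at disk byte X, new allocations are attempted near X, and the writeback
 * loops above and below keep the hint rolling forward with
 *
 *	alloc_hint = ins.objectid + ins.offset;
 *
 * i.e. the first byte past the extent just allocated.
 */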

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(root, inode));
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);

	return ret;
out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, NULL,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}

/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}
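
/*
 * Illustrative note (not in the original source): each async_cow carries
 * three callbacks, wired up in cow_file_range_async() below.  Roughly:
 *
 *	work.func         = async_cow_start    (compression, any cpu)
 *	work.ordered_func = async_cow_submit   (IO, strict queue order)
 *	work.ordered_free = async_cow_free     (teardown, after submit)
 *
 * which is what lets compression run in parallel while submission stays
 * ordered.
 */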

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
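
/*
 * Illustrative note (not in the original source): csum_exist_in_range()
 * asks "does any checksum item overlap [bytenr, bytenr + num_bytes)?",
 * returning 1 when csums are found (or the lookup cannot prove their
 * absence) and freeing the looked-up sums.  The nocow path below uses it
 * to force COW whenever csums exist.
 */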

/*
 * called during nocow writeback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nolock = btrfs_is_free_space_inode(root, inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1, page_started,
					     nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	if (nolock) {
		err = btrfs_end_transaction_nolock(trans, root);
	} else {
		err = btrfs_end_transaction(trans, root);
	}
	if (!ret)
		ret = err;

	btrfs_free_path(path);
	return ret;
}
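
/*
 * Illustrative dispatch summary (not in the original source) for
 * run_delalloc_range() below:
 *
 *	NODATACOW inode          -> run_delalloc_nocow(force = 1)
 *	PREALLOC inode           -> run_delalloc_nocow(force = 0)
 *	no compression anywhere  -> cow_file_range() directly
 *	otherwise                -> cow_file_range_async() (may compress)
 *
 * Ranges the nocow pass cannot serve in place are accumulated from
 * cow_start and handed back to cow_file_range().
 */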

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS) &&
		 !(BTRFS_I(inode)->force_compress) &&
		 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}

static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}
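
/*
 * Illustrative accounting sketch (not in the original source): the hooks
 * above and below keep outstanding_extents roughly balanced:
 *
 *	split_extent_hook  -> one range becomes two    -> ++
 *	merge_extent_hook  -> two ranges become one    -> --
 *	set_bit_hook       -> new delalloc range       -> ++ (unless FIRST)
 *	clear_bit_hook     -> delalloc range finished  -> --
 *
 * EXTENT_FIRST_DELALLOC marks a range whose extent was already counted
 * when the space was reserved, so the hooks skip it to avoid double
 * accounting.
 */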

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(root, inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 or 1 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
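
/*
 * Illustrative note (not in the original source): the _start/_done pair
 * above is handed to btrfs_wq_submit_bio() in the write path below.
 * Roughly, the async helper runs
 *
 *	__btrfs_submit_bio_start()  - csum the pages (btrfs_csum_one_bio)
 *	__btrfs_submit_bio_done()   - then map and issue the bio
 *
 * so checksumming is taken off the caller's cpu before the IO goes out.
 */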

/*
 * extent_io.c submission hook.  This does the right thing for csum
 * calculation on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(root, inode))
		metadata = 2;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
	if (ret)
		return ret;

	if (!(rw & REQ_WRITE)) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}
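
/*
 * Illustrative example (not in the original source): the WARN_ON above
 * fires when `end` is page aligned, because `end` is expected to be the
 * inclusive last byte of the range.  For one 4K page starting at offset 0
 * the correct call is
 *
 *	btrfs_set_extent_delalloc(inode, 0, 4095, &cached_state);
 *
 * passing end == 4096 would point at the first byte of the next page.
 */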

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered?  We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EBUSY;
}
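
/*
 * Illustrative summary (not in the original source) of the start hook's
 * return values as used above:
 *
 *	 0       page already has Private2 set, ordered setup is fine
 *	-EAGAIN  a fixup is already pending (PageChecked) or none could
 *	         be allocated; writepage should retry later
 *	-EBUSY   a fixup worker was queued; writepage backs off while the
 *	         worker re-dirties the page under proper delalloc accounting
 */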

static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	if (ret)
		goto out;

	ins.objectid = btrfs_ino(inode);
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	if (ret)
		goto out;
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					btrfs_ino(inode), file_pos, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
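
/*
 * Illustrative note (not in the original source): `ins` is reused above
 * for two different keys.  First as the file extent key,
 *
 *	(inode number, BTRFS_EXTENT_DATA_KEY, file_pos)
 *
 * and then as the extent allocation key,
 *
 *	(disk_bytenr, BTRFS_EXTENT_ITEM_KEY, disk_num_bytes)
 *
 * which records the backref for the newly reserved disk extent.
 */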

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	int compress_type = 0;
	int ret;
	bool nolock;

	ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					     end - start + 1);
	if (!ret)
		return 0;
	BUG_ON(!ordered_extent); /* Logic error */

	nolock = btrfs_is_free_space_inode(root, inode);

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (!ret) {
			if (nolock)
				trans = btrfs_join_transaction_nolock(root);
			else
				trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_update_inode_fallback(trans, root, inode);
			if (ret) /* -ENOMEM or corruption */
				btrfs_abort_transaction(trans, root, ret);
		}
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_unlock;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered_extent->file_offset,
				   ordered_extent->len);
	}
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) { /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
	ret = 0;
out:
	if (root != root->fs_info->tree_root)
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
	if (trans) {
		if (nolock)
			btrfs_end_transaction_nolock(trans, root);
		else
			btrfs_end_transaction(trans, root);
	}
btrfs_put_ordered_extent(ordered_extent); 1924 /* once for the tree */ 1925 btrfs_put_ordered_extent(ordered_extent); 1926 1927 return 0; 1928 out_unlock: 1929 unlock_extent_cached(io_tree, ordered_extent->file_offset, 1930 ordered_extent->file_offset + 1931 ordered_extent->len - 1, &cached_state, GFP_NOFS); 1932 goto out; 1933 } 1934 1935 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 1936 struct extent_state *state, int uptodate) 1937 { 1938 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 1939 1940 ClearPagePrivate2(page); 1941 return btrfs_finish_ordered_io(page->mapping->host, start, end); 1942 } 1943 1944 /* 1945 * when reads are done, we need to check csums to verify the data is correct 1946 * if there's a match, we allow the bio to finish. If not, the code in 1947 * extent_io.c will try to find good copies for us. 1948 */ 1949 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, 1950 struct extent_state *state, int mirror) 1951 { 1952 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); 1953 struct inode *inode = page->mapping->host; 1954 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1955 char *kaddr; 1956 u64 private = ~(u32)0; 1957 int ret; 1958 struct btrfs_root *root = BTRFS_I(inode)->root; 1959 u32 csum = ~(u32)0; 1960 1961 if (PageChecked(page)) { 1962 ClearPageChecked(page); 1963 goto good; 1964 } 1965 1966 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 1967 goto good; 1968 1969 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 1970 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 1971 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, 1972 GFP_NOFS); 1973 return 0; 1974 } 1975 1976 if (state && state->start == start) { 1977 private = state->private; 1978 ret = 0; 1979 } else { 1980 ret = get_state_private(io_tree, start, &private); 1981 } 1982 kaddr = kmap_atomic(page); 1983 if (ret) 1984 goto zeroit; 1985 1986 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); 1987 btrfs_csum_final(csum, (char *)&csum); 1988 if (csum != private) 1989 goto zeroit; 1990 1991 kunmap_atomic(kaddr); 1992 good: 1993 return 0; 1994 1995 zeroit: 1996 printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " 1997 "private %llu\n", 1998 (unsigned long long)btrfs_ino(page->mapping->host), 1999 (unsigned long long)start, csum, 2000 (unsigned long long)private); 2001 memset(kaddr + offset, 1, end - start + 1); 2002 flush_dcache_page(page); 2003 kunmap_atomic(kaddr); 2004 if (private == 0) 2005 return 0; 2006 return -EIO; 2007 } 2008 2009 struct delayed_iput { 2010 struct list_head list; 2011 struct inode *inode; 2012 }; 2013 2014 /* JDM: If this is fs-wide, why can't we add a pointer to 2015 * btrfs_inode instead and avoid the allocation? 
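 * (A sketch of that alternative, untested and purely illustrative:
 * embed a struct list_head in struct btrfs_inode and splice it onto
 * fs_info->delayed_iputs, trading this GFP_NOFS allocation for a few
 * bytes in every in-memory inode.)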
*/ 2016 void btrfs_add_delayed_iput(struct inode *inode) 2017 { 2018 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 2019 struct delayed_iput *delayed; 2020 2021 if (atomic_add_unless(&inode->i_count, -1, 1)) 2022 return; 2023 2024 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); 2025 delayed->inode = inode; 2026 2027 spin_lock(&fs_info->delayed_iput_lock); 2028 list_add_tail(&delayed->list, &fs_info->delayed_iputs); 2029 spin_unlock(&fs_info->delayed_iput_lock); 2030 } 2031 2032 void btrfs_run_delayed_iputs(struct btrfs_root *root) 2033 { 2034 LIST_HEAD(list); 2035 struct btrfs_fs_info *fs_info = root->fs_info; 2036 struct delayed_iput *delayed; 2037 int empty; 2038 2039 spin_lock(&fs_info->delayed_iput_lock); 2040 empty = list_empty(&fs_info->delayed_iputs); 2041 spin_unlock(&fs_info->delayed_iput_lock); 2042 if (empty) 2043 return; 2044 2045 down_read(&root->fs_info->cleanup_work_sem); 2046 spin_lock(&fs_info->delayed_iput_lock); 2047 list_splice_init(&fs_info->delayed_iputs, &list); 2048 spin_unlock(&fs_info->delayed_iput_lock); 2049 2050 while (!list_empty(&list)) { 2051 delayed = list_entry(list.next, struct delayed_iput, list); 2052 list_del(&delayed->list); 2053 iput(delayed->inode); 2054 kfree(delayed); 2055 } 2056 up_read(&root->fs_info->cleanup_work_sem); 2057 } 2058 2059 enum btrfs_orphan_cleanup_state { 2060 ORPHAN_CLEANUP_STARTED = 1, 2061 ORPHAN_CLEANUP_DONE = 2, 2062 }; 2063 2064 /* 2065 * This is called in transaction commit time. If there are no orphan 2066 * files in the subvolume, it removes orphan item and frees block_rsv 2067 * structure. 2068 */ 2069 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 2070 struct btrfs_root *root) 2071 { 2072 struct btrfs_block_rsv *block_rsv; 2073 int ret; 2074 2075 if (!list_empty(&root->orphan_list) || 2076 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) 2077 return; 2078 2079 spin_lock(&root->orphan_lock); 2080 if (!list_empty(&root->orphan_list)) { 2081 spin_unlock(&root->orphan_lock); 2082 return; 2083 } 2084 2085 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { 2086 spin_unlock(&root->orphan_lock); 2087 return; 2088 } 2089 2090 block_rsv = root->orphan_block_rsv; 2091 root->orphan_block_rsv = NULL; 2092 spin_unlock(&root->orphan_lock); 2093 2094 if (root->orphan_item_inserted && 2095 btrfs_root_refs(&root->root_item) > 0) { 2096 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, 2097 root->root_key.objectid); 2098 BUG_ON(ret); 2099 root->orphan_item_inserted = 0; 2100 } 2101 2102 if (block_rsv) { 2103 WARN_ON(block_rsv->size > 0); 2104 btrfs_free_block_rsv(root, block_rsv); 2105 } 2106 } 2107 2108 /* 2109 * This creates an orphan entry for the given inode in case something goes 2110 * wrong in the middle of an unlink/truncate. 2111 * 2112 * NOTE: caller of this function should reserve 5 units of metadata for 2113 * this function. 
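 *
 * A hypothetical caller, to make the reservation concrete (the 5 is
 * an item count handed to btrfs_start_transaction, not a byte count):
 *
 *	trans = btrfs_start_transaction(root, 5);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_orphan_add(trans, inode);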
2114 */ 2115 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) 2116 { 2117 struct btrfs_root *root = BTRFS_I(inode)->root; 2118 struct btrfs_block_rsv *block_rsv = NULL; 2119 int reserve = 0; 2120 int insert = 0; 2121 int ret; 2122 2123 if (!root->orphan_block_rsv) { 2124 block_rsv = btrfs_alloc_block_rsv(root); 2125 if (!block_rsv) 2126 return -ENOMEM; 2127 } 2128 2129 spin_lock(&root->orphan_lock); 2130 if (!root->orphan_block_rsv) { 2131 root->orphan_block_rsv = block_rsv; 2132 } else if (block_rsv) { 2133 btrfs_free_block_rsv(root, block_rsv); 2134 block_rsv = NULL; 2135 } 2136 2137 if (list_empty(&BTRFS_I(inode)->i_orphan)) { 2138 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2139 #if 0 2140 /* 2141 * For proper ENOSPC handling, we should do orphan 2142 * cleanup when mounting. But this introduces backward 2143 * compatibility issue. 2144 */ 2145 if (!xchg(&root->orphan_item_inserted, 1)) 2146 insert = 2; 2147 else 2148 insert = 1; 2149 #endif 2150 insert = 1; 2151 } 2152 2153 if (!BTRFS_I(inode)->orphan_meta_reserved) { 2154 BTRFS_I(inode)->orphan_meta_reserved = 1; 2155 reserve = 1; 2156 } 2157 spin_unlock(&root->orphan_lock); 2158 2159 /* grab metadata reservation from transaction handle */ 2160 if (reserve) { 2161 ret = btrfs_orphan_reserve_metadata(trans, inode); 2162 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */ 2163 } 2164 2165 /* insert an orphan item to track this unlinked/truncated file */ 2166 if (insert >= 1) { 2167 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 2168 if (ret && ret != -EEXIST) { 2169 btrfs_abort_transaction(trans, root, ret); 2170 return ret; 2171 } 2172 ret = 0; 2173 } 2174 2175 /* insert an orphan item to track subvolume contains orphan files */ 2176 if (insert >= 2) { 2177 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 2178 root->root_key.objectid); 2179 if (ret && ret != -EEXIST) { 2180 btrfs_abort_transaction(trans, root, ret); 2181 return ret; 2182 } 2183 } 2184 return 0; 2185 } 2186 2187 /* 2188 * We have done the truncate/delete so we can go ahead and remove the orphan 2189 * item for this particular inode. 2190 */ 2191 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) 2192 { 2193 struct btrfs_root *root = BTRFS_I(inode)->root; 2194 int delete_item = 0; 2195 int release_rsv = 0; 2196 int ret = 0; 2197 2198 spin_lock(&root->orphan_lock); 2199 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 2200 list_del_init(&BTRFS_I(inode)->i_orphan); 2201 delete_item = 1; 2202 } 2203 2204 if (BTRFS_I(inode)->orphan_meta_reserved) { 2205 BTRFS_I(inode)->orphan_meta_reserved = 0; 2206 release_rsv = 1; 2207 } 2208 spin_unlock(&root->orphan_lock); 2209 2210 if (trans && delete_item) { 2211 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); 2212 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2213 } 2214 2215 if (release_rsv) 2216 btrfs_orphan_release_metadata(inode); 2217 2218 return 0; 2219 } 2220 2221 /* 2222 * this cleans up any orphans that may be left on the list from the last use 2223 * of this root. 
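 *
 * Orphan items are keyed as
 *	(BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, inode number)
 * so the loop below searches from offset (u64)-1 and walks backwards
 * through every orphan recorded in this root.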
2224 */ 2225 int btrfs_orphan_cleanup(struct btrfs_root *root) 2226 { 2227 struct btrfs_path *path; 2228 struct extent_buffer *leaf; 2229 struct btrfs_key key, found_key; 2230 struct btrfs_trans_handle *trans; 2231 struct inode *inode; 2232 u64 last_objectid = 0; 2233 int ret = 0, nr_unlink = 0, nr_truncate = 0; 2234 2235 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) 2236 return 0; 2237 2238 path = btrfs_alloc_path(); 2239 if (!path) { 2240 ret = -ENOMEM; 2241 goto out; 2242 } 2243 path->reada = -1; 2244 2245 key.objectid = BTRFS_ORPHAN_OBJECTID; 2246 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); 2247 key.offset = (u64)-1; 2248 2249 while (1) { 2250 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2251 if (ret < 0) 2252 goto out; 2253 2254 /* 2255 * if ret == 0 means we found what we were searching for, which 2256 * is weird, but possible, so only screw with path if we didn't 2257 * find the key and see if we have stuff that matches 2258 */ 2259 if (ret > 0) { 2260 ret = 0; 2261 if (path->slots[0] == 0) 2262 break; 2263 path->slots[0]--; 2264 } 2265 2266 /* pull out the item */ 2267 leaf = path->nodes[0]; 2268 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 2269 2270 /* make sure the item matches what we want */ 2271 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) 2272 break; 2273 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY) 2274 break; 2275 2276 /* release the path since we're done with it */ 2277 btrfs_release_path(path); 2278 2279 /* 2280 * this is where we are basically btrfs_lookup, without the 2281 * crossing root thing. we store the inode number in the 2282 * offset of the orphan item. 2283 */ 2284 2285 if (found_key.offset == last_objectid) { 2286 printk(KERN_ERR "btrfs: Error removing orphan entry, " 2287 "stopping orphan cleanup\n"); 2288 ret = -EINVAL; 2289 goto out; 2290 } 2291 2292 last_objectid = found_key.offset; 2293 2294 found_key.objectid = found_key.offset; 2295 found_key.type = BTRFS_INODE_ITEM_KEY; 2296 found_key.offset = 0; 2297 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 2298 ret = PTR_RET(inode); 2299 if (ret && ret != -ESTALE) 2300 goto out; 2301 2302 if (ret == -ESTALE && root == root->fs_info->tree_root) { 2303 struct btrfs_root *dead_root; 2304 struct btrfs_fs_info *fs_info = root->fs_info; 2305 int is_dead_root = 0; 2306 2307 /* 2308 * this is an orphan in the tree root. Currently these 2309 * could come from 2 sources: 2310 * a) a snapshot deletion in progress 2311 * b) a free space cache inode 2312 * We need to distinguish those two, as the snapshot 2313 * orphan must not get deleted. 2314 * find_dead_roots already ran before us, so if this 2315 * is a snapshot deletion, we should find the root 2316 * in the dead_roots list 2317 */ 2318 spin_lock(&fs_info->trans_lock); 2319 list_for_each_entry(dead_root, &fs_info->dead_roots, 2320 root_list) { 2321 if (dead_root->root_key.objectid == 2322 found_key.objectid) { 2323 is_dead_root = 1; 2324 break; 2325 } 2326 } 2327 spin_unlock(&fs_info->trans_lock); 2328 if (is_dead_root) { 2329 /* prevent this orphan from being found again */ 2330 key.offset = found_key.objectid - 1; 2331 continue; 2332 } 2333 } 2334 /* 2335 * Inode is already gone but the orphan item is still there, 2336 * kill the orphan item. 
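		 * (-ESTALE from btrfs_iget above is how we learn that the
		 * inode item itself could not be read back)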
2337 */ 2338 if (ret == -ESTALE) { 2339 trans = btrfs_start_transaction(root, 1); 2340 if (IS_ERR(trans)) { 2341 ret = PTR_ERR(trans); 2342 goto out; 2343 } 2344 ret = btrfs_del_orphan_item(trans, root, 2345 found_key.objectid); 2346 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2347 btrfs_end_transaction(trans, root); 2348 continue; 2349 } 2350 2351 /* 2352 * add this inode to the orphan list so btrfs_orphan_del does 2353 * the proper thing when we hit it 2354 */ 2355 spin_lock(&root->orphan_lock); 2356 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2357 spin_unlock(&root->orphan_lock); 2358 2359 /* if we have links, this was a truncate, lets do that */ 2360 if (inode->i_nlink) { 2361 if (!S_ISREG(inode->i_mode)) { 2362 WARN_ON(1); 2363 iput(inode); 2364 continue; 2365 } 2366 nr_truncate++; 2367 ret = btrfs_truncate(inode); 2368 } else { 2369 nr_unlink++; 2370 } 2371 2372 /* this will do delete_inode and everything for us */ 2373 iput(inode); 2374 if (ret) 2375 goto out; 2376 } 2377 /* release the path since we're done with it */ 2378 btrfs_release_path(path); 2379 2380 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; 2381 2382 if (root->orphan_block_rsv) 2383 btrfs_block_rsv_release(root, root->orphan_block_rsv, 2384 (u64)-1); 2385 2386 if (root->orphan_block_rsv || root->orphan_item_inserted) { 2387 trans = btrfs_join_transaction(root); 2388 if (!IS_ERR(trans)) 2389 btrfs_end_transaction(trans, root); 2390 } 2391 2392 if (nr_unlink) 2393 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); 2394 if (nr_truncate) 2395 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); 2396 2397 out: 2398 if (ret) 2399 printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret); 2400 btrfs_free_path(path); 2401 return ret; 2402 } 2403 2404 /* 2405 * very simple check to peek ahead in the leaf looking for xattrs. If we 2406 * don't find any xattrs, we know there can't be any acls. 2407 * 2408 * slot is the slot the inode is in, objectid is the objectid of the inode 2409 */ 2410 static noinline int acls_after_inode_item(struct extent_buffer *leaf, 2411 int slot, u64 objectid) 2412 { 2413 u32 nritems = btrfs_header_nritems(leaf); 2414 struct btrfs_key found_key; 2415 int scanned = 0; 2416 2417 slot++; 2418 while (slot < nritems) { 2419 btrfs_item_key_to_cpu(leaf, &found_key, slot); 2420 2421 /* we found a different objectid, there must not be acls */ 2422 if (found_key.objectid != objectid) 2423 return 0; 2424 2425 /* we found an xattr, assume we've got an acl */ 2426 if (found_key.type == BTRFS_XATTR_ITEM_KEY) 2427 return 1; 2428 2429 /* 2430 * we found a key greater than an xattr key, there can't 2431 * be any acls later on 2432 */ 2433 if (found_key.type > BTRFS_XATTR_ITEM_KEY) 2434 return 0; 2435 2436 slot++; 2437 scanned++; 2438 2439 /* 2440 * it goes inode, inode backrefs, xattrs, extents, 2441 * so if there are a ton of hard links to an inode there can 2442 * be a lot of backrefs. Don't waste time searching too hard, 2443 * this is just an optimization 2444 */ 2445 if (scanned >= 8) 2446 break; 2447 } 2448 /* we hit the end of the leaf before we found an xattr or 2449 * something larger than an xattr. 
We have to assume the inode 2450 * has acls 2451 */ 2452 return 1; 2453 } 2454 2455 /* 2456 * read an inode from the btree into the in-memory inode 2457 */ 2458 static void btrfs_read_locked_inode(struct inode *inode) 2459 { 2460 struct btrfs_path *path; 2461 struct extent_buffer *leaf; 2462 struct btrfs_inode_item *inode_item; 2463 struct btrfs_timespec *tspec; 2464 struct btrfs_root *root = BTRFS_I(inode)->root; 2465 struct btrfs_key location; 2466 int maybe_acls; 2467 u32 rdev; 2468 int ret; 2469 bool filled = false; 2470 2471 ret = btrfs_fill_inode(inode, &rdev); 2472 if (!ret) 2473 filled = true; 2474 2475 path = btrfs_alloc_path(); 2476 if (!path) 2477 goto make_bad; 2478 2479 path->leave_spinning = 1; 2480 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 2481 2482 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 2483 if (ret) 2484 goto make_bad; 2485 2486 leaf = path->nodes[0]; 2487 2488 if (filled) 2489 goto cache_acl; 2490 2491 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2492 struct btrfs_inode_item); 2493 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 2494 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); 2495 inode->i_uid = btrfs_inode_uid(leaf, inode_item); 2496 inode->i_gid = btrfs_inode_gid(leaf, inode_item); 2497 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); 2498 2499 tspec = btrfs_inode_atime(inode_item); 2500 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2501 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2502 2503 tspec = btrfs_inode_mtime(inode_item); 2504 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2505 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2506 2507 tspec = btrfs_inode_ctime(inode_item); 2508 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); 2509 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); 2510 2511 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 2512 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 2513 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item); 2514 inode->i_generation = BTRFS_I(inode)->generation; 2515 inode->i_rdev = 0; 2516 rdev = btrfs_inode_rdev(leaf, inode_item); 2517 2518 BTRFS_I(inode)->index_cnt = (u64)-1; 2519 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2520 cache_acl: 2521 /* 2522 * try to precache a NULL acl entry for files that don't have 2523 * any xattrs or acls 2524 */ 2525 maybe_acls = acls_after_inode_item(leaf, path->slots[0], 2526 btrfs_ino(inode)); 2527 if (!maybe_acls) 2528 cache_no_acl(inode); 2529 2530 btrfs_free_path(path); 2531 2532 switch (inode->i_mode & S_IFMT) { 2533 case S_IFREG: 2534 inode->i_mapping->a_ops = &btrfs_aops; 2535 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2536 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 2537 inode->i_fop = &btrfs_file_operations; 2538 inode->i_op = &btrfs_file_inode_operations; 2539 break; 2540 case S_IFDIR: 2541 inode->i_fop = &btrfs_dir_file_operations; 2542 if (root == root->fs_info->tree_root) 2543 inode->i_op = &btrfs_dir_ro_inode_operations; 2544 else 2545 inode->i_op = &btrfs_dir_inode_operations; 2546 break; 2547 case S_IFLNK: 2548 inode->i_op = &btrfs_symlink_inode_operations; 2549 inode->i_mapping->a_ops = &btrfs_symlink_aops; 2550 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 2551 break; 2552 default: 2553 inode->i_op = &btrfs_special_inode_operations; 2554 init_special_inode(inode, inode->i_mode, rdev); 2555 break; 2556 } 2557 2558 
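	/* mirror the btrfs inode flags into the VFS-level i_flags */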
btrfs_update_iflags(inode); 2559 return; 2560 2561 make_bad: 2562 btrfs_free_path(path); 2563 make_bad_inode(inode); 2564 } 2565 2566 /* 2567 * given a leaf and an inode, copy the inode fields into the leaf 2568 */ 2569 static void fill_inode_item(struct btrfs_trans_handle *trans, 2570 struct extent_buffer *leaf, 2571 struct btrfs_inode_item *item, 2572 struct inode *inode) 2573 { 2574 btrfs_set_inode_uid(leaf, item, inode->i_uid); 2575 btrfs_set_inode_gid(leaf, item, inode->i_gid); 2576 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); 2577 btrfs_set_inode_mode(leaf, item, inode->i_mode); 2578 btrfs_set_inode_nlink(leaf, item, inode->i_nlink); 2579 2580 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item), 2581 inode->i_atime.tv_sec); 2582 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item), 2583 inode->i_atime.tv_nsec); 2584 2585 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item), 2586 inode->i_mtime.tv_sec); 2587 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item), 2588 inode->i_mtime.tv_nsec); 2589 2590 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item), 2591 inode->i_ctime.tv_sec); 2592 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item), 2593 inode->i_ctime.tv_nsec); 2594 2595 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); 2596 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); 2597 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence); 2598 btrfs_set_inode_transid(leaf, item, trans->transid); 2599 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2600 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2601 btrfs_set_inode_block_group(leaf, item, 0); 2602 } 2603 2604 /* 2605 * copy everything in the in-memory inode into the btree. 2606 */ 2607 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, 2608 struct btrfs_root *root, struct inode *inode) 2609 { 2610 struct btrfs_inode_item *inode_item; 2611 struct btrfs_path *path; 2612 struct extent_buffer *leaf; 2613 int ret; 2614 2615 path = btrfs_alloc_path(); 2616 if (!path) 2617 return -ENOMEM; 2618 2619 path->leave_spinning = 1; 2620 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 2621 1); 2622 if (ret) { 2623 if (ret > 0) 2624 ret = -ENOENT; 2625 goto failed; 2626 } 2627 2628 btrfs_unlock_up_safe(path, 1); 2629 leaf = path->nodes[0]; 2630 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2631 struct btrfs_inode_item); 2632 2633 fill_inode_item(trans, leaf, inode_item, inode); 2634 btrfs_mark_buffer_dirty(leaf); 2635 btrfs_set_inode_last_trans(trans, inode); 2636 ret = 0; 2637 failed: 2638 btrfs_free_path(path); 2639 return ret; 2640 } 2641 2642 /* 2643 * copy everything in the in-memory inode into the btree. 2644 */ 2645 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, 2646 struct btrfs_root *root, struct inode *inode) 2647 { 2648 int ret; 2649 2650 /* 2651 * If the inode is a free space inode, we can deadlock during commit 2652 * if we put it into the delayed code. 
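	 * (roughly: the free space cache is written back at commit time,
	 * which is also when the delayed items are flushed)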
2653 * 2654 * The data relocation inode should also be directly updated 2655 * without delay 2656 */ 2657 if (!btrfs_is_free_space_inode(root, inode) 2658 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { 2659 ret = btrfs_delayed_update_inode(trans, root, inode); 2660 if (!ret) 2661 btrfs_set_inode_last_trans(trans, inode); 2662 return ret; 2663 } 2664 2665 return btrfs_update_inode_item(trans, root, inode); 2666 } 2667 2668 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 2669 struct btrfs_root *root, struct inode *inode) 2670 { 2671 int ret; 2672 2673 ret = btrfs_update_inode(trans, root, inode); 2674 if (ret == -ENOSPC) 2675 return btrfs_update_inode_item(trans, root, inode); 2676 return ret; 2677 } 2678 2679 /* 2680 * unlink helper that gets used here in inode.c and in the tree logging 2681 * recovery code. It remove a link in a directory with a given name, and 2682 * also drops the back refs in the inode to the directory 2683 */ 2684 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, 2685 struct btrfs_root *root, 2686 struct inode *dir, struct inode *inode, 2687 const char *name, int name_len) 2688 { 2689 struct btrfs_path *path; 2690 int ret = 0; 2691 struct extent_buffer *leaf; 2692 struct btrfs_dir_item *di; 2693 struct btrfs_key key; 2694 u64 index; 2695 u64 ino = btrfs_ino(inode); 2696 u64 dir_ino = btrfs_ino(dir); 2697 2698 path = btrfs_alloc_path(); 2699 if (!path) { 2700 ret = -ENOMEM; 2701 goto out; 2702 } 2703 2704 path->leave_spinning = 1; 2705 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 2706 name, name_len, -1); 2707 if (IS_ERR(di)) { 2708 ret = PTR_ERR(di); 2709 goto err; 2710 } 2711 if (!di) { 2712 ret = -ENOENT; 2713 goto err; 2714 } 2715 leaf = path->nodes[0]; 2716 btrfs_dir_item_key_to_cpu(leaf, di, &key); 2717 ret = btrfs_delete_one_dir_name(trans, root, path, di); 2718 if (ret) 2719 goto err; 2720 btrfs_release_path(path); 2721 2722 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, 2723 dir_ino, &index); 2724 if (ret) { 2725 printk(KERN_INFO "btrfs failed to delete reference to %.*s, " 2726 "inode %llu parent %llu\n", name_len, name, 2727 (unsigned long long)ino, (unsigned long long)dir_ino); 2728 btrfs_abort_transaction(trans, root, ret); 2729 goto err; 2730 } 2731 2732 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 2733 if (ret) { 2734 btrfs_abort_transaction(trans, root, ret); 2735 goto err; 2736 } 2737 2738 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 2739 inode, dir_ino); 2740 if (ret != 0 && ret != -ENOENT) { 2741 btrfs_abort_transaction(trans, root, ret); 2742 goto err; 2743 } 2744 2745 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 2746 dir, index); 2747 if (ret == -ENOENT) 2748 ret = 0; 2749 err: 2750 btrfs_free_path(path); 2751 if (ret) 2752 goto out; 2753 2754 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 2755 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; 2756 btrfs_update_inode(trans, root, dir); 2757 out: 2758 return ret; 2759 } 2760 2761 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 2762 struct btrfs_root *root, 2763 struct inode *dir, struct inode *inode, 2764 const char *name, int name_len) 2765 { 2766 int ret; 2767 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 2768 if (!ret) { 2769 btrfs_drop_nlink(inode); 2770 ret = btrfs_update_inode(trans, root, inode); 2771 } 2772 return ret; 2773 } 2774 2775 2776 /* helper to check if there is any shared block in the path */ 
2777 static int check_path_shared(struct btrfs_root *root, 2778 struct btrfs_path *path) 2779 { 2780 struct extent_buffer *eb; 2781 int level; 2782 u64 refs = 1; 2783 2784 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2785 int ret; 2786 2787 if (!path->nodes[level]) 2788 break; 2789 eb = path->nodes[level]; 2790 if (!btrfs_block_can_be_shared(root, eb)) 2791 continue; 2792 ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len, 2793 &refs, NULL); 2794 if (refs > 1) 2795 return 1; 2796 } 2797 return 0; 2798 } 2799 2800 /* 2801 * helper to start transaction for unlink and rmdir. 2802 * 2803 * unlink and rmdir are special in btrfs, they do not always free space. 2804 * so in enospc case, we should make sure they will free space before 2805 * allowing them to use the global metadata reservation. 2806 */ 2807 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, 2808 struct dentry *dentry) 2809 { 2810 struct btrfs_trans_handle *trans; 2811 struct btrfs_root *root = BTRFS_I(dir)->root; 2812 struct btrfs_path *path; 2813 struct btrfs_inode_ref *ref; 2814 struct btrfs_dir_item *di; 2815 struct inode *inode = dentry->d_inode; 2816 u64 index; 2817 int check_link = 1; 2818 int err = -ENOSPC; 2819 int ret; 2820 u64 ino = btrfs_ino(inode); 2821 u64 dir_ino = btrfs_ino(dir); 2822 2823 /* 2824 * 1 for the possible orphan item 2825 * 1 for the dir item 2826 * 1 for the dir index 2827 * 1 for the inode ref 2828 * 1 for the inode ref in the tree log 2829 * 2 for the dir entries in the log 2830 * 1 for the inode 2831 */ 2832 trans = btrfs_start_transaction(root, 8); 2833 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 2834 return trans; 2835 2836 if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 2837 return ERR_PTR(-ENOSPC); 2838 2839 /* check if there is someone else holds reference */ 2840 if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1) 2841 return ERR_PTR(-ENOSPC); 2842 2843 if (atomic_read(&inode->i_count) > 2) 2844 return ERR_PTR(-ENOSPC); 2845 2846 if (xchg(&root->fs_info->enospc_unlink, 1)) 2847 return ERR_PTR(-ENOSPC); 2848 2849 path = btrfs_alloc_path(); 2850 if (!path) { 2851 root->fs_info->enospc_unlink = 0; 2852 return ERR_PTR(-ENOMEM); 2853 } 2854 2855 /* 1 for the orphan item */ 2856 trans = btrfs_start_transaction(root, 1); 2857 if (IS_ERR(trans)) { 2858 btrfs_free_path(path); 2859 root->fs_info->enospc_unlink = 0; 2860 return trans; 2861 } 2862 2863 path->skip_locking = 1; 2864 path->search_commit_root = 1; 2865 2866 ret = btrfs_lookup_inode(trans, root, path, 2867 &BTRFS_I(dir)->location, 0); 2868 if (ret < 0) { 2869 err = ret; 2870 goto out; 2871 } 2872 if (ret == 0) { 2873 if (check_path_shared(root, path)) 2874 goto out; 2875 } else { 2876 check_link = 0; 2877 } 2878 btrfs_release_path(path); 2879 2880 ret = btrfs_lookup_inode(trans, root, path, 2881 &BTRFS_I(inode)->location, 0); 2882 if (ret < 0) { 2883 err = ret; 2884 goto out; 2885 } 2886 if (ret == 0) { 2887 if (check_path_shared(root, path)) 2888 goto out; 2889 } else { 2890 check_link = 0; 2891 } 2892 btrfs_release_path(path); 2893 2894 if (ret == 0 && S_ISREG(inode->i_mode)) { 2895 ret = btrfs_lookup_file_extent(trans, root, path, 2896 ino, (u64)-1, 0); 2897 if (ret < 0) { 2898 err = ret; 2899 goto out; 2900 } 2901 BUG_ON(ret == 0); /* Corruption */ 2902 if (check_path_shared(root, path)) 2903 goto out; 2904 btrfs_release_path(path); 2905 } 2906 2907 if (!check_link) { 2908 err = 0; 2909 goto out; 2910 } 2911 2912 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 2913 
dentry->d_name.name, dentry->d_name.len, 0); 2914 if (IS_ERR(di)) { 2915 err = PTR_ERR(di); 2916 goto out; 2917 } 2918 if (di) { 2919 if (check_path_shared(root, path)) 2920 goto out; 2921 } else { 2922 err = 0; 2923 goto out; 2924 } 2925 btrfs_release_path(path); 2926 2927 ref = btrfs_lookup_inode_ref(trans, root, path, 2928 dentry->d_name.name, dentry->d_name.len, 2929 ino, dir_ino, 0); 2930 if (IS_ERR(ref)) { 2931 err = PTR_ERR(ref); 2932 goto out; 2933 } 2934 BUG_ON(!ref); /* Logic error */ 2935 if (check_path_shared(root, path)) 2936 goto out; 2937 index = btrfs_inode_ref_index(path->nodes[0], ref); 2938 btrfs_release_path(path); 2939 2940 /* 2941 * This is a commit root search, if we can lookup inode item and other 2942 * relative items in the commit root, it means the transaction of 2943 * dir/file creation has been committed, and the dir index item that we 2944 * delay to insert has also been inserted into the commit root. So 2945 * we needn't worry about the delayed insertion of the dir index item 2946 * here. 2947 */ 2948 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, 2949 dentry->d_name.name, dentry->d_name.len, 0); 2950 if (IS_ERR(di)) { 2951 err = PTR_ERR(di); 2952 goto out; 2953 } 2954 BUG_ON(ret == -ENOENT); 2955 if (check_path_shared(root, path)) 2956 goto out; 2957 2958 err = 0; 2959 out: 2960 btrfs_free_path(path); 2961 /* Migrate the orphan reservation over */ 2962 if (!err) 2963 err = btrfs_block_rsv_migrate(trans->block_rsv, 2964 &root->fs_info->global_block_rsv, 2965 trans->bytes_reserved); 2966 2967 if (err) { 2968 btrfs_end_transaction(trans, root); 2969 root->fs_info->enospc_unlink = 0; 2970 return ERR_PTR(err); 2971 } 2972 2973 trans->block_rsv = &root->fs_info->global_block_rsv; 2974 return trans; 2975 } 2976 2977 static void __unlink_end_trans(struct btrfs_trans_handle *trans, 2978 struct btrfs_root *root) 2979 { 2980 if (trans->block_rsv == &root->fs_info->global_block_rsv) { 2981 btrfs_block_rsv_release(root, trans->block_rsv, 2982 trans->bytes_reserved); 2983 trans->block_rsv = &root->fs_info->trans_block_rsv; 2984 BUG_ON(!root->fs_info->enospc_unlink); 2985 root->fs_info->enospc_unlink = 0; 2986 } 2987 btrfs_end_transaction(trans, root); 2988 } 2989 2990 static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 2991 { 2992 struct btrfs_root *root = BTRFS_I(dir)->root; 2993 struct btrfs_trans_handle *trans; 2994 struct inode *inode = dentry->d_inode; 2995 int ret; 2996 unsigned long nr = 0; 2997 2998 trans = __unlink_start_trans(dir, dentry); 2999 if (IS_ERR(trans)) 3000 return PTR_ERR(trans); 3001 3002 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); 3003 3004 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3005 dentry->d_name.name, dentry->d_name.len); 3006 if (ret) 3007 goto out; 3008 3009 if (inode->i_nlink == 0) { 3010 ret = btrfs_orphan_add(trans, inode); 3011 if (ret) 3012 goto out; 3013 } 3014 3015 out: 3016 nr = trans->blocks_used; 3017 __unlink_end_trans(trans, root); 3018 btrfs_btree_balance_dirty(root, nr); 3019 return ret; 3020 } 3021 3022 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 3023 struct btrfs_root *root, 3024 struct inode *dir, u64 objectid, 3025 const char *name, int name_len) 3026 { 3027 struct btrfs_path *path; 3028 struct extent_buffer *leaf; 3029 struct btrfs_dir_item *di; 3030 struct btrfs_key key; 3031 u64 index; 3032 int ret; 3033 u64 dir_ino = btrfs_ino(dir); 3034 3035 path = btrfs_alloc_path(); 3036 if (!path) 3037 return -ENOMEM; 3038 3039 di = 
btrfs_lookup_dir_item(trans, root, path, dir_ino, 3040 name, name_len, -1); 3041 if (IS_ERR_OR_NULL(di)) { 3042 if (!di) 3043 ret = -ENOENT; 3044 else 3045 ret = PTR_ERR(di); 3046 goto out; 3047 } 3048 3049 leaf = path->nodes[0]; 3050 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3051 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 3052 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3053 if (ret) { 3054 btrfs_abort_transaction(trans, root, ret); 3055 goto out; 3056 } 3057 btrfs_release_path(path); 3058 3059 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 3060 objectid, root->root_key.objectid, 3061 dir_ino, &index, name, name_len); 3062 if (ret < 0) { 3063 if (ret != -ENOENT) { 3064 btrfs_abort_transaction(trans, root, ret); 3065 goto out; 3066 } 3067 di = btrfs_search_dir_index_item(root, path, dir_ino, 3068 name, name_len); 3069 if (IS_ERR_OR_NULL(di)) { 3070 if (!di) 3071 ret = -ENOENT; 3072 else 3073 ret = PTR_ERR(di); 3074 btrfs_abort_transaction(trans, root, ret); 3075 goto out; 3076 } 3077 3078 leaf = path->nodes[0]; 3079 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3080 btrfs_release_path(path); 3081 index = key.offset; 3082 } 3083 btrfs_release_path(path); 3084 3085 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 3086 if (ret) { 3087 btrfs_abort_transaction(trans, root, ret); 3088 goto out; 3089 } 3090 3091 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3092 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3093 ret = btrfs_update_inode(trans, root, dir); 3094 if (ret) 3095 btrfs_abort_transaction(trans, root, ret); 3096 out: 3097 btrfs_free_path(path); 3098 return ret; 3099 } 3100 3101 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 3102 { 3103 struct inode *inode = dentry->d_inode; 3104 int err = 0; 3105 struct btrfs_root *root = BTRFS_I(dir)->root; 3106 struct btrfs_trans_handle *trans; 3107 unsigned long nr = 0; 3108 3109 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || 3110 btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) 3111 return -ENOTEMPTY; 3112 3113 trans = __unlink_start_trans(dir, dentry); 3114 if (IS_ERR(trans)) 3115 return PTR_ERR(trans); 3116 3117 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3118 err = btrfs_unlink_subvol(trans, root, dir, 3119 BTRFS_I(inode)->location.objectid, 3120 dentry->d_name.name, 3121 dentry->d_name.len); 3122 goto out; 3123 } 3124 3125 err = btrfs_orphan_add(trans, inode); 3126 if (err) 3127 goto out; 3128 3129 /* now the directory is empty */ 3130 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3131 dentry->d_name.name, dentry->d_name.len); 3132 if (!err) 3133 btrfs_i_size_write(inode, 0); 3134 out: 3135 nr = trans->blocks_used; 3136 __unlink_end_trans(trans, root); 3137 btrfs_btree_balance_dirty(root, nr); 3138 3139 return err; 3140 } 3141 3142 /* 3143 * this can truncate away extent items, csum items and directory items. 3144 * It starts at a high offset and removes keys until it can't find 3145 * any higher than new_size 3146 * 3147 * csum items that cross the new i_size are truncated to the new size 3148 * as well. 3149 * 3150 * min_type is the minimum key type to truncate down to. If set to 0, this 3151 * will kill all the items on this inode, including the INODE_ITEM_KEY. 
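 *
 * For illustration: inode eviction later in this file deletes
 * everything the inode owns with
 *
 *	btrfs_truncate_inode_items(trans, root, inode, 0, 0);
 *
 * while a regular truncate passes the new i_size and a min_type of
 * BTRFS_EXTENT_DATA_KEY so the inode item itself survives.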
3152 */ 3153 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 3154 struct btrfs_root *root, 3155 struct inode *inode, 3156 u64 new_size, u32 min_type) 3157 { 3158 struct btrfs_path *path; 3159 struct extent_buffer *leaf; 3160 struct btrfs_file_extent_item *fi; 3161 struct btrfs_key key; 3162 struct btrfs_key found_key; 3163 u64 extent_start = 0; 3164 u64 extent_num_bytes = 0; 3165 u64 extent_offset = 0; 3166 u64 item_end = 0; 3167 u64 mask = root->sectorsize - 1; 3168 u32 found_type = (u8)-1; 3169 int found_extent; 3170 int del_item; 3171 int pending_del_nr = 0; 3172 int pending_del_slot = 0; 3173 int extent_type = -1; 3174 int ret; 3175 int err = 0; 3176 u64 ino = btrfs_ino(inode); 3177 3178 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 3179 3180 path = btrfs_alloc_path(); 3181 if (!path) 3182 return -ENOMEM; 3183 path->reada = -1; 3184 3185 if (root->ref_cows || root == root->fs_info->tree_root) 3186 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); 3187 3188 /* 3189 * This function is also used to drop the items in the log tree before 3190 * we relog the inode, so if root != BTRFS_I(inode)->root, it means 3191 * it is used to drop the loged items. So we shouldn't kill the delayed 3192 * items. 3193 */ 3194 if (min_type == 0 && root == BTRFS_I(inode)->root) 3195 btrfs_kill_delayed_inode_items(inode); 3196 3197 key.objectid = ino; 3198 key.offset = (u64)-1; 3199 key.type = (u8)-1; 3200 3201 search_again: 3202 path->leave_spinning = 1; 3203 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 3204 if (ret < 0) { 3205 err = ret; 3206 goto out; 3207 } 3208 3209 if (ret > 0) { 3210 /* there are no items in the tree for us to truncate, we're 3211 * done 3212 */ 3213 if (path->slots[0] == 0) 3214 goto out; 3215 path->slots[0]--; 3216 } 3217 3218 while (1) { 3219 fi = NULL; 3220 leaf = path->nodes[0]; 3221 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 3222 found_type = btrfs_key_type(&found_key); 3223 3224 if (found_key.objectid != ino) 3225 break; 3226 3227 if (found_type < min_type) 3228 break; 3229 3230 item_end = found_key.offset; 3231 if (found_type == BTRFS_EXTENT_DATA_KEY) { 3232 fi = btrfs_item_ptr(leaf, path->slots[0], 3233 struct btrfs_file_extent_item); 3234 extent_type = btrfs_file_extent_type(leaf, fi); 3235 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 3236 item_end += 3237 btrfs_file_extent_num_bytes(leaf, fi); 3238 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 3239 item_end += btrfs_file_extent_inline_len(leaf, 3240 fi); 3241 } 3242 item_end--; 3243 } 3244 if (found_type > min_type) { 3245 del_item = 1; 3246 } else { 3247 if (item_end < new_size) 3248 break; 3249 if (found_key.offset >= new_size) 3250 del_item = 1; 3251 else 3252 del_item = 0; 3253 } 3254 found_extent = 0; 3255 /* FIXME, shrink the extent if the ref count is only 1 */ 3256 if (found_type != BTRFS_EXTENT_DATA_KEY) 3257 goto delete; 3258 3259 if (extent_type != BTRFS_FILE_EXTENT_INLINE) { 3260 u64 num_dec; 3261 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); 3262 if (!del_item) { 3263 u64 orig_num_bytes = 3264 btrfs_file_extent_num_bytes(leaf, fi); 3265 extent_num_bytes = new_size - 3266 found_key.offset + root->sectorsize - 1; 3267 extent_num_bytes = extent_num_bytes & 3268 ~((u64)root->sectorsize - 1); 3269 btrfs_set_file_extent_num_bytes(leaf, fi, 3270 extent_num_bytes); 3271 num_dec = (orig_num_bytes - 3272 extent_num_bytes); 3273 if (root->ref_cows && extent_start != 0) 3274 inode_sub_bytes(inode, num_dec); 3275 
btrfs_mark_buffer_dirty(leaf); 3276 } else { 3277 extent_num_bytes = 3278 btrfs_file_extent_disk_num_bytes(leaf, 3279 fi); 3280 extent_offset = found_key.offset - 3281 btrfs_file_extent_offset(leaf, fi); 3282 3283 /* FIXME blocksize != 4096 */ 3284 num_dec = btrfs_file_extent_num_bytes(leaf, fi); 3285 if (extent_start != 0) { 3286 found_extent = 1; 3287 if (root->ref_cows) 3288 inode_sub_bytes(inode, num_dec); 3289 } 3290 } 3291 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 3292 /* 3293 * we can't truncate inline items that have had 3294 * special encodings 3295 */ 3296 if (!del_item && 3297 btrfs_file_extent_compression(leaf, fi) == 0 && 3298 btrfs_file_extent_encryption(leaf, fi) == 0 && 3299 btrfs_file_extent_other_encoding(leaf, fi) == 0) { 3300 u32 size = new_size - found_key.offset; 3301 3302 if (root->ref_cows) { 3303 inode_sub_bytes(inode, item_end + 1 - 3304 new_size); 3305 } 3306 size = 3307 btrfs_file_extent_calc_inline_size(size); 3308 btrfs_truncate_item(trans, root, path, 3309 size, 1); 3310 } else if (root->ref_cows) { 3311 inode_sub_bytes(inode, item_end + 1 - 3312 found_key.offset); 3313 } 3314 } 3315 delete: 3316 if (del_item) { 3317 if (!pending_del_nr) { 3318 /* no pending yet, add ourselves */ 3319 pending_del_slot = path->slots[0]; 3320 pending_del_nr = 1; 3321 } else if (pending_del_nr && 3322 path->slots[0] + 1 == pending_del_slot) { 3323 /* hop on the pending chunk */ 3324 pending_del_nr++; 3325 pending_del_slot = path->slots[0]; 3326 } else { 3327 BUG(); 3328 } 3329 } else { 3330 break; 3331 } 3332 if (found_extent && (root->ref_cows || 3333 root == root->fs_info->tree_root)) { 3334 btrfs_set_path_blocking(path); 3335 ret = btrfs_free_extent(trans, root, extent_start, 3336 extent_num_bytes, 0, 3337 btrfs_header_owner(leaf), 3338 ino, extent_offset, 0); 3339 BUG_ON(ret); 3340 } 3341 3342 if (found_type == BTRFS_INODE_ITEM_KEY) 3343 break; 3344 3345 if (path->slots[0] == 0 || 3346 path->slots[0] != pending_del_slot) { 3347 if (root->ref_cows && 3348 BTRFS_I(inode)->location.objectid != 3349 BTRFS_FREE_INO_OBJECTID) { 3350 err = -EAGAIN; 3351 goto out; 3352 } 3353 if (pending_del_nr) { 3354 ret = btrfs_del_items(trans, root, path, 3355 pending_del_slot, 3356 pending_del_nr); 3357 if (ret) { 3358 btrfs_abort_transaction(trans, 3359 root, ret); 3360 goto error; 3361 } 3362 pending_del_nr = 0; 3363 } 3364 btrfs_release_path(path); 3365 goto search_again; 3366 } else { 3367 path->slots[0]--; 3368 } 3369 } 3370 out: 3371 if (pending_del_nr) { 3372 ret = btrfs_del_items(trans, root, path, pending_del_slot, 3373 pending_del_nr); 3374 if (ret) 3375 btrfs_abort_transaction(trans, root, ret); 3376 } 3377 error: 3378 btrfs_free_path(path); 3379 return err; 3380 } 3381 3382 /* 3383 * taken from block_truncate_page, but does cow as it zeros out 3384 * any bytes left in the last page in the file. 
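 *
 * For example, with 4096 byte pages a truncate to offset 3000 zeroes
 * bytes 3000..4095 of the last page in the page cache and marks the
 * range delalloc, so the zeros reach disk through the normal COW
 * writeback path.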
3385 */ 3386 static int btrfs_truncate_page(struct address_space *mapping, loff_t from) 3387 { 3388 struct inode *inode = mapping->host; 3389 struct btrfs_root *root = BTRFS_I(inode)->root; 3390 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3391 struct btrfs_ordered_extent *ordered; 3392 struct extent_state *cached_state = NULL; 3393 char *kaddr; 3394 u32 blocksize = root->sectorsize; 3395 pgoff_t index = from >> PAGE_CACHE_SHIFT; 3396 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3397 struct page *page; 3398 gfp_t mask = btrfs_alloc_write_mask(mapping); 3399 int ret = 0; 3400 u64 page_start; 3401 u64 page_end; 3402 3403 if ((offset & (blocksize - 1)) == 0) 3404 goto out; 3405 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 3406 if (ret) 3407 goto out; 3408 3409 ret = -ENOMEM; 3410 again: 3411 page = find_or_create_page(mapping, index, mask); 3412 if (!page) { 3413 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 3414 goto out; 3415 } 3416 3417 page_start = page_offset(page); 3418 page_end = page_start + PAGE_CACHE_SIZE - 1; 3419 3420 if (!PageUptodate(page)) { 3421 ret = btrfs_readpage(NULL, page); 3422 lock_page(page); 3423 if (page->mapping != mapping) { 3424 unlock_page(page); 3425 page_cache_release(page); 3426 goto again; 3427 } 3428 if (!PageUptodate(page)) { 3429 ret = -EIO; 3430 goto out_unlock; 3431 } 3432 } 3433 wait_on_page_writeback(page); 3434 3435 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 3436 set_page_extent_mapped(page); 3437 3438 ordered = btrfs_lookup_ordered_extent(inode, page_start); 3439 if (ordered) { 3440 unlock_extent_cached(io_tree, page_start, page_end, 3441 &cached_state, GFP_NOFS); 3442 unlock_page(page); 3443 page_cache_release(page); 3444 btrfs_start_ordered_extent(inode, ordered, 1); 3445 btrfs_put_ordered_extent(ordered); 3446 goto again; 3447 } 3448 3449 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 3450 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 3451 0, 0, &cached_state, GFP_NOFS); 3452 3453 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 3454 &cached_state); 3455 if (ret) { 3456 unlock_extent_cached(io_tree, page_start, page_end, 3457 &cached_state, GFP_NOFS); 3458 goto out_unlock; 3459 } 3460 3461 ret = 0; 3462 if (offset != PAGE_CACHE_SIZE) { 3463 kaddr = kmap(page); 3464 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); 3465 flush_dcache_page(page); 3466 kunmap(page); 3467 } 3468 ClearPageChecked(page); 3469 set_page_dirty(page); 3470 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, 3471 GFP_NOFS); 3472 3473 out_unlock: 3474 if (ret) 3475 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 3476 unlock_page(page); 3477 page_cache_release(page); 3478 out: 3479 return ret; 3480 } 3481 3482 /* 3483 * This function puts in dummy file extents for the area we're creating a hole 3484 * for. 
So if we are truncating this file to a larger size we need to insert 3485 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for 3486 * the range between oldsize and size 3487 */ 3488 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) 3489 { 3490 struct btrfs_trans_handle *trans; 3491 struct btrfs_root *root = BTRFS_I(inode)->root; 3492 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3493 struct extent_map *em = NULL; 3494 struct extent_state *cached_state = NULL; 3495 u64 mask = root->sectorsize - 1; 3496 u64 hole_start = (oldsize + mask) & ~mask; 3497 u64 block_end = (size + mask) & ~mask; 3498 u64 last_byte; 3499 u64 cur_offset; 3500 u64 hole_size; 3501 int err = 0; 3502 3503 if (size <= hole_start) 3504 return 0; 3505 3506 while (1) { 3507 struct btrfs_ordered_extent *ordered; 3508 btrfs_wait_ordered_range(inode, hole_start, 3509 block_end - hole_start); 3510 lock_extent_bits(io_tree, hole_start, block_end - 1, 0, 3511 &cached_state); 3512 ordered = btrfs_lookup_ordered_extent(inode, hole_start); 3513 if (!ordered) 3514 break; 3515 unlock_extent_cached(io_tree, hole_start, block_end - 1, 3516 &cached_state, GFP_NOFS); 3517 btrfs_put_ordered_extent(ordered); 3518 } 3519 3520 cur_offset = hole_start; 3521 while (1) { 3522 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 3523 block_end - cur_offset, 0); 3524 if (IS_ERR(em)) { 3525 err = PTR_ERR(em); 3526 break; 3527 } 3528 last_byte = min(extent_map_end(em), block_end); 3529 last_byte = (last_byte + mask) & ~mask; 3530 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3531 u64 hint_byte = 0; 3532 hole_size = last_byte - cur_offset; 3533 3534 trans = btrfs_start_transaction(root, 3); 3535 if (IS_ERR(trans)) { 3536 err = PTR_ERR(trans); 3537 break; 3538 } 3539 3540 err = btrfs_drop_extents(trans, inode, cur_offset, 3541 cur_offset + hole_size, 3542 &hint_byte, 1); 3543 if (err) { 3544 btrfs_abort_transaction(trans, root, err); 3545 btrfs_end_transaction(trans, root); 3546 break; 3547 } 3548 3549 err = btrfs_insert_file_extent(trans, root, 3550 btrfs_ino(inode), cur_offset, 0, 3551 0, hole_size, 0, hole_size, 3552 0, 0, 0); 3553 if (err) { 3554 btrfs_abort_transaction(trans, root, err); 3555 btrfs_end_transaction(trans, root); 3556 break; 3557 } 3558 3559 btrfs_drop_extent_cache(inode, hole_start, 3560 last_byte - 1, 0); 3561 3562 btrfs_update_inode(trans, root, inode); 3563 btrfs_end_transaction(trans, root); 3564 } 3565 free_extent_map(em); 3566 em = NULL; 3567 cur_offset = last_byte; 3568 if (cur_offset >= block_end) 3569 break; 3570 } 3571 3572 free_extent_map(em); 3573 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, 3574 GFP_NOFS); 3575 return err; 3576 } 3577 3578 static int btrfs_setsize(struct inode *inode, loff_t newsize) 3579 { 3580 struct btrfs_root *root = BTRFS_I(inode)->root; 3581 struct btrfs_trans_handle *trans; 3582 loff_t oldsize = i_size_read(inode); 3583 int ret; 3584 3585 if (newsize == oldsize) 3586 return 0; 3587 3588 if (newsize > oldsize) { 3589 truncate_pagecache(inode, oldsize, newsize); 3590 ret = btrfs_cont_expand(inode, oldsize, newsize); 3591 if (ret) 3592 return ret; 3593 3594 trans = btrfs_start_transaction(root, 1); 3595 if (IS_ERR(trans)) 3596 return PTR_ERR(trans); 3597 3598 i_size_write(inode, newsize); 3599 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); 3600 ret = btrfs_update_inode(trans, root, inode); 3601 btrfs_end_transaction(trans, root); 3602 } else { 3603 3604 /* 3605 * We're truncating a file that 
used to have good data down to 3606 * zero. Make sure it gets into the ordered flush list so that 3607 * any new writes get down to disk quickly. 3608 */ 3609 if (newsize == 0) 3610 BTRFS_I(inode)->ordered_data_close = 1; 3611 3612 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 3613 truncate_setsize(inode, newsize); 3614 ret = btrfs_truncate(inode); 3615 } 3616 3617 return ret; 3618 } 3619 3620 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) 3621 { 3622 struct inode *inode = dentry->d_inode; 3623 struct btrfs_root *root = BTRFS_I(inode)->root; 3624 int err; 3625 3626 if (btrfs_root_readonly(root)) 3627 return -EROFS; 3628 3629 err = inode_change_ok(inode, attr); 3630 if (err) 3631 return err; 3632 3633 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 3634 err = btrfs_setsize(inode, attr->ia_size); 3635 if (err) 3636 return err; 3637 } 3638 3639 if (attr->ia_valid) { 3640 setattr_copy(inode, attr); 3641 err = btrfs_dirty_inode(inode); 3642 3643 if (!err && attr->ia_valid & ATTR_MODE) 3644 err = btrfs_acl_chmod(inode); 3645 } 3646 3647 return err; 3648 } 3649 3650 void btrfs_evict_inode(struct inode *inode) 3651 { 3652 struct btrfs_trans_handle *trans; 3653 struct btrfs_root *root = BTRFS_I(inode)->root; 3654 struct btrfs_block_rsv *rsv, *global_rsv; 3655 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 3656 unsigned long nr; 3657 int ret; 3658 3659 trace_btrfs_inode_evict(inode); 3660 3661 truncate_inode_pages(&inode->i_data, 0); 3662 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || 3663 btrfs_is_free_space_inode(root, inode))) 3664 goto no_delete; 3665 3666 if (is_bad_inode(inode)) { 3667 btrfs_orphan_del(NULL, inode); 3668 goto no_delete; 3669 } 3670 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ 3671 btrfs_wait_ordered_range(inode, 0, (u64)-1); 3672 3673 if (root->fs_info->log_root_recovering) { 3674 BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan)); 3675 goto no_delete; 3676 } 3677 3678 if (inode->i_nlink > 0) { 3679 BUG_ON(btrfs_root_refs(&root->root_item) != 0); 3680 goto no_delete; 3681 } 3682 3683 rsv = btrfs_alloc_block_rsv(root); 3684 if (!rsv) { 3685 btrfs_orphan_del(NULL, inode); 3686 goto no_delete; 3687 } 3688 rsv->size = min_size; 3689 global_rsv = &root->fs_info->global_block_rsv; 3690 3691 btrfs_i_size_write(inode, 0); 3692 3693 /* 3694 * This is a bit simpler than btrfs_truncate since 3695 * 3696 * 1) We've already reserved our space for our orphan item in the 3697 * unlink. 3698 * 2) We're going to delete the inode item, so we don't need to update 3699 * it at all. 3700 * 3701 * So we just need to reserve some slack space in case we add bytes when 3702 * doing the truncate. 3703 */ 3704 while (1) { 3705 ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size); 3706 3707 /* 3708 * Try and steal from the global reserve since we will 3709 * likely not use this space anyway, we want to try as 3710 * hard as possible to get this to work. 
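 * (The global reserve is a shared last-resort pool; stealing from it
 * is best effort and the failure case is handled right below.)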
3711 */ 3712 if (ret) 3713 ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size); 3714 3715 if (ret) { 3716 printk(KERN_WARNING "Could not get space for a " 3717 "delete, will truncate on mount %d\n", ret); 3718 btrfs_orphan_del(NULL, inode); 3719 btrfs_free_block_rsv(root, rsv); 3720 goto no_delete; 3721 } 3722 3723 trans = btrfs_start_transaction(root, 0); 3724 if (IS_ERR(trans)) { 3725 btrfs_orphan_del(NULL, inode); 3726 btrfs_free_block_rsv(root, rsv); 3727 goto no_delete; 3728 } 3729 3730 trans->block_rsv = rsv; 3731 3732 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 3733 if (ret != -EAGAIN) 3734 break; 3735 3736 nr = trans->blocks_used; 3737 btrfs_end_transaction(trans, root); 3738 trans = NULL; 3739 btrfs_btree_balance_dirty(root, nr); 3740 } 3741 3742 btrfs_free_block_rsv(root, rsv); 3743 3744 if (ret == 0) { 3745 trans->block_rsv = root->orphan_block_rsv; 3746 ret = btrfs_orphan_del(trans, inode); 3747 BUG_ON(ret); 3748 } 3749 3750 trans->block_rsv = &root->fs_info->trans_block_rsv; 3751 if (!(root == root->fs_info->tree_root || 3752 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) 3753 btrfs_return_ino(root, btrfs_ino(inode)); 3754 3755 nr = trans->blocks_used; 3756 btrfs_end_transaction(trans, root); 3757 btrfs_btree_balance_dirty(root, nr); 3758 no_delete: 3759 end_writeback(inode); 3760 return; 3761 } 3762 3763 /* 3764 * this returns the key found in the dir entry in the location pointer. 3765 * If no dir entries were found, location->objectid is 0. 3766 */ 3767 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, 3768 struct btrfs_key *location) 3769 { 3770 const char *name = dentry->d_name.name; 3771 int namelen = dentry->d_name.len; 3772 struct btrfs_dir_item *di; 3773 struct btrfs_path *path; 3774 struct btrfs_root *root = BTRFS_I(dir)->root; 3775 int ret = 0; 3776 3777 path = btrfs_alloc_path(); 3778 if (!path) 3779 return -ENOMEM; 3780 3781 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, 3782 namelen, 0); 3783 if (IS_ERR(di)) 3784 ret = PTR_ERR(di); 3785 3786 if (IS_ERR_OR_NULL(di)) 3787 goto out_err; 3788 3789 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); 3790 out: 3791 btrfs_free_path(path); 3792 return ret; 3793 out_err: 3794 location->objectid = 0; 3795 goto out; 3796 } 3797 3798 /* 3799 * when we hit a tree root in a directory, the btrfs part of the inode 3800 * needs to be changed to reflect the root directory of the tree root. This 3801 * is kind of like crossing a mount point. 
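 *
 * The connection is recorded as a root ref item in the tree of tree
 * roots, keyed by the parent root id and the child root id, with the
 * parent directory inode and the entry name stored in the item body;
 * that is what btrfs_find_root_ref() below matches against.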
3802 */ 3803 static int fixup_tree_root_location(struct btrfs_root *root, 3804 struct inode *dir, 3805 struct dentry *dentry, 3806 struct btrfs_key *location, 3807 struct btrfs_root **sub_root) 3808 { 3809 struct btrfs_path *path; 3810 struct btrfs_root *new_root; 3811 struct btrfs_root_ref *ref; 3812 struct extent_buffer *leaf; 3813 int ret; 3814 int err = 0; 3815 3816 path = btrfs_alloc_path(); 3817 if (!path) { 3818 err = -ENOMEM; 3819 goto out; 3820 } 3821 3822 err = -ENOENT; 3823 ret = btrfs_find_root_ref(root->fs_info->tree_root, path, 3824 BTRFS_I(dir)->root->root_key.objectid, 3825 location->objectid); 3826 if (ret) { 3827 if (ret < 0) 3828 err = ret; 3829 goto out; 3830 } 3831 3832 leaf = path->nodes[0]; 3833 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 3834 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || 3835 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 3836 goto out; 3837 3838 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, 3839 (unsigned long)(ref + 1), 3840 dentry->d_name.len); 3841 if (ret) 3842 goto out; 3843 3844 btrfs_release_path(path); 3845 3846 new_root = btrfs_read_fs_root_no_name(root->fs_info, location); 3847 if (IS_ERR(new_root)) { 3848 err = PTR_ERR(new_root); 3849 goto out; 3850 } 3851 3852 if (btrfs_root_refs(&new_root->root_item) == 0) { 3853 err = -ENOENT; 3854 goto out; 3855 } 3856 3857 *sub_root = new_root; 3858 location->objectid = btrfs_root_dirid(&new_root->root_item); 3859 location->type = BTRFS_INODE_ITEM_KEY; 3860 location->offset = 0; 3861 err = 0; 3862 out: 3863 btrfs_free_path(path); 3864 return err; 3865 } 3866 3867 static void inode_tree_add(struct inode *inode) 3868 { 3869 struct btrfs_root *root = BTRFS_I(inode)->root; 3870 struct btrfs_inode *entry; 3871 struct rb_node **p; 3872 struct rb_node *parent; 3873 u64 ino = btrfs_ino(inode); 3874 again: 3875 p = &root->inode_tree.rb_node; 3876 parent = NULL; 3877 3878 if (inode_unhashed(inode)) 3879 return; 3880 3881 spin_lock(&root->inode_lock); 3882 while (*p) { 3883 parent = *p; 3884 entry = rb_entry(parent, struct btrfs_inode, rb_node); 3885 3886 if (ino < btrfs_ino(&entry->vfs_inode)) 3887 p = &parent->rb_left; 3888 else if (ino > btrfs_ino(&entry->vfs_inode)) 3889 p = &parent->rb_right; 3890 else { 3891 WARN_ON(!(entry->vfs_inode.i_state & 3892 (I_WILL_FREE | I_FREEING))); 3893 rb_erase(parent, &root->inode_tree); 3894 RB_CLEAR_NODE(parent); 3895 spin_unlock(&root->inode_lock); 3896 goto again; 3897 } 3898 } 3899 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p); 3900 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree); 3901 spin_unlock(&root->inode_lock); 3902 } 3903 3904 static void inode_tree_del(struct inode *inode) 3905 { 3906 struct btrfs_root *root = BTRFS_I(inode)->root; 3907 int empty = 0; 3908 3909 spin_lock(&root->inode_lock); 3910 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { 3911 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); 3912 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); 3913 empty = RB_EMPTY_ROOT(&root->inode_tree); 3914 } 3915 spin_unlock(&root->inode_lock); 3916 3917 /* 3918 * Free space cache has inodes in the tree root, but the tree root has a 3919 * root_refs of 0, so this could end up dropping the tree root as a 3920 * snapshot, so we need the extra !root->fs_info->tree_root check to 3921 * make sure we don't drop it. 
3922 */ 3923 if (empty && btrfs_root_refs(&root->root_item) == 0 && 3924 root != root->fs_info->tree_root) { 3925 synchronize_srcu(&root->fs_info->subvol_srcu); 3926 spin_lock(&root->inode_lock); 3927 empty = RB_EMPTY_ROOT(&root->inode_tree); 3928 spin_unlock(&root->inode_lock); 3929 if (empty) 3930 btrfs_add_dead_root(root); 3931 } 3932 } 3933 3934 void btrfs_invalidate_inodes(struct btrfs_root *root) 3935 { 3936 struct rb_node *node; 3937 struct rb_node *prev; 3938 struct btrfs_inode *entry; 3939 struct inode *inode; 3940 u64 objectid = 0; 3941 3942 WARN_ON(btrfs_root_refs(&root->root_item) != 0); 3943 3944 spin_lock(&root->inode_lock); 3945 again: 3946 node = root->inode_tree.rb_node; 3947 prev = NULL; 3948 while (node) { 3949 prev = node; 3950 entry = rb_entry(node, struct btrfs_inode, rb_node); 3951 3952 if (objectid < btrfs_ino(&entry->vfs_inode)) 3953 node = node->rb_left; 3954 else if (objectid > btrfs_ino(&entry->vfs_inode)) 3955 node = node->rb_right; 3956 else 3957 break; 3958 } 3959 if (!node) { 3960 while (prev) { 3961 entry = rb_entry(prev, struct btrfs_inode, rb_node); 3962 if (objectid <= btrfs_ino(&entry->vfs_inode)) { 3963 node = prev; 3964 break; 3965 } 3966 prev = rb_next(prev); 3967 } 3968 } 3969 while (node) { 3970 entry = rb_entry(node, struct btrfs_inode, rb_node); 3971 objectid = btrfs_ino(&entry->vfs_inode) + 1; 3972 inode = igrab(&entry->vfs_inode); 3973 if (inode) { 3974 spin_unlock(&root->inode_lock); 3975 if (atomic_read(&inode->i_count) > 1) 3976 d_prune_aliases(inode); 3977 /* 3978 * btrfs_drop_inode will have it removed from 3979 * the inode cache when its usage count 3980 * hits zero. 3981 */ 3982 iput(inode); 3983 cond_resched(); 3984 spin_lock(&root->inode_lock); 3985 goto again; 3986 } 3987 3988 if (cond_resched_lock(&root->inode_lock)) 3989 goto again; 3990 3991 node = rb_next(node); 3992 } 3993 spin_unlock(&root->inode_lock); 3994 } 3995 3996 static int btrfs_init_locked_inode(struct inode *inode, void *p) 3997 { 3998 struct btrfs_iget_args *args = p; 3999 inode->i_ino = args->ino; 4000 BTRFS_I(inode)->root = args->root; 4001 btrfs_set_inode_space_info(args->root, inode); 4002 return 0; 4003 } 4004 4005 static int btrfs_find_actor(struct inode *inode, void *opaque) 4006 { 4007 struct btrfs_iget_args *args = opaque; 4008 return args->ino == btrfs_ino(inode) && 4009 args->root == BTRFS_I(inode)->root; 4010 } 4011 4012 static struct inode *btrfs_iget_locked(struct super_block *s, 4013 u64 objectid, 4014 struct btrfs_root *root) 4015 { 4016 struct inode *inode; 4017 struct btrfs_iget_args args; 4018 args.ino = objectid; 4019 args.root = root; 4020 4021 inode = iget5_locked(s, objectid, btrfs_find_actor, 4022 btrfs_init_locked_inode, 4023 (void *)&args); 4024 return inode; 4025 } 4026 4027 /* Get an inode object given its location and corresponding root. 
4028 * Returns in *is_new if the inode was read from disk 4029 */ 4030 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 4031 struct btrfs_root *root, int *new) 4032 { 4033 struct inode *inode; 4034 4035 inode = btrfs_iget_locked(s, location->objectid, root); 4036 if (!inode) 4037 return ERR_PTR(-ENOMEM); 4038 4039 if (inode->i_state & I_NEW) { 4040 BTRFS_I(inode)->root = root; 4041 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); 4042 btrfs_read_locked_inode(inode); 4043 if (!is_bad_inode(inode)) { 4044 inode_tree_add(inode); 4045 unlock_new_inode(inode); 4046 if (new) 4047 *new = 1; 4048 } else { 4049 unlock_new_inode(inode); 4050 iput(inode); 4051 inode = ERR_PTR(-ESTALE); 4052 } 4053 } 4054 4055 return inode; 4056 } 4057 4058 static struct inode *new_simple_dir(struct super_block *s, 4059 struct btrfs_key *key, 4060 struct btrfs_root *root) 4061 { 4062 struct inode *inode = new_inode(s); 4063 4064 if (!inode) 4065 return ERR_PTR(-ENOMEM); 4066 4067 BTRFS_I(inode)->root = root; 4068 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 4069 BTRFS_I(inode)->dummy_inode = 1; 4070 4071 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 4072 inode->i_op = &btrfs_dir_ro_inode_operations; 4073 inode->i_fop = &simple_dir_operations; 4074 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 4075 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 4076 4077 return inode; 4078 } 4079 4080 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) 4081 { 4082 struct inode *inode; 4083 struct btrfs_root *root = BTRFS_I(dir)->root; 4084 struct btrfs_root *sub_root = root; 4085 struct btrfs_key location; 4086 int index; 4087 int ret = 0; 4088 4089 if (dentry->d_name.len > BTRFS_NAME_LEN) 4090 return ERR_PTR(-ENAMETOOLONG); 4091 4092 if (unlikely(d_need_lookup(dentry))) { 4093 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); 4094 kfree(dentry->d_fsdata); 4095 dentry->d_fsdata = NULL; 4096 /* This thing is hashed, drop it for now */ 4097 d_drop(dentry); 4098 } else { 4099 ret = btrfs_inode_by_name(dir, dentry, &location); 4100 } 4101 4102 if (ret < 0) 4103 return ERR_PTR(ret); 4104 4105 if (location.objectid == 0) 4106 return NULL; 4107 4108 if (location.type == BTRFS_INODE_ITEM_KEY) { 4109 inode = btrfs_iget(dir->i_sb, &location, root, NULL); 4110 return inode; 4111 } 4112 4113 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); 4114 4115 index = srcu_read_lock(&root->fs_info->subvol_srcu); 4116 ret = fixup_tree_root_location(root, dir, dentry, 4117 &location, &sub_root); 4118 if (ret < 0) { 4119 if (ret != -ENOENT) 4120 inode = ERR_PTR(ret); 4121 else 4122 inode = new_simple_dir(dir->i_sb, &location, sub_root); 4123 } else { 4124 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); 4125 } 4126 srcu_read_unlock(&root->fs_info->subvol_srcu, index); 4127 4128 if (!IS_ERR(inode) && root != sub_root) { 4129 down_read(&root->fs_info->cleanup_work_sem); 4130 if (!(inode->i_sb->s_flags & MS_RDONLY)) 4131 ret = btrfs_orphan_cleanup(sub_root); 4132 up_read(&root->fs_info->cleanup_work_sem); 4133 if (ret) 4134 inode = ERR_PTR(ret); 4135 } 4136 4137 return inode; 4138 } 4139 4140 static int btrfs_dentry_delete(const struct dentry *dentry) 4141 { 4142 struct btrfs_root *root; 4143 struct inode *inode = dentry->d_inode; 4144 4145 if (!inode && !IS_ROOT(dentry)) 4146 inode = dentry->d_parent->d_inode; 4147 4148 if (inode) { 4149 root = BTRFS_I(inode)->root; 4150 if (btrfs_root_refs(&root->root_item) == 0) 4151 return 1; 4152 4153 if 
(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 4154 return 1; 4155 } 4156 return 0; 4157 } 4158 4159 static void btrfs_dentry_release(struct dentry *dentry) 4160 { 4161 if (dentry->d_fsdata) 4162 kfree(dentry->d_fsdata); 4163 } 4164 4165 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4166 struct nameidata *nd) 4167 { 4168 struct dentry *ret; 4169 4170 ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4171 if (unlikely(d_need_lookup(dentry))) { 4172 spin_lock(&dentry->d_lock); 4173 dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 4174 spin_unlock(&dentry->d_lock); 4175 } 4176 return ret; 4177 } 4178 4179 unsigned char btrfs_filetype_table[] = { 4180 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 4181 }; 4182 4183 static int btrfs_real_readdir(struct file *filp, void *dirent, 4184 filldir_t filldir) 4185 { 4186 struct inode *inode = filp->f_dentry->d_inode; 4187 struct btrfs_root *root = BTRFS_I(inode)->root; 4188 struct btrfs_item *item; 4189 struct btrfs_dir_item *di; 4190 struct btrfs_key key; 4191 struct btrfs_key found_key; 4192 struct btrfs_path *path; 4193 struct list_head ins_list; 4194 struct list_head del_list; 4195 int ret; 4196 struct extent_buffer *leaf; 4197 int slot; 4198 unsigned char d_type; 4199 int over = 0; 4200 u32 di_cur; 4201 u32 di_total; 4202 u32 di_len; 4203 int key_type = BTRFS_DIR_INDEX_KEY; 4204 char tmp_name[32]; 4205 char *name_ptr; 4206 int name_len; 4207 int is_curr = 0; /* filp->f_pos points to the current index? */ 4208 4209 /* FIXME, use a real flag for deciding about the key type */ 4210 if (root->fs_info->tree_root == root) 4211 key_type = BTRFS_DIR_ITEM_KEY; 4212 4213 /* special case for "." */ 4214 if (filp->f_pos == 0) { 4215 over = filldir(dirent, ".", 1, 4216 filp->f_pos, btrfs_ino(inode), DT_DIR); 4217 if (over) 4218 return 0; 4219 filp->f_pos = 1; 4220 } 4221 /* special case for .., just use the back ref */ 4222 if (filp->f_pos == 1) { 4223 u64 pino = parent_ino(filp->f_path.dentry); 4224 over = filldir(dirent, "..", 2, 4225 filp->f_pos, pino, DT_DIR); 4226 if (over) 4227 return 0; 4228 filp->f_pos = 2; 4229 } 4230 path = btrfs_alloc_path(); 4231 if (!path) 4232 return -ENOMEM; 4233 4234 path->reada = 1; 4235 4236 if (key_type == BTRFS_DIR_INDEX_KEY) { 4237 INIT_LIST_HEAD(&ins_list); 4238 INIT_LIST_HEAD(&del_list); 4239 btrfs_get_delayed_items(inode, &ins_list, &del_list); 4240 } 4241 4242 btrfs_set_key_type(&key, key_type); 4243 key.offset = filp->f_pos; 4244 key.objectid = btrfs_ino(inode); 4245 4246 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4247 if (ret < 0) 4248 goto err; 4249 4250 while (1) { 4251 leaf = path->nodes[0]; 4252 slot = path->slots[0]; 4253 if (slot >= btrfs_header_nritems(leaf)) { 4254 ret = btrfs_next_leaf(root, path); 4255 if (ret < 0) 4256 goto err; 4257 else if (ret > 0) 4258 break; 4259 continue; 4260 } 4261 4262 item = btrfs_item_nr(leaf, slot); 4263 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4264 4265 if (found_key.objectid != key.objectid) 4266 break; 4267 if (btrfs_key_type(&found_key) != key_type) 4268 break; 4269 if (found_key.offset < filp->f_pos) 4270 goto next; 4271 if (key_type == BTRFS_DIR_INDEX_KEY && 4272 btrfs_should_delete_dir_index(&del_list, 4273 found_key.offset)) 4274 goto next; 4275 4276 filp->f_pos = found_key.offset; 4277 is_curr = 1; 4278 4279 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); 4280 di_cur = 0; 4281 di_total = btrfs_item_size(leaf, item); 4282 4283 while (di_cur < di_total) { 4284 struct btrfs_key 
location; 4285 4286 if (verify_dir_item(root, leaf, di)) 4287 break; 4288 4289 name_len = btrfs_dir_name_len(leaf, di); 4290 if (name_len <= sizeof(tmp_name)) { 4291 name_ptr = tmp_name; 4292 } else { 4293 name_ptr = kmalloc(name_len, GFP_NOFS); 4294 if (!name_ptr) { 4295 ret = -ENOMEM; 4296 goto err; 4297 } 4298 } 4299 read_extent_buffer(leaf, name_ptr, 4300 (unsigned long)(di + 1), name_len); 4301 4302 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 4303 btrfs_dir_item_key_to_cpu(leaf, di, &location); 4304 4305 4306 /* is this a reference to our own snapshot? If so 4307 * skip it. 4308 * 4309 * In contrast to old kernels, we insert the snapshot's 4310 * dir item and dir index after it has been created, so 4311 * we won't find a reference to our own snapshot. We 4312 * still keep the following code for backward 4313 * compatibility. 4314 */ 4315 if (location.type == BTRFS_ROOT_ITEM_KEY && 4316 location.objectid == root->root_key.objectid) { 4317 over = 0; 4318 goto skip; 4319 } 4320 over = filldir(dirent, name_ptr, name_len, 4321 found_key.offset, location.objectid, 4322 d_type); 4323 4324 skip: 4325 if (name_ptr != tmp_name) 4326 kfree(name_ptr); 4327 4328 if (over) 4329 goto nopos; 4330 di_len = btrfs_dir_name_len(leaf, di) + 4331 btrfs_dir_data_len(leaf, di) + sizeof(*di); 4332 di_cur += di_len; 4333 di = (struct btrfs_dir_item *)((char *)di + di_len); 4334 } 4335 next: 4336 path->slots[0]++; 4337 } 4338 4339 if (key_type == BTRFS_DIR_INDEX_KEY) { 4340 if (is_curr) 4341 filp->f_pos++; 4342 ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, 4343 &ins_list); 4344 if (ret) 4345 goto nopos; 4346 } 4347 4348 /* Reached end of directory/root. Bump pos past the last item. */ 4349 if (key_type == BTRFS_DIR_INDEX_KEY) 4350 /* 4351 * 32-bit glibc will use getdents64, but then strtol - 4352 * so the last number we can serve is this. 4353 */ 4354 filp->f_pos = 0x7fffffff; 4355 else 4356 filp->f_pos++; 4357 nopos: 4358 ret = 0; 4359 err: 4360 if (key_type == BTRFS_DIR_INDEX_KEY) 4361 btrfs_put_delayed_items(&ins_list, &del_list); 4362 btrfs_free_path(path); 4363 return ret; 4364 } 4365 4366 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) 4367 { 4368 struct btrfs_root *root = BTRFS_I(inode)->root; 4369 struct btrfs_trans_handle *trans; 4370 int ret = 0; 4371 bool nolock = false; 4372 4373 if (BTRFS_I(inode)->dummy_inode) 4374 return 0; 4375 4376 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode)) 4377 nolock = true; 4378 4379 if (wbc->sync_mode == WB_SYNC_ALL) { 4380 if (nolock) 4381 trans = btrfs_join_transaction_nolock(root); 4382 else 4383 trans = btrfs_join_transaction(root); 4384 if (IS_ERR(trans)) 4385 return PTR_ERR(trans); 4386 if (nolock) 4387 ret = btrfs_end_transaction_nolock(trans, root); 4388 else 4389 ret = btrfs_commit_transaction(trans, root); 4390 } 4391 return ret; 4392 } 4393 4394 /* 4395 * This is somewhat expensive, updating the tree every time the 4396 * inode changes. But, it is most likely to find the inode in cache. 4397 * FIXME, needs more benchmarking...there are no reasons other than performance 4398 * to keep or drop this code. 
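 *
 * The fast path below piggybacks on the current transaction via
 * btrfs_join_transaction(); only when btrfs_update_inode() comes back
 * with -ENOSPC do we pay for btrfs_start_transaction(root, 1), which
 * reserves space for one item before retrying the update.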
 */
int btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (BTRFS_I(inode)->dummy_inode)
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC) {
		/* whoops, let's try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans, root);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(root);

	return ret;
}

/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
int btrfs_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	int ret;
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write?  Takes lock. */
	if (mnt_want_write_file(file))
		return 0;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	ret = btrfs_dirty_inode(inode);
	if (!ret)
		mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
	return ret;
}

/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..'
have f_pos of 0 and 1 respectively, so everybody 4509 * else has to start at 2 4510 */ 4511 if (path->slots[0] == 0) { 4512 BTRFS_I(inode)->index_cnt = 2; 4513 goto out; 4514 } 4515 4516 path->slots[0]--; 4517 4518 leaf = path->nodes[0]; 4519 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4520 4521 if (found_key.objectid != btrfs_ino(inode) || 4522 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { 4523 BTRFS_I(inode)->index_cnt = 2; 4524 goto out; 4525 } 4526 4527 BTRFS_I(inode)->index_cnt = found_key.offset + 1; 4528 out: 4529 btrfs_free_path(path); 4530 return ret; 4531 } 4532 4533 /* 4534 * helper to find a free sequence number in a given directory. This current 4535 * code is very simple, later versions will do smarter things in the btree 4536 */ 4537 int btrfs_set_inode_index(struct inode *dir, u64 *index) 4538 { 4539 int ret = 0; 4540 4541 if (BTRFS_I(dir)->index_cnt == (u64)-1) { 4542 ret = btrfs_inode_delayed_dir_index_count(dir); 4543 if (ret) { 4544 ret = btrfs_set_inode_index_count(dir); 4545 if (ret) 4546 return ret; 4547 } 4548 } 4549 4550 *index = BTRFS_I(dir)->index_cnt; 4551 BTRFS_I(dir)->index_cnt++; 4552 4553 return ret; 4554 } 4555 4556 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, 4557 struct btrfs_root *root, 4558 struct inode *dir, 4559 const char *name, int name_len, 4560 u64 ref_objectid, u64 objectid, 4561 umode_t mode, u64 *index) 4562 { 4563 struct inode *inode; 4564 struct btrfs_inode_item *inode_item; 4565 struct btrfs_key *location; 4566 struct btrfs_path *path; 4567 struct btrfs_inode_ref *ref; 4568 struct btrfs_key key[2]; 4569 u32 sizes[2]; 4570 unsigned long ptr; 4571 int ret; 4572 int owner; 4573 4574 path = btrfs_alloc_path(); 4575 if (!path) 4576 return ERR_PTR(-ENOMEM); 4577 4578 inode = new_inode(root->fs_info->sb); 4579 if (!inode) { 4580 btrfs_free_path(path); 4581 return ERR_PTR(-ENOMEM); 4582 } 4583 4584 /* 4585 * we have to initialize this early, so we can reclaim the inode 4586 * number if we fail afterwards in this function. 
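	 *
	 * (The reclaim itself happens via the final iput() on the error
	 * path: eviction then sees a fully numbered inode, so the objectid
	 * can be handed back to the free-ino cache rather than leaked.)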
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_set_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;
	btrfs_set_inode_space_info(root, inode);

	if (S_ISDIR(mode))
		owner = 0;
	else
		owner = 1;

	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	return inode;
fail:
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
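 *
 * An illustrative call, following the pattern btrfs_link() uses further
 * below (error handling elided; the 1 asks for the inode backref):
 *
 *   btrfs_set_inode_index(dir, &index);
 *   err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
 *                        dentry->d_name.len, 1, index);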
4689 */ 4690 int btrfs_add_link(struct btrfs_trans_handle *trans, 4691 struct inode *parent_inode, struct inode *inode, 4692 const char *name, int name_len, int add_backref, u64 index) 4693 { 4694 int ret = 0; 4695 struct btrfs_key key; 4696 struct btrfs_root *root = BTRFS_I(parent_inode)->root; 4697 u64 ino = btrfs_ino(inode); 4698 u64 parent_ino = btrfs_ino(parent_inode); 4699 4700 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 4701 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); 4702 } else { 4703 key.objectid = ino; 4704 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); 4705 key.offset = 0; 4706 } 4707 4708 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 4709 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 4710 key.objectid, root->root_key.objectid, 4711 parent_ino, index, name, name_len); 4712 } else if (add_backref) { 4713 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, 4714 parent_ino, index); 4715 } 4716 4717 /* Nothing to clean up yet */ 4718 if (ret) 4719 return ret; 4720 4721 ret = btrfs_insert_dir_item(trans, root, name, name_len, 4722 parent_inode, &key, 4723 btrfs_inode_type(inode), index); 4724 if (ret == -EEXIST) 4725 goto fail_dir_item; 4726 else if (ret) { 4727 btrfs_abort_transaction(trans, root, ret); 4728 return ret; 4729 } 4730 4731 btrfs_i_size_write(parent_inode, parent_inode->i_size + 4732 name_len * 2); 4733 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 4734 ret = btrfs_update_inode(trans, root, parent_inode); 4735 if (ret) 4736 btrfs_abort_transaction(trans, root, ret); 4737 return ret; 4738 4739 fail_dir_item: 4740 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { 4741 u64 local_index; 4742 int err; 4743 err = btrfs_del_root_ref(trans, root->fs_info->tree_root, 4744 key.objectid, root->root_key.objectid, 4745 parent_ino, &local_index, name, name_len); 4746 4747 } else if (add_backref) { 4748 u64 local_index; 4749 int err; 4750 4751 err = btrfs_del_inode_ref(trans, root, name, name_len, 4752 ino, parent_ino, &local_index); 4753 } 4754 return ret; 4755 } 4756 4757 static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 4758 struct inode *dir, struct dentry *dentry, 4759 struct inode *inode, int backref, u64 index) 4760 { 4761 int err = btrfs_add_link(trans, dir, inode, 4762 dentry->d_name.name, dentry->d_name.len, 4763 backref, index); 4764 if (err > 0) 4765 err = -EEXIST; 4766 return err; 4767 } 4768 4769 static int btrfs_mknod(struct inode *dir, struct dentry *dentry, 4770 umode_t mode, dev_t rdev) 4771 { 4772 struct btrfs_trans_handle *trans; 4773 struct btrfs_root *root = BTRFS_I(dir)->root; 4774 struct inode *inode = NULL; 4775 int err; 4776 int drop_inode = 0; 4777 u64 objectid; 4778 unsigned long nr = 0; 4779 u64 index = 0; 4780 4781 if (!new_valid_dev(rdev)) 4782 return -EINVAL; 4783 4784 /* 4785 * 2 for inode item and ref 4786 * 2 for dir items 4787 * 1 for xattr if selinux is on 4788 */ 4789 trans = btrfs_start_transaction(root, 5); 4790 if (IS_ERR(trans)) 4791 return PTR_ERR(trans); 4792 4793 err = btrfs_find_free_ino(root, &objectid); 4794 if (err) 4795 goto out_unlock; 4796 4797 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4798 dentry->d_name.len, btrfs_ino(dir), objectid, 4799 mode, &index); 4800 if (IS_ERR(inode)) { 4801 err = PTR_ERR(inode); 4802 goto out_unlock; 4803 } 4804 4805 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4806 if (err) { 4807 drop_inode = 1; 4808 goto out_unlock; 4809 } 4810 4811 /* 4812 * If the active LSM wants to access the 
inode during 4813 * d_instantiate it needs these. Smack checks to see 4814 * if the filesystem supports xattrs by looking at the 4815 * ops vector. 4816 */ 4817 4818 inode->i_op = &btrfs_special_inode_operations; 4819 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4820 if (err) 4821 drop_inode = 1; 4822 else { 4823 init_special_inode(inode, inode->i_mode, rdev); 4824 btrfs_update_inode(trans, root, inode); 4825 d_instantiate(dentry, inode); 4826 } 4827 out_unlock: 4828 nr = trans->blocks_used; 4829 btrfs_end_transaction(trans, root); 4830 btrfs_btree_balance_dirty(root, nr); 4831 if (drop_inode) { 4832 inode_dec_link_count(inode); 4833 iput(inode); 4834 } 4835 return err; 4836 } 4837 4838 static int btrfs_create(struct inode *dir, struct dentry *dentry, 4839 umode_t mode, struct nameidata *nd) 4840 { 4841 struct btrfs_trans_handle *trans; 4842 struct btrfs_root *root = BTRFS_I(dir)->root; 4843 struct inode *inode = NULL; 4844 int drop_inode = 0; 4845 int err; 4846 unsigned long nr = 0; 4847 u64 objectid; 4848 u64 index = 0; 4849 4850 /* 4851 * 2 for inode item and ref 4852 * 2 for dir items 4853 * 1 for xattr if selinux is on 4854 */ 4855 trans = btrfs_start_transaction(root, 5); 4856 if (IS_ERR(trans)) 4857 return PTR_ERR(trans); 4858 4859 err = btrfs_find_free_ino(root, &objectid); 4860 if (err) 4861 goto out_unlock; 4862 4863 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4864 dentry->d_name.len, btrfs_ino(dir), objectid, 4865 mode, &index); 4866 if (IS_ERR(inode)) { 4867 err = PTR_ERR(inode); 4868 goto out_unlock; 4869 } 4870 4871 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4872 if (err) { 4873 drop_inode = 1; 4874 goto out_unlock; 4875 } 4876 4877 /* 4878 * If the active LSM wants to access the inode during 4879 * d_instantiate it needs these. Smack checks to see 4880 * if the filesystem supports xattrs by looking at the 4881 * ops vector. 
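	 *
	 * In other words, i_op/i_fop (and the xattr-capable a_ops below)
	 * have to be wired up before d_instantiate() runs, not after.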
4882 */ 4883 inode->i_fop = &btrfs_file_operations; 4884 inode->i_op = &btrfs_file_inode_operations; 4885 4886 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4887 if (err) 4888 drop_inode = 1; 4889 else { 4890 inode->i_mapping->a_ops = &btrfs_aops; 4891 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 4892 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4893 d_instantiate(dentry, inode); 4894 } 4895 out_unlock: 4896 nr = trans->blocks_used; 4897 btrfs_end_transaction(trans, root); 4898 if (drop_inode) { 4899 inode_dec_link_count(inode); 4900 iput(inode); 4901 } 4902 btrfs_btree_balance_dirty(root, nr); 4903 return err; 4904 } 4905 4906 static int btrfs_link(struct dentry *old_dentry, struct inode *dir, 4907 struct dentry *dentry) 4908 { 4909 struct btrfs_trans_handle *trans; 4910 struct btrfs_root *root = BTRFS_I(dir)->root; 4911 struct inode *inode = old_dentry->d_inode; 4912 u64 index; 4913 unsigned long nr = 0; 4914 int err; 4915 int drop_inode = 0; 4916 4917 /* do not allow sys_link's with other subvols of the same device */ 4918 if (root->objectid != BTRFS_I(inode)->root->objectid) 4919 return -EXDEV; 4920 4921 if (inode->i_nlink == ~0U) 4922 return -EMLINK; 4923 4924 err = btrfs_set_inode_index(dir, &index); 4925 if (err) 4926 goto fail; 4927 4928 /* 4929 * 2 items for inode and inode ref 4930 * 2 items for dir items 4931 * 1 item for parent inode 4932 */ 4933 trans = btrfs_start_transaction(root, 5); 4934 if (IS_ERR(trans)) { 4935 err = PTR_ERR(trans); 4936 goto fail; 4937 } 4938 4939 btrfs_inc_nlink(inode); 4940 inode->i_ctime = CURRENT_TIME; 4941 ihold(inode); 4942 4943 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 4944 4945 if (err) { 4946 drop_inode = 1; 4947 } else { 4948 struct dentry *parent = dentry->d_parent; 4949 err = btrfs_update_inode(trans, root, inode); 4950 if (err) 4951 goto fail; 4952 d_instantiate(dentry, inode); 4953 btrfs_log_new_name(trans, inode, NULL, parent); 4954 } 4955 4956 nr = trans->blocks_used; 4957 btrfs_end_transaction(trans, root); 4958 fail: 4959 if (drop_inode) { 4960 inode_dec_link_count(inode); 4961 iput(inode); 4962 } 4963 btrfs_btree_balance_dirty(root, nr); 4964 return err; 4965 } 4966 4967 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 4968 { 4969 struct inode *inode = NULL; 4970 struct btrfs_trans_handle *trans; 4971 struct btrfs_root *root = BTRFS_I(dir)->root; 4972 int err = 0; 4973 int drop_on_err = 0; 4974 u64 objectid = 0; 4975 u64 index = 0; 4976 unsigned long nr = 1; 4977 4978 /* 4979 * 2 items for inode and ref 4980 * 2 items for dir items 4981 * 1 for xattr if selinux is on 4982 */ 4983 trans = btrfs_start_transaction(root, 5); 4984 if (IS_ERR(trans)) 4985 return PTR_ERR(trans); 4986 4987 err = btrfs_find_free_ino(root, &objectid); 4988 if (err) 4989 goto out_fail; 4990 4991 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4992 dentry->d_name.len, btrfs_ino(dir), objectid, 4993 S_IFDIR | mode, &index); 4994 if (IS_ERR(inode)) { 4995 err = PTR_ERR(inode); 4996 goto out_fail; 4997 } 4998 4999 drop_on_err = 1; 5000 5001 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 5002 if (err) 5003 goto out_fail; 5004 5005 inode->i_op = &btrfs_dir_inode_operations; 5006 inode->i_fop = &btrfs_dir_file_operations; 5007 5008 btrfs_i_size_write(inode, 0); 5009 err = btrfs_update_inode(trans, root, inode); 5010 if (err) 5011 goto out_fail; 5012 5013 err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, 5014 dentry->d_name.len, 0, index); 
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;

out_fail:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em);
}

static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		char *kaddr = kmap_atomic(page);
		unsigned long copy_size = min_t(u64,
				PAGE_CACHE_SIZE - pg_offset,
				max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr);
	}
	kfree(tmp);
	return 0;
}

/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
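 *
 * Callers can rely on never getting NULL back: the result is either a
 * valid extent_map (holes come back with block_start == EXTENT_MAP_HOLE)
 * or an ERR_PTR, which is why the function ends in BUG_ON(!em).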
5100 */ 5101 5102 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 5103 size_t pg_offset, u64 start, u64 len, 5104 int create) 5105 { 5106 int ret; 5107 int err = 0; 5108 u64 bytenr; 5109 u64 extent_start = 0; 5110 u64 extent_end = 0; 5111 u64 objectid = btrfs_ino(inode); 5112 u32 found_type; 5113 struct btrfs_path *path = NULL; 5114 struct btrfs_root *root = BTRFS_I(inode)->root; 5115 struct btrfs_file_extent_item *item; 5116 struct extent_buffer *leaf; 5117 struct btrfs_key found_key; 5118 struct extent_map *em = NULL; 5119 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5120 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5121 struct btrfs_trans_handle *trans = NULL; 5122 int compress_type; 5123 5124 again: 5125 read_lock(&em_tree->lock); 5126 em = lookup_extent_mapping(em_tree, start, len); 5127 if (em) 5128 em->bdev = root->fs_info->fs_devices->latest_bdev; 5129 read_unlock(&em_tree->lock); 5130 5131 if (em) { 5132 if (em->start > start || em->start + em->len <= start) 5133 free_extent_map(em); 5134 else if (em->block_start == EXTENT_MAP_INLINE && page) 5135 free_extent_map(em); 5136 else 5137 goto out; 5138 } 5139 em = alloc_extent_map(); 5140 if (!em) { 5141 err = -ENOMEM; 5142 goto out; 5143 } 5144 em->bdev = root->fs_info->fs_devices->latest_bdev; 5145 em->start = EXTENT_MAP_HOLE; 5146 em->orig_start = EXTENT_MAP_HOLE; 5147 em->len = (u64)-1; 5148 em->block_len = (u64)-1; 5149 5150 if (!path) { 5151 path = btrfs_alloc_path(); 5152 if (!path) { 5153 err = -ENOMEM; 5154 goto out; 5155 } 5156 /* 5157 * Chances are we'll be called again, so go ahead and do 5158 * readahead 5159 */ 5160 path->reada = 1; 5161 } 5162 5163 ret = btrfs_lookup_file_extent(trans, root, path, 5164 objectid, start, trans != NULL); 5165 if (ret < 0) { 5166 err = ret; 5167 goto out; 5168 } 5169 5170 if (ret != 0) { 5171 if (path->slots[0] == 0) 5172 goto not_found; 5173 path->slots[0]--; 5174 } 5175 5176 leaf = path->nodes[0]; 5177 item = btrfs_item_ptr(leaf, path->slots[0], 5178 struct btrfs_file_extent_item); 5179 /* are we inside the extent that was found? 
*/ 5180 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5181 found_type = btrfs_key_type(&found_key); 5182 if (found_key.objectid != objectid || 5183 found_type != BTRFS_EXTENT_DATA_KEY) { 5184 goto not_found; 5185 } 5186 5187 found_type = btrfs_file_extent_type(leaf, item); 5188 extent_start = found_key.offset; 5189 compress_type = btrfs_file_extent_compression(leaf, item); 5190 if (found_type == BTRFS_FILE_EXTENT_REG || 5191 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 5192 extent_end = extent_start + 5193 btrfs_file_extent_num_bytes(leaf, item); 5194 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 5195 size_t size; 5196 size = btrfs_file_extent_inline_len(leaf, item); 5197 extent_end = (extent_start + size + root->sectorsize - 1) & 5198 ~((u64)root->sectorsize - 1); 5199 } 5200 5201 if (start >= extent_end) { 5202 path->slots[0]++; 5203 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 5204 ret = btrfs_next_leaf(root, path); 5205 if (ret < 0) { 5206 err = ret; 5207 goto out; 5208 } 5209 if (ret > 0) 5210 goto not_found; 5211 leaf = path->nodes[0]; 5212 } 5213 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5214 if (found_key.objectid != objectid || 5215 found_key.type != BTRFS_EXTENT_DATA_KEY) 5216 goto not_found; 5217 if (start + len <= found_key.offset) 5218 goto not_found; 5219 em->start = start; 5220 em->len = found_key.offset - start; 5221 goto not_found_em; 5222 } 5223 5224 if (found_type == BTRFS_FILE_EXTENT_REG || 5225 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 5226 em->start = extent_start; 5227 em->len = extent_end - extent_start; 5228 em->orig_start = extent_start - 5229 btrfs_file_extent_offset(leaf, item); 5230 bytenr = btrfs_file_extent_disk_bytenr(leaf, item); 5231 if (bytenr == 0) { 5232 em->block_start = EXTENT_MAP_HOLE; 5233 goto insert; 5234 } 5235 if (compress_type != BTRFS_COMPRESS_NONE) { 5236 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 5237 em->compress_type = compress_type; 5238 em->block_start = bytenr; 5239 em->block_len = btrfs_file_extent_disk_num_bytes(leaf, 5240 item); 5241 } else { 5242 bytenr += btrfs_file_extent_offset(leaf, item); 5243 em->block_start = bytenr; 5244 em->block_len = em->len; 5245 if (found_type == BTRFS_FILE_EXTENT_PREALLOC) 5246 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 5247 } 5248 goto insert; 5249 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 5250 unsigned long ptr; 5251 char *map; 5252 size_t size; 5253 size_t extent_offset; 5254 size_t copy_size; 5255 5256 em->block_start = EXTENT_MAP_INLINE; 5257 if (!page || create) { 5258 em->start = extent_start; 5259 em->len = extent_end - extent_start; 5260 goto out; 5261 } 5262 5263 size = btrfs_file_extent_inline_len(leaf, item); 5264 extent_offset = page_offset(page) + pg_offset - extent_start; 5265 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, 5266 size - extent_offset); 5267 em->start = extent_start + extent_offset; 5268 em->len = (copy_size + root->sectorsize - 1) & 5269 ~((u64)root->sectorsize - 1); 5270 em->orig_start = EXTENT_MAP_INLINE; 5271 if (compress_type) { 5272 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 5273 em->compress_type = compress_type; 5274 } 5275 ptr = btrfs_file_extent_inline_start(item) + extent_offset; 5276 if (create == 0 && !PageUptodate(page)) { 5277 if (btrfs_file_extent_compression(leaf, item) != 5278 BTRFS_COMPRESS_NONE) { 5279 ret = uncompress_inline(path, inode, page, 5280 pg_offset, 5281 extent_offset, item); 5282 BUG_ON(ret); /* -ENOMEM */ 5283 } else { 5284 map = kmap(page); 5285 read_extent_buffer(leaf, map + 
pg_offset, ptr, 5286 copy_size); 5287 if (pg_offset + copy_size < PAGE_CACHE_SIZE) { 5288 memset(map + pg_offset + copy_size, 0, 5289 PAGE_CACHE_SIZE - pg_offset - 5290 copy_size); 5291 } 5292 kunmap(page); 5293 } 5294 flush_dcache_page(page); 5295 } else if (create && PageUptodate(page)) { 5296 BUG(); 5297 if (!trans) { 5298 kunmap(page); 5299 free_extent_map(em); 5300 em = NULL; 5301 5302 btrfs_release_path(path); 5303 trans = btrfs_join_transaction(root); 5304 5305 if (IS_ERR(trans)) 5306 return ERR_CAST(trans); 5307 goto again; 5308 } 5309 map = kmap(page); 5310 write_extent_buffer(leaf, map + pg_offset, ptr, 5311 copy_size); 5312 kunmap(page); 5313 btrfs_mark_buffer_dirty(leaf); 5314 } 5315 set_extent_uptodate(io_tree, em->start, 5316 extent_map_end(em) - 1, NULL, GFP_NOFS); 5317 goto insert; 5318 } else { 5319 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); 5320 WARN_ON(1); 5321 } 5322 not_found: 5323 em->start = start; 5324 em->len = len; 5325 not_found_em: 5326 em->block_start = EXTENT_MAP_HOLE; 5327 set_bit(EXTENT_FLAG_VACANCY, &em->flags); 5328 insert: 5329 btrfs_release_path(path); 5330 if (em->start > start || extent_map_end(em) <= start) { 5331 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " 5332 "[%llu %llu]\n", (unsigned long long)em->start, 5333 (unsigned long long)em->len, 5334 (unsigned long long)start, 5335 (unsigned long long)len); 5336 err = -EIO; 5337 goto out; 5338 } 5339 5340 err = 0; 5341 write_lock(&em_tree->lock); 5342 ret = add_extent_mapping(em_tree, em); 5343 /* it is possible that someone inserted the extent into the tree 5344 * while we had the lock dropped. It is also possible that 5345 * an overlapping map exists in the tree 5346 */ 5347 if (ret == -EEXIST) { 5348 struct extent_map *existing; 5349 5350 ret = 0; 5351 5352 existing = lookup_extent_mapping(em_tree, start, len); 5353 if (existing && (existing->start > start || 5354 existing->start + existing->len <= start)) { 5355 free_extent_map(existing); 5356 existing = NULL; 5357 } 5358 if (!existing) { 5359 existing = lookup_extent_mapping(em_tree, em->start, 5360 em->len); 5361 if (existing) { 5362 err = merge_extent_mapping(em_tree, existing, 5363 em, start, 5364 root->sectorsize); 5365 free_extent_map(existing); 5366 if (err) { 5367 free_extent_map(em); 5368 em = NULL; 5369 } 5370 } else { 5371 err = -EIO; 5372 free_extent_map(em); 5373 em = NULL; 5374 } 5375 } else { 5376 free_extent_map(em); 5377 em = existing; 5378 err = 0; 5379 } 5380 } 5381 write_unlock(&em_tree->lock); 5382 out: 5383 5384 trace_btrfs_get_extent(root, em); 5385 5386 if (path) 5387 btrfs_free_path(path); 5388 if (trans) { 5389 ret = btrfs_end_transaction(trans, root); 5390 if (!err) 5391 err = ret; 5392 } 5393 if (err) { 5394 free_extent_map(em); 5395 return ERR_PTR(err); 5396 } 5397 BUG_ON(!em); /* Error is always set */ 5398 return em; 5399 } 5400 5401 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 5402 size_t pg_offset, u64 start, u64 len, 5403 int create) 5404 { 5405 struct extent_map *em; 5406 struct extent_map *hole_em = NULL; 5407 u64 range_start = start; 5408 u64 end; 5409 u64 found; 5410 u64 found_end; 5411 int err = 0; 5412 5413 em = btrfs_get_extent(inode, page, pg_offset, start, len, create); 5414 if (IS_ERR(em)) 5415 return em; 5416 if (em) { 5417 /* 5418 * if our em maps to a hole, there might 5419 * actually be delalloc bytes behind it 5420 */ 5421 if (em->block_start != EXTENT_MAP_HOLE) 5422 return em; 5423 else 5424 hole_em = em; 5425 } 5426 5427 /* 
check to see if we've wrapped (len == -1 or similar) */ 5428 end = start + len; 5429 if (end < start) 5430 end = (u64)-1; 5431 else 5432 end -= 1; 5433 5434 em = NULL; 5435 5436 /* ok, we didn't find anything, lets look for delalloc */ 5437 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, 5438 end, len, EXTENT_DELALLOC, 1); 5439 found_end = range_start + found; 5440 if (found_end < range_start) 5441 found_end = (u64)-1; 5442 5443 /* 5444 * we didn't find anything useful, return 5445 * the original results from get_extent() 5446 */ 5447 if (range_start > end || found_end <= start) { 5448 em = hole_em; 5449 hole_em = NULL; 5450 goto out; 5451 } 5452 5453 /* adjust the range_start to make sure it doesn't 5454 * go backwards from the start they passed in 5455 */ 5456 range_start = max(start,range_start); 5457 found = found_end - range_start; 5458 5459 if (found > 0) { 5460 u64 hole_start = start; 5461 u64 hole_len = len; 5462 5463 em = alloc_extent_map(); 5464 if (!em) { 5465 err = -ENOMEM; 5466 goto out; 5467 } 5468 /* 5469 * when btrfs_get_extent can't find anything it 5470 * returns one huge hole 5471 * 5472 * make sure what it found really fits our range, and 5473 * adjust to make sure it is based on the start from 5474 * the caller 5475 */ 5476 if (hole_em) { 5477 u64 calc_end = extent_map_end(hole_em); 5478 5479 if (calc_end <= start || (hole_em->start > end)) { 5480 free_extent_map(hole_em); 5481 hole_em = NULL; 5482 } else { 5483 hole_start = max(hole_em->start, start); 5484 hole_len = calc_end - hole_start; 5485 } 5486 } 5487 em->bdev = NULL; 5488 if (hole_em && range_start > hole_start) { 5489 /* our hole starts before our delalloc, so we 5490 * have to return just the parts of the hole 5491 * that go until the delalloc starts 5492 */ 5493 em->len = min(hole_len, 5494 range_start - hole_start); 5495 em->start = hole_start; 5496 em->orig_start = hole_start; 5497 /* 5498 * don't adjust block start at all, 5499 * it is fixed at EXTENT_MAP_HOLE 5500 */ 5501 em->block_start = hole_em->block_start; 5502 em->block_len = hole_len; 5503 } else { 5504 em->start = range_start; 5505 em->len = found; 5506 em->orig_start = range_start; 5507 em->block_start = EXTENT_MAP_DELALLOC; 5508 em->block_len = found; 5509 } 5510 } else if (hole_em) { 5511 return hole_em; 5512 } 5513 out: 5514 5515 free_extent_map(hole_em); 5516 if (err) { 5517 free_extent_map(em); 5518 return ERR_PTR(err); 5519 } 5520 return em; 5521 } 5522 5523 static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 5524 struct extent_map *em, 5525 u64 start, u64 len) 5526 { 5527 struct btrfs_root *root = BTRFS_I(inode)->root; 5528 struct btrfs_trans_handle *trans; 5529 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5530 struct btrfs_key ins; 5531 u64 alloc_hint; 5532 int ret; 5533 bool insert = false; 5534 5535 /* 5536 * Ok if the extent map we looked up is a hole and is for the exact 5537 * range we want, there is no reason to allocate a new one, however if 5538 * it is not right then we need to free this one and drop the cache for 5539 * our range. 
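	 *
	 * The reuse case only re-flags and pins the mapping we already
	 * hold; the replace case sets 'insert' so the loop below can retry
	 * past any racing -EEXIST by dropping the stale cached range first.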
	 */
	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
	    em->len != len) {
		free_extent_map(em);
		em = NULL;
		insert = true;
		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	if (!em) {
		em = alloc_extent_map();
		if (!em) {
			em = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;

	/*
	 * We need to do this because if we're using the original em we searched
	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
	 */
	em->flags = 0;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	while (insert) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}

/*
 * returns 1 when the nocow is safe, < 0 on error, 0 if the
 * block must be cow'd
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start = iblock << inode->i_blkbits;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em))
		return PTR_ERR(em);

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fall back on
	 * buffered io.  INLINE is special, and we could probably kludge it
	 * in here, but it's still buffered so for safety let's just fall
	 * back to the generic buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what
	 * we do, so go ahead and fall back to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go
	 * back to buffered IO.  Don't blame me, this is the price we pay for
	 * using the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		return -ENOTBLK;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		/* DIO will do one hole at a time, so just unlock a sector */
		unlock_extent(&BTRFS_I(inode)->io_tree, start,
			      start + root->sectorsize - 1);
		return 0;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use
	 *    the existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and
	 *    can just use the extent.
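	 *
	 * Either way can_nocow_odirect() still has to agree: if the extent
	 * is shared, read-only, or has csums in the range, we fall through
	 * to must_cow and allocate a fresh extent instead.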
5759 * 5760 */ 5761 if (!create) { 5762 len = em->len - (start - em->start); 5763 goto map; 5764 } 5765 5766 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 5767 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && 5768 em->block_start != EXTENT_MAP_HOLE)) { 5769 int type; 5770 int ret; 5771 u64 block_start; 5772 5773 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5774 type = BTRFS_ORDERED_PREALLOC; 5775 else 5776 type = BTRFS_ORDERED_NOCOW; 5777 len = min(len, em->len - (start - em->start)); 5778 block_start = em->block_start + (start - em->start); 5779 5780 /* 5781 * we're not going to log anything, but we do need 5782 * to make sure the current transaction stays open 5783 * while we look for nocow cross refs 5784 */ 5785 trans = btrfs_join_transaction(root); 5786 if (IS_ERR(trans)) 5787 goto must_cow; 5788 5789 if (can_nocow_odirect(trans, inode, start, len) == 1) { 5790 ret = btrfs_add_ordered_extent_dio(inode, start, 5791 block_start, len, len, type); 5792 btrfs_end_transaction(trans, root); 5793 if (ret) { 5794 free_extent_map(em); 5795 return ret; 5796 } 5797 goto unlock; 5798 } 5799 btrfs_end_transaction(trans, root); 5800 } 5801 must_cow: 5802 /* 5803 * this will cow the extent, reset the len in case we changed 5804 * it above 5805 */ 5806 len = bh_result->b_size; 5807 em = btrfs_new_extent_direct(inode, em, start, len); 5808 if (IS_ERR(em)) 5809 return PTR_ERR(em); 5810 len = min(len, em->len - (start - em->start)); 5811 unlock: 5812 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, 5813 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, 5814 0, NULL, GFP_NOFS); 5815 map: 5816 bh_result->b_blocknr = (em->block_start + (start - em->start)) >> 5817 inode->i_blkbits; 5818 bh_result->b_size = len; 5819 bh_result->b_bdev = em->bdev; 5820 set_buffer_mapped(bh_result); 5821 if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5822 set_buffer_new(bh_result); 5823 5824 free_extent_map(em); 5825 5826 return 0; 5827 } 5828 5829 struct btrfs_dio_private { 5830 struct inode *inode; 5831 u64 logical_offset; 5832 u64 disk_bytenr; 5833 u64 bytes; 5834 u32 *csums; 5835 void *private; 5836 5837 /* number of bios pending for this dio */ 5838 atomic_t pending_bios; 5839 5840 /* IO errors */ 5841 int errors; 5842 5843 struct bio *orig_bio; 5844 }; 5845 5846 static void btrfs_endio_direct_read(struct bio *bio, int err) 5847 { 5848 struct btrfs_dio_private *dip = bio->bi_private; 5849 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; 5850 struct bio_vec *bvec = bio->bi_io_vec; 5851 struct inode *inode = dip->inode; 5852 struct btrfs_root *root = BTRFS_I(inode)->root; 5853 u64 start; 5854 u32 *private = dip->csums; 5855 5856 start = dip->logical_offset; 5857 do { 5858 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 5859 struct page *page = bvec->bv_page; 5860 char *kaddr; 5861 u32 csum = ~(u32)0; 5862 unsigned long flags; 5863 5864 local_irq_save(flags); 5865 kaddr = kmap_atomic(page); 5866 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, 5867 csum, bvec->bv_len); 5868 btrfs_csum_final(csum, (char *)&csum); 5869 kunmap_atomic(kaddr); 5870 local_irq_restore(flags); 5871 5872 flush_dcache_page(bvec->bv_page); 5873 if (csum != *private) { 5874 printk(KERN_ERR "btrfs csum failed ino %llu off" 5875 " %llu csum %u private %u\n", 5876 (unsigned long long)btrfs_ino(inode), 5877 (unsigned long long)start, 5878 csum, *private); 5879 err = -EIO; 5880 } 5881 } 5882 5883 start += bvec->bv_len; 5884 private++; 5885 bvec++; 5886 } while (bvec <= bvec_end); 5887 5888 
	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1);
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}

static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
	if (!ret)
		goto out_test;

	BUG_ON(!ordered);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		err = -ENOMEM;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			err = btrfs_update_inode_fallback(trans, root, inode);
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
		btrfs_update_inode_fallback(trans, root, inode);
	ret = 0;
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	btrfs_end_transaction(trans, root);
	ordered_offset = ordered->file_offset + ordered->len;
	/* once for us, once for the ordered tree */
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);

out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't

static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered = NULL;
	struct extent_state *cached_state = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes);
	if (!ret)
		goto out_test;

	BUG_ON(!ordered);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
		if (!ret)
			err = btrfs_update_inode_fallback(trans, root, inode);
		goto out;
	}

	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			 ordered->file_offset + ordered->len - 1, 0,
			 &cached_state);

	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
		ret = btrfs_mark_extent_written(trans, inode,
						ordered->file_offset,
						ordered->file_offset +
						ordered->len);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						  ordered->file_offset,
						  ordered->start,
						  ordered->disk_len,
						  ordered->len,
						  ordered->len,
						  0, 0, 0,
						  BTRFS_FILE_EXTENT_REG);
		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
				   ordered->file_offset, ordered->len);
		if (ret) {
			err = ret;
			WARN_ON(1);
			goto out_unlock;
		}
	}

	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
		btrfs_update_inode_fallback(trans, root, inode);
	ret = 0;
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
			     ordered->file_offset + ordered->len - 1,
			     &cached_state, GFP_NOFS);
out:
	btrfs_delalloc_release_metadata(inode, ordered->len);
	if (trans)
		btrfs_end_transaction(trans, root);
	ordered_offset = ordered->file_offset + ordered->len;
	/* once for us, once for the tree */
	btrfs_put_ordered_extent(ordered);
	btrfs_put_ordered_extent(ordered);

out_test:
	/*
	 * our bio might span multiple ordered extents.  If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		goto again;
	}
out_done:
	bio->bi_private = dip->private;

	kfree(dip->csums);
	kfree(dip);

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}
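
/*
 * Worked example for the "again" loop above (illustrative offsets): a
 * 1M dio write at file offset 0 that happens to cross two ordered
 * extents [0, 512K) and [512K, 1M) is accounted in two passes.  The
 * first btrfs_dec_test_first_ordered_pending() call completes [0, 512K)
 * and advances ordered_offset to 512K; the retry then completes
 * [512K, 1M), after which the dio as a whole can finish.
 */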
6084 */ 6085 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); 6086 if (ret) 6087 goto err; 6088 } else if (!skip_sum) { 6089 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, 6090 file_offset, csums); 6091 if (ret) 6092 goto err; 6093 } 6094 6095 map: 6096 ret = btrfs_map_bio(root, rw, bio, 0, async_submit); 6097 err: 6098 bio_put(bio); 6099 return ret; 6100 } 6101 6102 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, 6103 int skip_sum) 6104 { 6105 struct inode *inode = dip->inode; 6106 struct btrfs_root *root = BTRFS_I(inode)->root; 6107 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; 6108 struct bio *bio; 6109 struct bio *orig_bio = dip->orig_bio; 6110 struct bio_vec *bvec = orig_bio->bi_io_vec; 6111 u64 start_sector = orig_bio->bi_sector; 6112 u64 file_offset = dip->logical_offset; 6113 u64 submit_len = 0; 6114 u64 map_length; 6115 int nr_pages = 0; 6116 u32 *csums = dip->csums; 6117 int ret = 0; 6118 int async_submit = 0; 6119 int write = rw & REQ_WRITE; 6120 6121 map_length = orig_bio->bi_size; 6122 ret = btrfs_map_block(map_tree, READ, start_sector << 9, 6123 &map_length, NULL, 0); 6124 if (ret) { 6125 bio_put(orig_bio); 6126 return -EIO; 6127 } 6128 6129 if (map_length >= orig_bio->bi_size) { 6130 bio = orig_bio; 6131 goto submit; 6132 } 6133 6134 async_submit = 1; 6135 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); 6136 if (!bio) 6137 return -ENOMEM; 6138 bio->bi_private = dip; 6139 bio->bi_end_io = btrfs_end_dio_bio; 6140 atomic_inc(&dip->pending_bios); 6141 6142 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { 6143 if (unlikely(map_length < submit_len + bvec->bv_len || 6144 bio_add_page(bio, bvec->bv_page, bvec->bv_len, 6145 bvec->bv_offset) < bvec->bv_len)) { 6146 /* 6147 * inc the count before we submit the bio so 6148 * we know the end IO handler won't happen before 6149 * we inc the count. Otherwise, the dip might get freed 6150 * before we're done setting it up 6151 */ 6152 atomic_inc(&dip->pending_bios); 6153 ret = __btrfs_submit_dio_bio(bio, inode, rw, 6154 file_offset, skip_sum, 6155 csums, async_submit); 6156 if (ret) { 6157 bio_put(bio); 6158 atomic_dec(&dip->pending_bios); 6159 goto out_err; 6160 } 6161 6162 /* Write's use the ordered csums */ 6163 if (!write && !skip_sum) 6164 csums = csums + nr_pages; 6165 start_sector += submit_len >> 9; 6166 file_offset += submit_len; 6167 6168 submit_len = 0; 6169 nr_pages = 0; 6170 6171 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, 6172 start_sector, GFP_NOFS); 6173 if (!bio) 6174 goto out_err; 6175 bio->bi_private = dip; 6176 bio->bi_end_io = btrfs_end_dio_bio; 6177 6178 map_length = orig_bio->bi_size; 6179 ret = btrfs_map_block(map_tree, READ, start_sector << 9, 6180 &map_length, NULL, 0); 6181 if (ret) { 6182 bio_put(bio); 6183 goto out_err; 6184 } 6185 } else { 6186 submit_len += bvec->bv_len; 6187 nr_pages ++; 6188 bvec++; 6189 } 6190 } 6191 6192 submit: 6193 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 6194 csums, async_submit); 6195 if (!ret) 6196 return 0; 6197 6198 bio_put(bio); 6199 out_err: 6200 dip->errors = 1; 6201 /* 6202 * before atomic variable goto zero, we must 6203 * make sure dip->errors is perceived to be set. 
6204 */ 6205 smp_mb__before_atomic_dec(); 6206 if (atomic_dec_and_test(&dip->pending_bios)) 6207 bio_io_error(dip->orig_bio); 6208 6209 /* bio_end_io() will handle error, so we needn't return it */ 6210 return 0; 6211 } 6212 6213 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, 6214 loff_t file_offset) 6215 { 6216 struct btrfs_root *root = BTRFS_I(inode)->root; 6217 struct btrfs_dio_private *dip; 6218 struct bio_vec *bvec = bio->bi_io_vec; 6219 int skip_sum; 6220 int write = rw & REQ_WRITE; 6221 int ret = 0; 6222 6223 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 6224 6225 dip = kmalloc(sizeof(*dip), GFP_NOFS); 6226 if (!dip) { 6227 ret = -ENOMEM; 6228 goto free_ordered; 6229 } 6230 dip->csums = NULL; 6231 6232 /* Write's use the ordered csum stuff, so we don't need dip->csums */ 6233 if (!write && !skip_sum) { 6234 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); 6235 if (!dip->csums) { 6236 kfree(dip); 6237 ret = -ENOMEM; 6238 goto free_ordered; 6239 } 6240 } 6241 6242 dip->private = bio->bi_private; 6243 dip->inode = inode; 6244 dip->logical_offset = file_offset; 6245 6246 dip->bytes = 0; 6247 do { 6248 dip->bytes += bvec->bv_len; 6249 bvec++; 6250 } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1)); 6251 6252 dip->disk_bytenr = (u64)bio->bi_sector << 9; 6253 bio->bi_private = dip; 6254 dip->errors = 0; 6255 dip->orig_bio = bio; 6256 atomic_set(&dip->pending_bios, 0); 6257 6258 if (write) 6259 bio->bi_end_io = btrfs_endio_direct_write; 6260 else 6261 bio->bi_end_io = btrfs_endio_direct_read; 6262 6263 ret = btrfs_submit_direct_hook(rw, dip, skip_sum); 6264 if (!ret) 6265 return; 6266 free_ordered: 6267 /* 6268 * If this is a write, we need to clean up the reserved space and kill 6269 * the ordered extent. 6270 */ 6271 if (write) { 6272 struct btrfs_ordered_extent *ordered; 6273 ordered = btrfs_lookup_ordered_extent(inode, file_offset); 6274 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 6275 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 6276 btrfs_free_reserved_extent(root, ordered->start, 6277 ordered->disk_len); 6278 btrfs_put_ordered_extent(ordered); 6279 btrfs_put_ordered_extent(ordered); 6280 } 6281 bio_endio(bio, ret); 6282 } 6283 6284 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, 6285 const struct iovec *iov, loff_t offset, 6286 unsigned long nr_segs) 6287 { 6288 int seg; 6289 int i; 6290 size_t size; 6291 unsigned long addr; 6292 unsigned blocksize_mask = root->sectorsize - 1; 6293 ssize_t retval = -EINVAL; 6294 loff_t end = offset; 6295 6296 if (offset & blocksize_mask) 6297 goto out; 6298 6299 /* Check the memory alignment. Blocks cannot straddle pages */ 6300 for (seg = 0; seg < nr_segs; seg++) { 6301 addr = (unsigned long)iov[seg].iov_base; 6302 size = iov[seg].iov_len; 6303 end += size; 6304 if ((addr & blocksize_mask) || (size & blocksize_mask)) 6305 goto out; 6306 6307 /* If this is a write we don't need to check anymore */ 6308 if (rw & WRITE) 6309 continue; 6310 6311 /* 6312 * Check to make sure we don't have duplicate iov_base's in this 6313 * iovec, if so return EINVAL, otherwise we'll get csum errors 6314 * when reading back. 
6315 */ 6316 for (i = seg + 1; i < nr_segs; i++) { 6317 if (iov[seg].iov_base == iov[i].iov_base) 6318 goto out; 6319 } 6320 } 6321 retval = 0; 6322 out: 6323 return retval; 6324 } 6325 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, 6326 const struct iovec *iov, loff_t offset, 6327 unsigned long nr_segs) 6328 { 6329 struct file *file = iocb->ki_filp; 6330 struct inode *inode = file->f_mapping->host; 6331 struct btrfs_ordered_extent *ordered; 6332 struct extent_state *cached_state = NULL; 6333 u64 lockstart, lockend; 6334 ssize_t ret; 6335 int writing = rw & WRITE; 6336 int write_bits = 0; 6337 size_t count = iov_length(iov, nr_segs); 6338 6339 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, 6340 offset, nr_segs)) { 6341 return 0; 6342 } 6343 6344 lockstart = offset; 6345 lockend = offset + count - 1; 6346 6347 if (writing) { 6348 ret = btrfs_delalloc_reserve_space(inode, count); 6349 if (ret) 6350 goto out; 6351 } 6352 6353 while (1) { 6354 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6355 0, &cached_state); 6356 /* 6357 * We're concerned with the entire range that we're going to be 6358 * doing DIO to, so we need to make sure theres no ordered 6359 * extents in this range. 6360 */ 6361 ordered = btrfs_lookup_ordered_range(inode, lockstart, 6362 lockend - lockstart + 1); 6363 if (!ordered) 6364 break; 6365 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6366 &cached_state, GFP_NOFS); 6367 btrfs_start_ordered_extent(inode, ordered, 1); 6368 btrfs_put_ordered_extent(ordered); 6369 cond_resched(); 6370 } 6371 6372 /* 6373 * we don't use btrfs_set_extent_delalloc because we don't want 6374 * the dirty or uptodate bits 6375 */ 6376 if (writing) { 6377 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; 6378 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6379 EXTENT_DELALLOC, NULL, &cached_state, 6380 GFP_NOFS); 6381 if (ret) { 6382 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 6383 lockend, EXTENT_LOCKED | write_bits, 6384 1, 0, &cached_state, GFP_NOFS); 6385 goto out; 6386 } 6387 } 6388 6389 free_extent_state(cached_state); 6390 cached_state = NULL; 6391 6392 ret = __blockdev_direct_IO(rw, iocb, inode, 6393 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, 6394 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, 6395 btrfs_submit_direct, 0); 6396 6397 if (ret < 0 && ret != -EIOCBQUEUED) { 6398 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, 6399 offset + iov_length(iov, nr_segs) - 1, 6400 EXTENT_LOCKED | write_bits, 1, 0, 6401 &cached_state, GFP_NOFS); 6402 } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { 6403 /* 6404 * We're falling back to buffered, unlock the section we didn't 6405 * do IO on. 
6406 */ 6407 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, 6408 offset + iov_length(iov, nr_segs) - 1, 6409 EXTENT_LOCKED | write_bits, 1, 0, 6410 &cached_state, GFP_NOFS); 6411 } 6412 out: 6413 free_extent_state(cached_state); 6414 return ret; 6415 } 6416 6417 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 6418 __u64 start, __u64 len) 6419 { 6420 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); 6421 } 6422 6423 int btrfs_readpage(struct file *file, struct page *page) 6424 { 6425 struct extent_io_tree *tree; 6426 tree = &BTRFS_I(page->mapping->host)->io_tree; 6427 return extent_read_full_page(tree, page, btrfs_get_extent, 0); 6428 } 6429 6430 static int btrfs_writepage(struct page *page, struct writeback_control *wbc) 6431 { 6432 struct extent_io_tree *tree; 6433 6434 6435 if (current->flags & PF_MEMALLOC) { 6436 redirty_page_for_writepage(wbc, page); 6437 unlock_page(page); 6438 return 0; 6439 } 6440 tree = &BTRFS_I(page->mapping->host)->io_tree; 6441 return extent_write_full_page(tree, page, btrfs_get_extent, wbc); 6442 } 6443 6444 int btrfs_writepages(struct address_space *mapping, 6445 struct writeback_control *wbc) 6446 { 6447 struct extent_io_tree *tree; 6448 6449 tree = &BTRFS_I(mapping->host)->io_tree; 6450 return extent_writepages(tree, mapping, btrfs_get_extent, wbc); 6451 } 6452 6453 static int 6454 btrfs_readpages(struct file *file, struct address_space *mapping, 6455 struct list_head *pages, unsigned nr_pages) 6456 { 6457 struct extent_io_tree *tree; 6458 tree = &BTRFS_I(mapping->host)->io_tree; 6459 return extent_readpages(tree, mapping, pages, nr_pages, 6460 btrfs_get_extent); 6461 } 6462 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) 6463 { 6464 struct extent_io_tree *tree; 6465 struct extent_map_tree *map; 6466 int ret; 6467 6468 tree = &BTRFS_I(page->mapping->host)->io_tree; 6469 map = &BTRFS_I(page->mapping->host)->extent_tree; 6470 ret = try_release_extent_mapping(map, tree, page, gfp_flags); 6471 if (ret == 1) { 6472 ClearPagePrivate(page); 6473 set_page_private(page, 0); 6474 page_cache_release(page); 6475 } 6476 return ret; 6477 } 6478 6479 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) 6480 { 6481 if (PageWriteback(page) || PageDirty(page)) 6482 return 0; 6483 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); 6484 } 6485 6486 static void btrfs_invalidatepage(struct page *page, unsigned long offset) 6487 { 6488 struct extent_io_tree *tree; 6489 struct btrfs_ordered_extent *ordered; 6490 struct extent_state *cached_state = NULL; 6491 u64 page_start = page_offset(page); 6492 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 6493 6494 6495 /* 6496 * we have the page locked, so new writeback can't start, 6497 * and the dirty bit won't be cleared while we are here. 
6498 * 6499 * Wait for IO on this page so that we can safely clear 6500 * the PagePrivate2 bit and do ordered accounting 6501 */ 6502 wait_on_page_writeback(page); 6503 6504 tree = &BTRFS_I(page->mapping->host)->io_tree; 6505 if (offset) { 6506 btrfs_releasepage(page, GFP_NOFS); 6507 return; 6508 } 6509 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 6510 ordered = btrfs_lookup_ordered_extent(page->mapping->host, 6511 page_offset(page)); 6512 if (ordered) { 6513 /* 6514 * IO on this page will never be started, so we need 6515 * to account for any ordered extents now 6516 */ 6517 clear_extent_bit(tree, page_start, page_end, 6518 EXTENT_DIRTY | EXTENT_DELALLOC | 6519 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, 6520 &cached_state, GFP_NOFS); 6521 /* 6522 * whoever cleared the private bit is responsible 6523 * for the finish_ordered_io 6524 */ 6525 if (TestClearPagePrivate2(page)) { 6526 btrfs_finish_ordered_io(page->mapping->host, 6527 page_start, page_end); 6528 } 6529 btrfs_put_ordered_extent(ordered); 6530 cached_state = NULL; 6531 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 6532 } 6533 clear_extent_bit(tree, page_start, page_end, 6534 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 6535 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); 6536 __btrfs_releasepage(page, GFP_NOFS); 6537 6538 ClearPageChecked(page); 6539 if (PagePrivate(page)) { 6540 ClearPagePrivate(page); 6541 set_page_private(page, 0); 6542 page_cache_release(page); 6543 } 6544 } 6545 6546 /* 6547 * btrfs_page_mkwrite() is not allowed to change the file size as it gets 6548 * called from a page fault handler when a page is first dirtied. Hence we must 6549 * be careful to check for EOF conditions here. We set the page up correctly 6550 * for a written page which means we get ENOSPC checking when writing into 6551 * holes and correct delalloc and unwritten extent mapping on filesystems that 6552 * support these features. 6553 * 6554 * We are not allowed to take the i_mutex here so we have to play games to 6555 * protect against truncate races as the page could now be beyond EOF. Because 6556 * vmtruncate() writes the inode size before removing pages, once we have the 6557 * page lock we can determine safely if the page is beyond EOF. If it is not 6558 * beyond EOF, then the page is guaranteed safe against truncation until we 6559 * unlock the page. 
6560 */ 6561 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 6562 { 6563 struct page *page = vmf->page; 6564 struct inode *inode = fdentry(vma->vm_file)->d_inode; 6565 struct btrfs_root *root = BTRFS_I(inode)->root; 6566 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 6567 struct btrfs_ordered_extent *ordered; 6568 struct extent_state *cached_state = NULL; 6569 char *kaddr; 6570 unsigned long zero_start; 6571 loff_t size; 6572 int ret; 6573 int reserved = 0; 6574 u64 page_start; 6575 u64 page_end; 6576 6577 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 6578 if (!ret) { 6579 ret = btrfs_update_time(vma->vm_file); 6580 reserved = 1; 6581 } 6582 if (ret) { 6583 if (ret == -ENOMEM) 6584 ret = VM_FAULT_OOM; 6585 else /* -ENOSPC, -EIO, etc */ 6586 ret = VM_FAULT_SIGBUS; 6587 if (reserved) 6588 goto out; 6589 goto out_noreserve; 6590 } 6591 6592 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 6593 again: 6594 lock_page(page); 6595 size = i_size_read(inode); 6596 page_start = page_offset(page); 6597 page_end = page_start + PAGE_CACHE_SIZE - 1; 6598 6599 if ((page->mapping != inode->i_mapping) || 6600 (page_start >= size)) { 6601 /* page got truncated out from underneath us */ 6602 goto out_unlock; 6603 } 6604 wait_on_page_writeback(page); 6605 6606 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); 6607 set_page_extent_mapped(page); 6608 6609 /* 6610 * we can't set the delalloc bits if there are pending ordered 6611 * extents. Drop our locks and wait for them to finish 6612 */ 6613 ordered = btrfs_lookup_ordered_extent(inode, page_start); 6614 if (ordered) { 6615 unlock_extent_cached(io_tree, page_start, page_end, 6616 &cached_state, GFP_NOFS); 6617 unlock_page(page); 6618 btrfs_start_ordered_extent(inode, ordered, 1); 6619 btrfs_put_ordered_extent(ordered); 6620 goto again; 6621 } 6622 6623 /* 6624 * XXX - page_mkwrite gets called every time the page is dirtied, even 6625 * if it was already dirty, so for space accounting reasons we need to 6626 * clear any delalloc bits for the range we are fixing to save. There 6627 * is probably a better way to do this, but for now keep consistent with 6628 * prepare_pages in the normal write path. 
6629 */ 6630 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, 6631 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 6632 0, 0, &cached_state, GFP_NOFS); 6633 6634 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 6635 &cached_state); 6636 if (ret) { 6637 unlock_extent_cached(io_tree, page_start, page_end, 6638 &cached_state, GFP_NOFS); 6639 ret = VM_FAULT_SIGBUS; 6640 goto out_unlock; 6641 } 6642 ret = 0; 6643 6644 /* page is wholly or partially inside EOF */ 6645 if (page_start + PAGE_CACHE_SIZE > size) 6646 zero_start = size & ~PAGE_CACHE_MASK; 6647 else 6648 zero_start = PAGE_CACHE_SIZE; 6649 6650 if (zero_start != PAGE_CACHE_SIZE) { 6651 kaddr = kmap(page); 6652 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); 6653 flush_dcache_page(page); 6654 kunmap(page); 6655 } 6656 ClearPageChecked(page); 6657 set_page_dirty(page); 6658 SetPageUptodate(page); 6659 6660 BTRFS_I(inode)->last_trans = root->fs_info->generation; 6661 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 6662 6663 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); 6664 6665 out_unlock: 6666 if (!ret) 6667 return VM_FAULT_LOCKED; 6668 unlock_page(page); 6669 out: 6670 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); 6671 out_noreserve: 6672 return ret; 6673 } 6674 6675 static int btrfs_truncate(struct inode *inode) 6676 { 6677 struct btrfs_root *root = BTRFS_I(inode)->root; 6678 struct btrfs_block_rsv *rsv; 6679 int ret; 6680 int err = 0; 6681 struct btrfs_trans_handle *trans; 6682 unsigned long nr; 6683 u64 mask = root->sectorsize - 1; 6684 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); 6685 6686 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); 6687 if (ret) 6688 return ret; 6689 6690 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 6691 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 6692 6693 /* 6694 * Yes ladies and gentelment, this is indeed ugly. The fact is we have 6695 * 3 things going on here 6696 * 6697 * 1) We need to reserve space for our orphan item and the space to 6698 * delete our orphan item. Lord knows we don't want to have a dangling 6699 * orphan item because we didn't reserve space to remove it. 6700 * 6701 * 2) We need to reserve space to update our inode. 6702 * 6703 * 3) We need to have something to cache all the space that is going to 6704 * be free'd up by the truncate operation, but also have some slack 6705 * space reserved in case it uses space during the truncate (thank you 6706 * very much snapshotting). 6707 * 6708 * And we need these to all be seperate. The fact is we can use alot of 6709 * space doing the truncate, and we have no earthly idea how much space 6710 * we will use, so we need the truncate reservation to be seperate so it 6711 * doesn't end up using space reserved for updating the inode or 6712 * removing the orphan item. We also need to be able to stop the 6713 * transaction and start a new one, which means we need to be able to 6714 * update the inode several times, and we have no idea of knowing how 6715 * many times that will be, so we can't just reserve 1 item for the 6716 * entirety of the opration, so that has to be done seperately as well. 6717 * Then there is the orphan item, which does indeed need to be held on 6718 * to for the whole operation, and we need nobody to touch this reserved 6719 * space except the orphan code. 6720 * 6721 * So that leaves us with 6722 * 6723 * 1) root->orphan_block_rsv - for the orphan deletion. 

static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}

static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}
	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					      page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
				 &cached_state, GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			btrfs_finish_ordered_io(page->mapping->host,
						page_start, page_end);
		}
		btrfs_put_ordered_extent(ordered);
		cached_state = NULL;
		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
	}
	clear_extent_bit(tree, page_start, page_end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}
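
/*
 * Note on the offset check above: a nonzero offset means only the tail
 * of the page is being invalidated (e.g. a truncate to a mid-page
 * size), in which case the extent state and ordered accounting for the
 * page must be left alone and the most we can do is try to release it.
 */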
6833 */ 6834 ret = btrfs_orphan_del(NULL, inode); 6835 } 6836 6837 if (trans) { 6838 trans->block_rsv = &root->fs_info->trans_block_rsv; 6839 ret = btrfs_update_inode(trans, root, inode); 6840 if (ret && !err) 6841 err = ret; 6842 6843 nr = trans->blocks_used; 6844 ret = btrfs_end_transaction(trans, root); 6845 btrfs_btree_balance_dirty(root, nr); 6846 } 6847 6848 out: 6849 btrfs_free_block_rsv(root, rsv); 6850 6851 if (ret && !err) 6852 err = ret; 6853 6854 return err; 6855 } 6856 6857 /* 6858 * create a new subvolume directory/inode (helper for the ioctl). 6859 */ 6860 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 6861 struct btrfs_root *new_root, u64 new_dirid) 6862 { 6863 struct inode *inode; 6864 int err; 6865 u64 index = 0; 6866 6867 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, 6868 new_dirid, new_dirid, 6869 S_IFDIR | (~current_umask() & S_IRWXUGO), 6870 &index); 6871 if (IS_ERR(inode)) 6872 return PTR_ERR(inode); 6873 inode->i_op = &btrfs_dir_inode_operations; 6874 inode->i_fop = &btrfs_dir_file_operations; 6875 6876 set_nlink(inode, 1); 6877 btrfs_i_size_write(inode, 0); 6878 6879 err = btrfs_update_inode(trans, new_root, inode); 6880 6881 iput(inode); 6882 return err; 6883 } 6884 6885 struct inode *btrfs_alloc_inode(struct super_block *sb) 6886 { 6887 struct btrfs_inode *ei; 6888 struct inode *inode; 6889 6890 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); 6891 if (!ei) 6892 return NULL; 6893 6894 ei->root = NULL; 6895 ei->space_info = NULL; 6896 ei->generation = 0; 6897 ei->sequence = 0; 6898 ei->last_trans = 0; 6899 ei->last_sub_trans = 0; 6900 ei->logged_trans = 0; 6901 ei->delalloc_bytes = 0; 6902 ei->disk_i_size = 0; 6903 ei->flags = 0; 6904 ei->csum_bytes = 0; 6905 ei->index_cnt = (u64)-1; 6906 ei->last_unlink_trans = 0; 6907 6908 spin_lock_init(&ei->lock); 6909 ei->outstanding_extents = 0; 6910 ei->reserved_extents = 0; 6911 6912 ei->ordered_data_close = 0; 6913 ei->orphan_meta_reserved = 0; 6914 ei->dummy_inode = 0; 6915 ei->in_defrag = 0; 6916 ei->delalloc_meta_reserved = 0; 6917 ei->force_compress = BTRFS_COMPRESS_NONE; 6918 6919 ei->delayed_node = NULL; 6920 6921 inode = &ei->vfs_inode; 6922 extent_map_tree_init(&ei->extent_tree); 6923 extent_io_tree_init(&ei->io_tree, &inode->i_data); 6924 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); 6925 ei->io_tree.track_uptodate = 1; 6926 ei->io_failure_tree.track_uptodate = 1; 6927 mutex_init(&ei->log_mutex); 6928 mutex_init(&ei->delalloc_mutex); 6929 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 6930 INIT_LIST_HEAD(&ei->i_orphan); 6931 INIT_LIST_HEAD(&ei->delalloc_inodes); 6932 INIT_LIST_HEAD(&ei->ordered_operations); 6933 RB_CLEAR_NODE(&ei->rb_node); 6934 6935 return inode; 6936 } 6937 6938 static void btrfs_i_callback(struct rcu_head *head) 6939 { 6940 struct inode *inode = container_of(head, struct inode, i_rcu); 6941 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); 6942 } 6943 6944 void btrfs_destroy_inode(struct inode *inode) 6945 { 6946 struct btrfs_ordered_extent *ordered; 6947 struct btrfs_root *root = BTRFS_I(inode)->root; 6948 6949 WARN_ON(!list_empty(&inode->i_dentry)); 6950 WARN_ON(inode->i_data.nrpages); 6951 WARN_ON(BTRFS_I(inode)->outstanding_extents); 6952 WARN_ON(BTRFS_I(inode)->reserved_extents); 6953 WARN_ON(BTRFS_I(inode)->delalloc_bytes); 6954 WARN_ON(BTRFS_I(inode)->csum_bytes); 6955 6956 /* 6957 * This can happen where we create an inode, but somebody else also 6958 * created the same inode and we need to destroy the one we already 6959 * created. 
6960 */ 6961 if (!root) 6962 goto free; 6963 6964 /* 6965 * Make sure we're properly removed from the ordered operation 6966 * lists. 6967 */ 6968 smp_mb(); 6969 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) { 6970 spin_lock(&root->fs_info->ordered_extent_lock); 6971 list_del_init(&BTRFS_I(inode)->ordered_operations); 6972 spin_unlock(&root->fs_info->ordered_extent_lock); 6973 } 6974 6975 spin_lock(&root->orphan_lock); 6976 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6977 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", 6978 (unsigned long long)btrfs_ino(inode)); 6979 list_del_init(&BTRFS_I(inode)->i_orphan); 6980 } 6981 spin_unlock(&root->orphan_lock); 6982 6983 while (1) { 6984 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 6985 if (!ordered) 6986 break; 6987 else { 6988 printk(KERN_ERR "btrfs found ordered " 6989 "extent %llu %llu on inode cleanup\n", 6990 (unsigned long long)ordered->file_offset, 6991 (unsigned long long)ordered->len); 6992 btrfs_remove_ordered_extent(inode, ordered); 6993 btrfs_put_ordered_extent(ordered); 6994 btrfs_put_ordered_extent(ordered); 6995 } 6996 } 6997 inode_tree_del(inode); 6998 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 6999 free: 7000 btrfs_remove_delayed_node(inode); 7001 call_rcu(&inode->i_rcu, btrfs_i_callback); 7002 } 7003 7004 int btrfs_drop_inode(struct inode *inode) 7005 { 7006 struct btrfs_root *root = BTRFS_I(inode)->root; 7007 7008 if (btrfs_root_refs(&root->root_item) == 0 && 7009 !btrfs_is_free_space_inode(root, inode)) 7010 return 1; 7011 else 7012 return generic_drop_inode(inode); 7013 } 7014 7015 static void init_once(void *foo) 7016 { 7017 struct btrfs_inode *ei = (struct btrfs_inode *) foo; 7018 7019 inode_init_once(&ei->vfs_inode); 7020 } 7021 7022 void btrfs_destroy_cachep(void) 7023 { 7024 if (btrfs_inode_cachep) 7025 kmem_cache_destroy(btrfs_inode_cachep); 7026 if (btrfs_trans_handle_cachep) 7027 kmem_cache_destroy(btrfs_trans_handle_cachep); 7028 if (btrfs_transaction_cachep) 7029 kmem_cache_destroy(btrfs_transaction_cachep); 7030 if (btrfs_path_cachep) 7031 kmem_cache_destroy(btrfs_path_cachep); 7032 if (btrfs_free_space_cachep) 7033 kmem_cache_destroy(btrfs_free_space_cachep); 7034 } 7035 7036 int btrfs_init_cachep(void) 7037 { 7038 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache", 7039 sizeof(struct btrfs_inode), 0, 7040 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); 7041 if (!btrfs_inode_cachep) 7042 goto fail; 7043 7044 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache", 7045 sizeof(struct btrfs_trans_handle), 0, 7046 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7047 if (!btrfs_trans_handle_cachep) 7048 goto fail; 7049 7050 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache", 7051 sizeof(struct btrfs_transaction), 0, 7052 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7053 if (!btrfs_transaction_cachep) 7054 goto fail; 7055 7056 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache", 7057 sizeof(struct btrfs_path), 0, 7058 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7059 if (!btrfs_path_cachep) 7060 goto fail; 7061 7062 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache", 7063 sizeof(struct btrfs_free_space), 0, 7064 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 7065 if (!btrfs_free_space_cachep) 7066 goto fail; 7067 7068 return 0; 7069 fail: 7070 btrfs_destroy_cachep(); 7071 return -ENOMEM; 7072 } 7073 7074 static int btrfs_getattr(struct vfsmount *mnt, 7075 struct dentry *dentry, struct kstat *stat) 
7076 { 7077 struct inode *inode = dentry->d_inode; 7078 u32 blocksize = inode->i_sb->s_blocksize; 7079 7080 generic_fillattr(inode, stat); 7081 stat->dev = BTRFS_I(inode)->root->anon_dev; 7082 stat->blksize = PAGE_CACHE_SIZE; 7083 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + 7084 ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9; 7085 return 0; 7086 } 7087 7088 /* 7089 * If a file is moved, it will inherit the cow and compression flags of the new 7090 * directory. 7091 */ 7092 static void fixup_inode_flags(struct inode *dir, struct inode *inode) 7093 { 7094 struct btrfs_inode *b_dir = BTRFS_I(dir); 7095 struct btrfs_inode *b_inode = BTRFS_I(inode); 7096 7097 if (b_dir->flags & BTRFS_INODE_NODATACOW) 7098 b_inode->flags |= BTRFS_INODE_NODATACOW; 7099 else 7100 b_inode->flags &= ~BTRFS_INODE_NODATACOW; 7101 7102 if (b_dir->flags & BTRFS_INODE_COMPRESS) 7103 b_inode->flags |= BTRFS_INODE_COMPRESS; 7104 else 7105 b_inode->flags &= ~BTRFS_INODE_COMPRESS; 7106 } 7107 7108 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 7109 struct inode *new_dir, struct dentry *new_dentry) 7110 { 7111 struct btrfs_trans_handle *trans; 7112 struct btrfs_root *root = BTRFS_I(old_dir)->root; 7113 struct btrfs_root *dest = BTRFS_I(new_dir)->root; 7114 struct inode *new_inode = new_dentry->d_inode; 7115 struct inode *old_inode = old_dentry->d_inode; 7116 struct timespec ctime = CURRENT_TIME; 7117 u64 index = 0; 7118 u64 root_objectid; 7119 int ret; 7120 u64 old_ino = btrfs_ino(old_inode); 7121 7122 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 7123 return -EPERM; 7124 7125 /* we only allow rename subvolume link between subvolumes */ 7126 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 7127 return -EXDEV; 7128 7129 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 7130 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) 7131 return -ENOTEMPTY; 7132 7133 if (S_ISDIR(old_inode->i_mode) && new_inode && 7134 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 7135 return -ENOTEMPTY; 7136 /* 7137 * we're using rename to replace one file with another. 7138 * and the replacement file is large. Start IO on it now so 7139 * we don't add too much work to the end of the transaction 7140 */ 7141 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && 7142 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) 7143 filemap_flush(old_inode->i_mapping); 7144 7145 /* close the racy window with snapshot create/destroy ioctl */ 7146 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 7147 down_read(&root->fs_info->subvol_sem); 7148 /* 7149 * We want to reserve the absolute worst case amount of items. So if 7150 * both inodes are subvols and we need to unlink them then that would 7151 * require 4 item modifications, but if they are both normal inodes it 7152 * would require 5 item modifications, so we'll assume their normal 7153 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items 7154 * should cover the worst case number of items we'll modify. 7155 */ 7156 trans = btrfs_start_transaction(root, 20); 7157 if (IS_ERR(trans)) { 7158 ret = PTR_ERR(trans); 7159 goto out_notrans; 7160 } 7161 7162 if (dest != root) 7163 btrfs_record_root_in_trans(trans, dest); 7164 7165 ret = btrfs_set_inode_index(new_dir, &index); 7166 if (ret) 7167 goto out_fail; 7168 7169 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { 7170 /* force full log commit if subvolume involved. 

static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(BTRFS_I(inode)->outstanding_extents);
	WARN_ON(BTRFS_I(inode)->reserved_extents);
	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
	WARN_ON(BTRFS_I(inode)->csum_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	spin_lock(&root->orphan_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
		       (unsigned long long)btrfs_ino(inode));
		list_del_init(&BTRFS_I(inode)->i_orphan);
	}
	spin_unlock(&root->orphan_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		printk(KERN_ERR "btrfs found ordered "
		       "extent %llu %llu on inode cleanup\n",
		       (unsigned long long)ordered->file_offset,
		       (unsigned long long)ordered->len);
		btrfs_remove_ordered_extent(inode, ordered);
		/* once for us, once for the tree */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	btrfs_remove_delayed_node(inode);
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}

int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_refs(&root->root_item) == 0 &&
	    !btrfs_is_free_space_inode(root, inode))
		return 1;
	else
		return generic_drop_inode(inode);
}
7274 */ 7275 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 7276 { 7277 struct list_head *head = &root->fs_info->delalloc_inodes; 7278 struct btrfs_inode *binode; 7279 struct inode *inode; 7280 7281 if (root->fs_info->sb->s_flags & MS_RDONLY) 7282 return -EROFS; 7283 7284 spin_lock(&root->fs_info->delalloc_lock); 7285 while (!list_empty(head)) { 7286 binode = list_entry(head->next, struct btrfs_inode, 7287 delalloc_inodes); 7288 inode = igrab(&binode->vfs_inode); 7289 if (!inode) 7290 list_del_init(&binode->delalloc_inodes); 7291 spin_unlock(&root->fs_info->delalloc_lock); 7292 if (inode) { 7293 filemap_flush(inode->i_mapping); 7294 if (delay_iput) 7295 btrfs_add_delayed_iput(inode); 7296 else 7297 iput(inode); 7298 } 7299 cond_resched(); 7300 spin_lock(&root->fs_info->delalloc_lock); 7301 } 7302 spin_unlock(&root->fs_info->delalloc_lock); 7303 7304 /* the filemap_flush will queue IO into the worker threads, but 7305 * we have to make sure the IO is actually started and that 7306 * ordered extents get created before we return 7307 */ 7308 atomic_inc(&root->fs_info->async_submit_draining); 7309 while (atomic_read(&root->fs_info->nr_async_submits) || 7310 atomic_read(&root->fs_info->async_delalloc_pages)) { 7311 wait_event(root->fs_info->async_submit_wait, 7312 (atomic_read(&root->fs_info->nr_async_submits) == 0 && 7313 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 7314 } 7315 atomic_dec(&root->fs_info->async_submit_draining); 7316 return 0; 7317 } 7318 7319 static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 7320 const char *symname) 7321 { 7322 struct btrfs_trans_handle *trans; 7323 struct btrfs_root *root = BTRFS_I(dir)->root; 7324 struct btrfs_path *path; 7325 struct btrfs_key key; 7326 struct inode *inode = NULL; 7327 int err; 7328 int drop_inode = 0; 7329 u64 objectid; 7330 u64 index = 0 ; 7331 int name_len; 7332 int datasize; 7333 unsigned long ptr; 7334 struct btrfs_file_extent_item *ei; 7335 struct extent_buffer *leaf; 7336 unsigned long nr = 0; 7337 7338 name_len = strlen(symname) + 1; 7339 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 7340 return -ENAMETOOLONG; 7341 7342 /* 7343 * 2 items for inode item and ref 7344 * 2 items for dir items 7345 * 1 item for xattr if selinux is on 7346 */ 7347 trans = btrfs_start_transaction(root, 5); 7348 if (IS_ERR(trans)) 7349 return PTR_ERR(trans); 7350 7351 err = btrfs_find_free_ino(root, &objectid); 7352 if (err) 7353 goto out_unlock; 7354 7355 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7356 dentry->d_name.len, btrfs_ino(dir), objectid, 7357 S_IFLNK|S_IRWXUGO, &index); 7358 if (IS_ERR(inode)) { 7359 err = PTR_ERR(inode); 7360 goto out_unlock; 7361 } 7362 7363 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 7364 if (err) { 7365 drop_inode = 1; 7366 goto out_unlock; 7367 } 7368 7369 /* 7370 * If the active LSM wants to access the inode during 7371 * d_instantiate it needs these. Smack checks to see 7372 * if the filesystem supports xattrs by looking at the 7373 * ops vector. 
7374 */ 7375 inode->i_fop = &btrfs_file_operations; 7376 inode->i_op = &btrfs_file_inode_operations; 7377 7378 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 7379 if (err) 7380 drop_inode = 1; 7381 else { 7382 inode->i_mapping->a_ops = &btrfs_aops; 7383 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 7384 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 7385 } 7386 if (drop_inode) 7387 goto out_unlock; 7388 7389 path = btrfs_alloc_path(); 7390 if (!path) { 7391 err = -ENOMEM; 7392 drop_inode = 1; 7393 goto out_unlock; 7394 } 7395 key.objectid = btrfs_ino(inode); 7396 key.offset = 0; 7397 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 7398 datasize = btrfs_file_extent_calc_inline_size(name_len); 7399 err = btrfs_insert_empty_item(trans, root, path, &key, 7400 datasize); 7401 if (err) { 7402 drop_inode = 1; 7403 btrfs_free_path(path); 7404 goto out_unlock; 7405 } 7406 leaf = path->nodes[0]; 7407 ei = btrfs_item_ptr(leaf, path->slots[0], 7408 struct btrfs_file_extent_item); 7409 btrfs_set_file_extent_generation(leaf, ei, trans->transid); 7410 btrfs_set_file_extent_type(leaf, ei, 7411 BTRFS_FILE_EXTENT_INLINE); 7412 btrfs_set_file_extent_encryption(leaf, ei, 0); 7413 btrfs_set_file_extent_compression(leaf, ei, 0); 7414 btrfs_set_file_extent_other_encoding(leaf, ei, 0); 7415 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); 7416 7417 ptr = btrfs_file_extent_inline_start(ei); 7418 write_extent_buffer(leaf, symname, ptr, name_len); 7419 btrfs_mark_buffer_dirty(leaf); 7420 btrfs_free_path(path); 7421 7422 inode->i_op = &btrfs_symlink_inode_operations; 7423 inode->i_mapping->a_ops = &btrfs_symlink_aops; 7424 inode->i_mapping->backing_dev_info = &root->fs_info->bdi; 7425 inode_set_bytes(inode, name_len); 7426 btrfs_i_size_write(inode, name_len - 1); 7427 err = btrfs_update_inode(trans, root, inode); 7428 if (err) 7429 drop_inode = 1; 7430 7431 out_unlock: 7432 if (!err) 7433 d_instantiate(dentry, inode); 7434 nr = trans->blocks_used; 7435 btrfs_end_transaction(trans, root); 7436 if (drop_inode) { 7437 inode_dec_link_count(inode); 7438 iput(inode); 7439 } 7440 btrfs_btree_balance_dirty(root, nr); 7441 return err; 7442 } 7443 7444 static int __btrfs_prealloc_file_range(struct inode *inode, int mode, 7445 u64 start, u64 num_bytes, u64 min_size, 7446 loff_t actual_len, u64 *alloc_hint, 7447 struct btrfs_trans_handle *trans) 7448 { 7449 struct btrfs_root *root = BTRFS_I(inode)->root; 7450 struct btrfs_key ins; 7451 u64 cur_offset = start; 7452 u64 i_size; 7453 int ret = 0; 7454 bool own_trans = true; 7455 7456 if (trans) 7457 own_trans = false; 7458 while (num_bytes > 0) { 7459 if (own_trans) { 7460 trans = btrfs_start_transaction(root, 3); 7461 if (IS_ERR(trans)) { 7462 ret = PTR_ERR(trans); 7463 break; 7464 } 7465 } 7466 7467 ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, 7468 0, *alloc_hint, &ins, 1); 7469 if (ret) { 7470 if (own_trans) 7471 btrfs_end_transaction(trans, root); 7472 break; 7473 } 7474 7475 ret = insert_reserved_file_extent(trans, inode, 7476 cur_offset, ins.objectid, 7477 ins.offset, ins.offset, 7478 ins.offset, 0, 0, 0, 7479 BTRFS_FILE_EXTENT_PREALLOC); 7480 if (ret) { 7481 btrfs_abort_transaction(trans, root, ret); 7482 if (own_trans) 7483 btrfs_end_transaction(trans, root); 7484 break; 7485 } 7486 btrfs_drop_extent_cache(inode, cur_offset, 7487 cur_offset + ins.offset -1, 0); 7488 7489 num_bytes -= ins.offset; 7490 cur_offset += ins.offset; 7491 *alloc_hint = ins.objectid + ins.offset; 7492 7493 inode->i_ctime = CURRENT_TIME; 7494 

/*
 * some fairly slow code that needs optimization.  This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}

static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these.  Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		drop_inode = 1;
		goto out_unlock;
	}
	key.objectid = btrfs_ino(inode);
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	if (!err)
		d_instantiate(dentry, inode);
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
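
/*
 * Example of the symlink layout created above: symlink("target", ...)
 * stores the 7 bytes "target\0" (name_len = strlen + 1) as an inline
 * file extent (BTRFS_FILE_EXTENT_INLINE) at key
 * (ino, BTRFS_EXTENT_DATA_KEY, 0), while i_size is set to name_len - 1
 * so readlink() sees just "target" without the terminating NUL.
 */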

static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);

		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}

static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
	.d_release	= btrfs_dentry_release,
};