/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy. This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
	int total_copied = 0;

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0)) {
			break;
		}

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct file *file,
					    struct page **pages,
					    size_t num_pages,
					    loff_t pos,
					    size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					NULL);
	BUG_ON(err);

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size. There is no need to log the inode
		 * at this time.
		 */
	}
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end]. Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end. hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split. Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *      | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}
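
/*
 * helper for btrfs_mark_extent_written: returns 1 if the file extent
 * item at @slot belongs to @objectid, refers to the same plain
 * (uncompressed, unencrypted) disk extent @bytenr at logical offset
 * @orig_offset, and so can be merged with its neighbour.  On success
 * *start and *end are filled with the item's file range.
 */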
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
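
/*
 * btrfs' write path.  O_DIRECT writes go through the generic direct IO
 * code first; anything it could not complete falls back to the buffered
 * loop below.  The buffered loop reserves delalloc space, grabs and locks
 * the pages with prepare_pages(), copies the user data in, and marks the
 * copied range dirty/delalloc via dirty_and_release_pages().  For
 * synchronous writes (O_DSYNC, O_DIRECT or a sync mount) the dirty range
 * is flushed and, when needed, the inode is logged before returning.
 */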
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *pinned[2];
	struct page **pages = NULL;
	struct iov_iter i;
	loff_t *ppos = &iocb->ki_pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count;
	size_t ocount;
	int ret = 0;
	int nrptrs;
	unsigned long first_index;
	unsigned long last_index;
	int will_write;
	int buffered = 0;
	int copied = 0;
	int dirty_pages = 0;

	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	pinned[0] = NULL;
	pinned[1] = NULL;

	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		goto out;
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = generic_file_direct_write(iocb, iov, &nr_segs,
							pos, ppos, count,
							ocount);
		/*
		 * the generic O_DIRECT will update in-memory i_size after the
		 * DIOs are done. But our endio handlers that update the on
		 * disk i_size never update past the in memory i_size. So we
		 * need one more update here to catch any additions to the
		 * file
		 */
		if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
			btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
			mark_inode_dirty(inode);
		}

		if (num_written < 0) {
			ret = num_written;
			num_written = 0;
			goto out;
		} else if (num_written == count) {
			/* pick up pos changes done by the generic code */
			pos = *ppos;
			goto out;
		}
		/*
		 * We are going to do buffered for the rest of the range, so we
		 * need to make sure to invalidate the buffered pages when we're
		 * done.
		 */
		buffered = 1;
		pos += num_written;
	}

	iov_iter_init(&i, iov, nr_segs, count, num_written);
	nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	/* generic_write_checks can change our pos */
	start_pos = pos;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (iov_iter_count(&i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(&i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
				   PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
			ret = -EFAULT;
			goto out;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			goto out;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, &i);
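		/*
		 * we may have copied less than we reserved pages for;
		 * work out how many pages were actually dirtied and hand
		 * back the delalloc reservation for the rest below.
		 */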
		dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
			      PAGE_CACHE_SHIFT;

		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			dirty_and_release_pages(NULL, root, file, pages,
						dirty_pages, pos, copied);
		}

		btrfs_drop_pages(pages, num_pages);

		if (copied > 0) {
			if (will_write) {
				filemap_fdatawrite_range(inode->i_mapping, pos,
							 pos + copied - 1);
			} else {
				balance_dirty_pages_ratelimited_nr(
							inode->i_mapping,
							dirty_pages);
				if (dirty_pages <
				    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
					btrfs_btree_balance_dirty(root, 1);
				btrfs_throttle(root);
			}
		}

		pos += copied;
		num_written += copied;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				num_written = PTR_ERR(trans);
				goto done;
			}
			mutex_lock(&inode->i_mutex);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			mutex_unlock(&inode->i_mutex);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else if (ret != BTRFS_NO_LOG_SYNC) {
				btrfs_commit_transaction(trans, root);
			} else {
				btrfs_end_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT && buffered) {
			invalidate_mapping_pages(inode->i_mapping,
				start_pos >> PAGE_CACHE_SHIFT,
				(start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
done:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size. This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories. This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit. This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;


	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log. It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
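
/*
 * mmap support: reads go through the generic fault path, while writes
 * fault through btrfs_page_mkwrite so the filesystem can prepare the
 * page before it is made writable.
 */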
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};