/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
	int total_copied = 0;

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0)) {
			break;
		}

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct file *file,
					    struct page **pages,
					    size_t num_pages,
					    loff_t pos,
					    size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					NULL);
	BUG_ON(err);

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);
		BUG_ON(!split || !split2);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			int c;
			for (c = i - 1; c >= 0; c--) {
				unlock_page(pages[c]);
				page_cache_release(pages[c]);
			}
			return -ENOMEM;
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *pinned[2];
	struct page **pages = NULL;
	struct iov_iter i;
	loff_t *ppos = &iocb->ki_pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count;
	size_t ocount;
	int ret = 0;
	int nrptrs;
	unsigned long first_index;
	unsigned long last_index;
	int will_write;
	int buffered = 0;
	int copied = 0;
	int dirty_pages = 0;

	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	pinned[0] = NULL;
	pinned[1] = NULL;

	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		goto out;
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = generic_file_direct_write(iocb, iov, &nr_segs,
							pos, ppos, count,
							ocount);
		/*
		 * the generic O_DIRECT will update in-memory i_size after the
		 * DIOs are done.  But our endio handlers that update the on
		 * disk i_size never update past the in memory i_size.  So we
		 * need one more update here to catch any additions to the
		 * file
		 */
		if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
			btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
			mark_inode_dirty(inode);
		}

		if (num_written < 0) {
			ret = num_written;
			num_written = 0;
			goto out;
		} else if (num_written == count) {
			/* pick up pos changes done by the generic code */
			pos = *ppos;
			goto out;
		}
		/*
		 * We are going to do buffered for the rest of the range, so we
		 * need to make sure to invalidate the buffered pages when we're
		 * done.
		 */
		buffered = 1;
		pos += num_written;
	}

	iov_iter_init(&i, iov, nr_segs, count, num_written);
	nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	/* generic_write_checks can change our pos */
	start_pos = pos;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (iov_iter_count(&i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(&i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
			ret = -EFAULT;
			goto out;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			goto out;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, &i);
		dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
				PAGE_CACHE_SHIFT;

		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			dirty_and_release_pages(NULL, root, file, pages,
						dirty_pages, pos, copied);
		}

		btrfs_drop_pages(pages, num_pages);

		if (copied > 0) {
			if (will_write) {
				filemap_fdatawrite_range(inode->i_mapping, pos,
							 pos + copied - 1);
			} else {
				balance_dirty_pages_ratelimited_nr(
							inode->i_mapping,
							dirty_pages);
				if (dirty_pages <
				    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
					btrfs_btree_balance_dirty(root, 1);
				btrfs_throttle(root);
			}
		}

		pos += copied;
		num_written += copied;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 0);
			if (IS_ERR(trans)) {
				num_written = PTR_ERR(trans);
				goto done;
			}
			mutex_lock(&inode->i_mutex);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			mutex_unlock(&inode->i_mutex);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else if (ret != BTRFS_NO_LOG_SYNC) {
				btrfs_commit_transaction(trans, root);
			} else {
				btrfs_end_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT && buffered) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
done:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;


	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);

	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};