/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
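
/*
 * Worked example for btrfs_copy_from_user() above (hypothetical numbers,
 * assuming 4K pages): a write of 6000 bytes at pos = 5000 gives
 * offset = 5000 & 4095 = 904, so iteration 0 copies
 * min(4096 - 904, 6000) = 3192 bytes into page 0 at offset 904, and
 * iteration 1 copies the remaining 2808 bytes into page 1 at offset 0;
 * every page after the first is always filled from offset 0.
 */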

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty; clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page; we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
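
/*
 * Worked example of the alignment math above (hypothetical numbers,
 * assuming a 4K sectorsize): pos = 5000 and write_bytes = 2000 give
 * start_pos = 5000 & ~4095 = 4096 and
 * num_bytes = (2000 + 5000 - 4096 + 4095) & ~4095 = 4096, so the full
 * sector range [4096, 8191] is locked and marked delalloc even though
 * only bytes [5000, 6999] were actually written.
 */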

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
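
/*
 * Example of the splitting above (hypothetical numbers): dropping
 * [16K, 32K-1] from a cached, non-compressed mapping covering [0, 64K)
 * leaves two mappings, [0, 16K) with block_len 16K, and [32K, 64K)
 * whose block_start is shifted by diff = 32K from the original.
 * Compressed mappings keep the full on-disk block range in both
 * halves, because the compressed bytes themselves cannot be split.
 */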

/*
 * debugging helper; the real verification below is compiled out, so
 * this currently always reports success
 */
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	return 0;
#if 0
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;
	u64 extent_end = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk(KERN_ERR "inode %lu found offset %llu "
			       "expected %llu\n", inode->i_ino,
			       (unsigned long long)found_key.offset,
			       (unsigned long long)last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
		       inode->i_ino, (unsigned long long)last_offset,
		       (unsigned long long)inode->i_size);
		err = 1;

	}
out:
	btrfs_free_path(path);
	return err;
#endif
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 locked_end = end;
	u64 search_start = start;
	u64 leaf_start;
	u64 ram_bytes = 0;
	u64 orig_parent = 0;
	u64 disk_bytenr = 0;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	u64 root_gen;
	u64 root_owner;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		leaf_start = 0;
		root_gen = 0;
		root_owner = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes),
					   orig_parent, root->root_key.objectid,
					   trans->transid, inode->i_ino);
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			path->leave_spinning = 1;
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_unlock_up_safe(path, 1);
			btrfs_mark_buffer_dirty(path->nodes[0]);
			btrfs_set_lock_blocking(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
						disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						orig_parent,
						leaf->start,
						root->root_key.objectid,
						trans->transid, ins.objectid);

				BUG_ON(ret);
			}
			path->leave_spinning = 0;
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_check_file(root, inode);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
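
/*
 * Example for extent_mergeable() above: with *start = 0 and *end = 0
 * the caller accepts any uncompressed REG extent at the given bytenr
 * and gets its [key.offset, extent_end) range back; a non-zero *start
 * or *end instead demands an exact match, which is how
 * btrfs_mark_extent_written() below checks for a neighbor that butts
 * up exactly against the range it just wrote.
 */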

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split
 * into two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	u64 orig_parent;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto done;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	orig_parent = leaf->start;
	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   orig_parent, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	if (orig_parent != leaf->start) {
		ret = btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
					      orig_parent, leaf->start,
					      root->root_key.objectid,
					      trans->transid, inode->i_ino);
		BUG_ON(ret);
	}
done:
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}
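
/*
 * Example of the split cases above (hypothetical numbers): marking
 * [8K, 12K) written inside a prealloc extent covering [0, 16K) takes
 * two passes through 'again'.  The first pass (split == start) trims
 * the prealloc item to [0, 8K) and inserts a new item for [8K, 16K);
 * the second pass splits that item again at 12K, leaving prealloc
 * [0, 8K), regular [8K, 12K) and prealloc [12K, 16K).  When start or
 * end already lines up with an extent boundary, one pass is skipped
 * and only two pieces remain.
 */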

/*
 * this gets pages into the page cache and locks them down; it also
 * properly waits for data=ordered extents to finish before allowing
 * the pages to be modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
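
/*
 * Example of the ordered-extent check above (hypothetical numbers): if
 * a previous write left an ordered extent for [0, 8K) still in flight
 * and a new write covers start_pos = 4K, the overlap test
 * (0 + 8K > 4K and 0 < last_pos) fires, so the freshly grabbed pages
 * are released and we wait for the ordered range before retrying,
 * rather than letting the new write modify pages mid-writeback.
 */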

static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto out_nolock;
	}

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
					offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_NONE);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
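
/*
 * Sizing note for the write loop above (hypothetical 64-bit
 * configuration with 4K pages and 8-byte pointers): nrptrs is capped
 * at PAGE_CACHE_SIZE / sizeof(struct page *) = 4096 / 8 = 512 page
 * pointers, so each pass through the while loop handles at most 2MB
 * of the write before the pages are dirtied and released.
 */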

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
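
/*
 * The release-time flush above is aimed at the common replace-in-place
 * pattern (illustrative userspace sequence, not from this file):
 *
 *	fd = open("file", O_WRONLY | O_TRUNC);
 *	write(fd, buf, len);
 *	close(fd);              <- btrfs_release_file runs here
 *
 * flushing at close makes it much less likely that a crash shortly
 * afterwards leaves a zero-length file where the application expected
 * either the old or the new contents.
 */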

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->log_batch++;
	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_sync_log(trans, root);
		if (ret == 0)
			ret = btrfs_end_transaction(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};