/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>

static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
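
/*
 * Decide whether this fsync can rely on roll-forward recovery (flushing
 * only this inode's node pages) or must trigger a full checkpoint. Any
 * condition below that roll-forward cannot handle forces a checkpoint:
 * non-regular files or hard links, a stale parent ino, insufficient log
 * space, an uncheckpointed parent node, an xattr update in the current
 * checkpoint version, fastboot mode, or too few active logs.
 */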
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* we still need to catch any pending inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
		update_inode_page(inode);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	/*
	 * Both fdatasync() and fsync() can be recovered after a sudden
	 * power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for inodes fsynced after this
		 * checkpoint.
		 */
		try_to_fix_pino(inode);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, we don't need to track this */
	remove_dirty_inode(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_dirty_inode(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	return ret;
}
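
/*
 * Helpers for f2fs_seek_block() below. A dirty page may hold data that
 * is not yet mapped to disk (blkaddr == NEW_ADDR), so SEEK_DATA treats
 * the first dirty page index as data even before a block is written
 * for it, while SEEK_HOLE only needs an unallocated (NULL_ADDR) slot.
 */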
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize;
			data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}
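
/*
 * Standard whence values go through generic_file_llseek_size();
 * SEEK_DATA and SEEK_HOLE are handled by f2fs_seek_block() above.
 */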
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		update_extent_cache(NULL_ADDR, dn);
		invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}
	if (nr_free) {
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return 0;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
	if (IS_ERR(page))
		return 0;

	lock_page(page);
	if (unlikely(!PageUptodate(page) ||
			page->mapping != inode->i_mapping))
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
out:
	f2fs_put_page(page, 1);
	return 0;
}
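
/*
 * Truncate all data blocks at or beyond byte offset @from: blocks that
 * live in the reachable dnode are freed directly, the rest are handed
 * to truncate_inode_blocks(), and the now-partial last page is zeroed
 * afterwards. Inline-data inodes carry no block addresses, so they
 * only need the partial-page zeroing.
 */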
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)
		((from + blocksize - 1) >> (sbi->log_blocksize));

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline(inode)) {
		if (f2fs_convert_inline_inode(inode))
			return;
	}

	if (!truncate_blocks(inode, i_size_read(inode), true)) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
}
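
/*
 * generic_fillattr() reports i_blocks in f2fs's native 4KB block units,
 * while stat(2) expects 512-byte sectors; shifting by 3 converts
 * between the two (4096 / 512 == 8).
 */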
int f2fs_getattr(struct vfsmount *mnt,
			struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size != i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			f2fs_truncate(inode);
			f2fs_balance_fs(F2FS_I_SB(inode));
		} else {
			/*
			 * give a chance to truncate blocks past EOF which
			 * were fallocated with FALLOC_FL_KEEP_SIZE
			 */
			f2fs_truncate(inode);
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (!IS_ERR(page)) {
		f2fs_wait_on_page_writeback(page, DATA);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/* skip punching hole beyond i_size */
	if (offset >= inode->i_size)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}
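
/*
 * Preallocate blocks for fallocate(): reserve a block for every page
 * in [offset, offset + len), tracking the resulting file size as we
 * go. Unless FALLOC_FL_KEEP_SIZE was requested, i_size is then grown
 * to cover the newly reserved range.
 */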
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	f2fs_balance_fs(sbi);

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	f2fs_lock_op(sbi);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)
			goto noalloc;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);
		if (ret)
			break;
noalloc:
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
								off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}
	f2fs_unlock_op(sbi);

	return ret;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len);
	else
		ret = expand_inode_data(inode, offset, len, mode);

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}

	mutex_unlock(&inode->i_mutex);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	return put_user(flags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!inode_owner_or_capable(inode)) {
		ret = -EACCES;
		goto out;
	}

	if (get_user(flags, (int __user *)arg)) {
		ret = -EFAULT;
		goto out;
	}

	flags = f2fs_mask_flags(inode->i_mode, flags);

	mutex_lock(&inode->i_mutex);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	mutex_unlock(&inode->i_mutex);

	f2fs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}
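
/*
 * Atomic/volatile write support: F2FS_IOC_START_ATOMIC_WRITE marks the
 * file so that subsequent writes are staged in memory (see
 * commit_inmem_pages()) rather than written back page by page;
 * F2FS_IOC_COMMIT_ATOMIC_WRITE flushes them as one unit and fsyncs the
 * result. Volatile files are staged the same way but are never
 * committed. In both cases, pages still staged at release time are
 * discarded.
 */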
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	f2fs_balance_fs(sbi);

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);

	return f2fs_convert_inline_inode(inode);
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
		commit_inmem_pages(inode, true);
	return 0;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, false);

	ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);

	return f2fs_convert_inline_inode(inode);
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	return 0;
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	default:
		return -ENOTTY;
	}
}
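
/*
 * 32-bit userspace on a 64-bit kernel issues the *_IOC32 command
 * numbers; translate them to the native commands and forward to
 * f2fs_ioctl().
 */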
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.open		= generic_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};