/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}

static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
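/*
 * Decide whether an fsync must force a full checkpoint. When any of the
 * conditions below holds, roll-forward recovery of the node chain alone
 * cannot restore this inode after a crash, so f2fs falls back to a
 * checkpoint instead.
 */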
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (sbi->active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* we still need to detect any pending inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
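/*
 * Core fsync path: flush dirty data pages first, then either trigger a
 * full checkpoint (when need_do_checkpoint() says roll-forward recovery
 * would not be possible) or just write this inode's node pages and issue
 * a cache flush so recovery can replay them.
 */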
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered from a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, resulting
	 * in stopping roll-forward recovery. It means we'll recover all or
	 * none of the node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
						1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}
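/*
 * In a dnode, NULL_ADDR marks a hole and NEW_ADDR marks a block that is
 * reserved but not yet written; for SEEK_DATA, a dirty page at the queried
 * offset counts as data even when the on-disk slot still says NEW_ADDR.
 */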
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	struct dentry *dir;

	if (f2fs_encrypted_inode(inode)) {
		int ret = fscrypt_get_encryption_info(inode);

		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return dquot_file_open(inode, filp);
}

int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
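		/*
		 * Clear the address slot in the node page first, then
		 * release the physical block back to the SIT so it can
		 * be reallocated.
		 */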
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
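/*
 * Entry point for truncation: inline data that can no longer stay inline
 * is converted to a regular block first, then everything past i_size is
 * freed via truncate_blocks().
 */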
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;

	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
			!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
			!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

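	/*
	 * For size changes, i_mmap_sem taken below serializes against
	 * f2fs_filemap_fault()/mkwrite, so no page can be faulted in past
	 * the new EOF while the file is being shrunk.
	 */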
	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode)) {
			err = fscrypt_get_encryption_info(inode);
			if (err)
				return err;
			if (!fscrypt_has_encryption_key(inode))
				return -ENOKEY;
		}

		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = get_next_page_offset(&dn, pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
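/*
 * Hole punching: the partial head and tail pages are zeroed in the page
 * cache, while fully covered pages are dropped and their blocks freed
 * through truncate_hole().
 */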
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
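/*
 * Move a run of blocks from src to dst: slots flagged in do_replace[] are
 * re-linked by address via f2fs_replace_block() without copying data,
 * while the rest fall back to a page-cache copy (f2fs_copy_page) plus a
 * hole punch in the source.
 */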
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
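/*
 * FALLOC_FL_COLLAPSE_RANGE: after writeback, every block beyond the range
 * is shifted left over it by __exchange_data_block(), then i_size is
 * reduced by the collapsed length.
 */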
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out_unlock;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out_unlock:
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}
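/*
 * FALLOC_FL_INSERT_RANGE: blocks are shifted toward the end of the file,
 * walking from the tail back to the insertion point so no batch overwrites
 * blocks it still has to move; the freed range then reads as zeroes.
 */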
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);

	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}
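/*
 * Plain fallocate(): blocks are preallocated in one f2fs_map_blocks()
 * call with F2FS_GET_BLOCK_PRE_AIO; on partial failure, i_size is still
 * advanced to the last successfully mapped position.
 */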
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any in-memory pages on a close made by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be
	 * done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags &
			(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
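/*
 * Atomic write protocol (used, e.g., by SQLite on Android): after
 * F2FS_IOC_START_ATOMIC_WRITE, writes to this file are staged as
 * in-memory pages and only reach disk as one unit when
 * F2FS_IOC_COMMIT_ATOMIC_WRITE runs. Roughly, from userspace:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */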
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(inode, FI_HOT_DATA);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		clear_inode_flag(inode, FI_HOT_DATA);
		goto out;
	}

inc_stat:
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			clear_inode_flag(inode, FI_HOT_DATA);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
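/*
 * F2FS_IOC_SHUTDOWN stops the filesystem in one of four modes: FULLSYNC
 * freezes the block device to flush everything first, METASYNC does one
 * last checkpoint, METAFLUSH only writes the dirty meta pages, and NOSYNC
 * stops immediately. All of them block further checkpoints afterwards.
 * A minimal userspace sketch:
 *
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 */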
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
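/*
 * F2FS_IOC_GARBAGE_COLLECT: a zero argument requests best-effort GC
 * (trylock, -EBUSY if GC is already running), while a non-zero argument
 * blocks on gc_mutex and forces a synchronous GC pass.
 */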
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		/* drop the write reference taken above before bailing out */
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
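/*
 * Defragmentation does not move blocks directly: fragmented extents are
 * re-dirtied page by page under FI_DO_DEFRAG and then written back, so
 * the LFS allocator lays them out contiguously again.
 */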
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update_policy(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation, this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
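/*
 * Backend of F2FS_IOC_MOVE_RANGE: donates a block-aligned range from one
 * file to another via __exchange_data_block(). Both inodes are locked and
 * dio_rwsem[WRITE] is held on each, so GC and direct IO cannot move
 * blocks underneath the exchange.
 */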

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	down_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
		if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) {
			inode_unlock(dst);
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst) {
		up_write(&F2FS_I(dst)->dio_rwsem[WRITE]);
		inode_unlock(dst);
	}
out:
	up_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
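
/*
 * Usage sketch (userspace, illustrative only): exchange a block-aligned
 * range between two files on the same f2fs mount; the source fd must be
 * opened read/write and the destination fd writable:
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = 4096,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 *
 * "src_fd"/"dst_fd" are placeholders; len == 0 means "to EOF", per the
 * handling in f2fs_move_file_range() above.
 */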

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
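
/*
 * Usage sketch (userspace, illustrative only): on a multi-device f2fs
 * volume, migrate up to "segments" segments off device dev_num via GC, or
 * query the feature bitmap; "fd" is a placeholder:
 *
 *	struct f2fs_flush_device fr = { .dev_num = 1, .segments = 64 };
 *	ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fr);
 *
 *	u32 feat;
 *	ioctl(fd, F2FS_IOC_GET_FEATURES, &feat);
 *
 * The flush call is rejected unless s_ndevs > 1 and segs_per_sec == 1, as
 * checked above.
 */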

#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it a quota file? Do not allow the user to mess with it. */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate through F2FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)

/* Transfer xflags to internal flags */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
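
/*
 * Usage sketch (userspace, illustrative only): the fsxattr pair follows the
 * generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR protocol; reading, then
 * updating the project ID, could look like:
 *
 *	struct fsxattr fa;
 *	ioctl(fd, F2FS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_projid = 42;
 *	ioctl(fd, F2FS_IOC_FSSETXATTR, &fa);
 *
 * "fd" and the project ID are placeholders; only the xflags listed in
 * F2FS_SUPPORTED_FS_XFLAGS are accepted by the setter below.
 */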

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	default:
		return -ENOTTY;
	}
}
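
/*
 * Editorial note on the write path below (hedged): when the incoming user
 * buffer might fault (iov_iter_fault_in_readable() returns nonzero),
 * FI_NO_PREALLOC is set before calling f2fs_preallocate_blocks(), which is
 * expected to back off when the flag is set; the intent is to avoid leaving
 * preallocated blocks behind if __generic_file_write_iter() later copies
 * less data than requested.
 */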

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			return err;
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
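
/*
 * Editorial note (hedged): f2fs_file_operations is not registered in this
 * file; elsewhere in f2fs, regular-file inode initialization is expected to
 * wire it up roughly as:
 *
 *	inode->i_fop = &f2fs_file_operations;
 *
 * so every handler above is reached through the VFS file_operations table.
 */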