/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}

static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* even if no data was written, pending inode updates still need flushing */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered after a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, it's fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, resulting
	 * in stopping roll-forward recovery. It means we'll recover all or
	 * none of the node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	if (!atomic)
		ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
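/*
 * Roughly speaking, f2fs_seek_block() below walks the direct-node blocks
 * from @offset and reports the first block that satisfies __found_offset().
 * From userspace this surfaces through the standard lseek(2) interface,
 * e.g. (illustrative only):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  // next non-hole offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // next hole after it
 */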
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	struct dentry *dir;

	if (f2fs_encrypted_inode(inode)) {
		int ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return dquot_file_open(inode, filp);
}

int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;

	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);
	return 0;
}
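/*
 * The attribute bits filled in above are visible to userspace via
 * statx(2); a minimal sketch (illustrative only):
 *
 *	struct statx stx;
 *	statx(AT_FDCWD, "file", 0, STATX_BASIC_STATS, &stx);
 *	if (stx.stx_attributes & STATX_ATTR_ENCRYPTED)
 *		;	// inode is encrypted
 */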
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
			!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
			!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode)) {
			err = fscrypt_get_encryption_info(inode);
			if (err)
				return err;
			if (!fscrypt_has_encryption_key(inode))
				return -ENOKEY;
		}

		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
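/*
 * __exchange_data_block() below moves up to four dnode blocks' worth of
 * block addresses at a time, in three steps: __read_out_blkaddrs()
 * records the source addresses (detaching non-checkpointed ones),
 * __clone_blkaddrs() re-attaches or copies them at the destination, and
 * __roll_back_blkaddrs() restores the source mapping on failure.
 */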
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
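/*
 * f2fs_collapse_range() below implements FALLOC_FL_COLLAPSE_RANGE: the
 * blocks following the removed range are exchanged leftward into its
 * place, then the now-duplicated tail is truncated and i_size shrinks
 * by @len. Rough userspace trigger (illustrative only):
 *
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
 */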
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);

out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return err;
}
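/*
 * f2fs_fallocate() below dispatches the fallocate(2) modes handled
 * above; anything outside FALLOC_FL_KEEP_SIZE, PUNCH_HOLE,
 * COLLAPSE_RANGE, ZERO_RANGE and INSERT_RANGE is rejected. A minimal
 * preallocation sketch from userspace (illustrative only):
 *
 *	// reserve 1MB of blocks without changing i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 */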
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction has crashed, we should do
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be done
	 * in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags &
			(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(inode, FI_HOT_DATA);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		clear_inode_flag(inode, FI_HOT_DATA);
		goto out;
	}

inc_stat:
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
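/*
 * The atomic-write ioctls are used in pairs: F2FS_IOC_START_ATOMIC_WRITE
 * stages subsequent writes in memory, and F2FS_IOC_COMMIT_ATOMIC_WRITE
 * (below) makes them durable as a unit, so a crash exposes either all of
 * the staged writes or none of them. Rough userspace usage, as a
 * SQLite-style database might issue it (illustrative only):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);	// staged, not yet visible on disk
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */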
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			clear_inode_flag(inode, FI_HOT_DATA);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}
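/*
 * f2fs_ioc_fitrim() below services the generic FITRIM ioctl, clamping
 * the requested minimum extent length to the device's discard
 * granularity. Rough userspace usage (illustrative only):
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX };
 *	ioctl(fd, FITRIM, &range);	// range.len returns bytes trimmed
 */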
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		/* don't leak the write count taken above */
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update_policy(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragmentation in SSR mode when free sections are
	 * being consumed intensively.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
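/*
 * f2fs_ioc_defragment() below exposes the range defragmenter through
 * F2FS_IOC_DEFRAGMENT. Rough userspace usage (illustrative only):
 *
 *	struct f2fs_defragment df = { .start = 0, .len = file_size };
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);	// df.len returns bytes moved
 */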
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
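/*
 * f2fs_ioc_move_range() below wires f2fs_move_file_range() up to the
 * F2FS_IOC_MOVE_RANGE ioctl, which moves block-aligned data from the
 * calling fd into dst_fd. Rough userspace usage (illustrative only):
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst, .pos_in = 0, .pos_out = 0, .len = bytes,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */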

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
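
/*
 * Usage sketch (hypothetical userspace code, not part of this file): move
 * one block-aligned megabyte from the source file to another file on the
 * same f2fs mount. src_fd must be open read/write and dst_fd writable;
 * the offsets and length are illustrative only.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,
 *		.pos_in = 0,		// block aligned
 *		.pos_out = 0,		// block aligned
 *		.len = 1 << 20,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */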

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
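
/*
 * Usage sketch (hypothetical userspace code, not part of this file): query
 * the feature word before relying on a feature, assuming the caller has the
 * F2FS_FEATURE_* bit definitions available. use_atomic_write_path() is a
 * made-up placeholder.
 *
 *	u32 features;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &features) == 0 &&
 *	    (features & F2FS_FEATURE_ATOMIC_WRITE))
 *		use_atomic_write_path();
 */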

#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)

/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
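
/*
 * Usage sketch (hypothetical userspace code, not part of this file): read
 * the xflags/project-id pair exposed above through the generic fsxattr
 * interface.
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(fd, F2FS_IOC_FSGETXATTR, &fa) == 0 &&
 *	    (fa.fsx_xflags & FS_XFLAG_PROJINHERIT))
 *		printf("project %u is inherited\n", fa.fsx_projid);
 */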

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	default:
		return -ENOTTY;
	}
}
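
/*
 * Usage sketch (hypothetical userspace code, not part of this file): a
 * read-modify-write of the fsxattr pair, which dispatches through
 * f2fs_ioctl() to f2fs_ioc_fssetxattr() and then f2fs_ioc_setproject().
 * The project id is illustrative only.
 *
 *	struct fsxattr fa;
 *
 *	ioctl(fd, F2FS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *	fa.fsx_projid = 42;
 *	ioctl(fd, F2FS_IOC_FSSETXATTR, &fa);
 */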

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
			inode_unlock(inode);
			return err;
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
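
/*
 * Usage sketch (hypothetical userspace code, not part of this file): an
 * ordinary buffered write followed by fsync() exercises
 * f2fs_file_write_iter() and the f2fs_sync_file() path wired up in
 * f2fs_file_operations above. The path is illustrative only.
 *
 *	int fd = open("/mnt/f2fs/log", O_WRONLY | O_CREAT | O_APPEND, 0644);
 *
 *	write(fd, buf, buflen);
 *	fsync(fd);
 *	close(fd);
 */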