// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn = { .node_changed = false };
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_put_dnode(&dn);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_balance_fs(sbi, dn.node_changed);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we still need to detect pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered from a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, resulting
	 * in stopping roll-forward recovery. It means we'll recover all or
	 * none of the node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
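/*
 * Illustrative userspace view of the entry point above (not part of the
 * original file): f2fs_sync_file() backs both fsync(2) and fdatasync(2);
 * the VFS passes datasync=1 for the latter, which lets f2fs prefer
 * in-place updates via FI_NEED_IPU. A minimal sketch:
 *
 *	#include <unistd.h>
 *
 *	// after write(fd, ...):
 *	if (fdatasync(fd))	// data + minimal metadata
 *		perror("fdatasync");
 *	if (fsync(fd))		// data + full metadata (may checkpoint)
 *		perror("fsync");
 */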
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
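/*
 * Illustrative userspace sketch (not part of the original file):
 * f2fs_llseek() implements SEEK_DATA/SEEK_HOLE on top of
 * f2fs_seek_block(), failing with -ENXIO once the offset passes i_size.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first data extent
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // hole after it
 *	// both return (off_t)-1 with errno == ENXIO beyond EOF
 */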
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
			continue;

		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
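/*
 * Illustrative userspace sketch (not part of the original file): the
 * creation time exposed above is only reported when the on-disk inode
 * actually stores i_crtime, so callers must check stx_mask.
 *
 *	#include <sys/stat.h>	// statx(2), glibc >= 2.28
 *
 *	struct statx stx;
 *	if (!statx(AT_FDCWD, "file", 0, STATX_BTIME, &stx) &&
 *	    (stx.stx_mask & STATX_BTIME))
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 */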
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		bool to_smaller = (attr->ia_size <= i_size_read(inode));

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (to_smaller)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		if (err)
			return err;

		if (!to_smaller) {
			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
									len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn,
							dn.data_blkaddr,
							blkaddr[i], ni.version,
							true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (f2fs_is_pinned_file(inode))
		map.m_seg_type = CURSEG_COLD_DATA;

	err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
						F2FS_GET_BLOCK_PRE_DIO :
						F2FS_GET_BLOCK_PRE_AIO));
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
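/*
 * Illustrative userspace sketch (not part of the original file): per the
 * mode checks above, collapse/insert need F2FS_BLKSIZE-aligned offset and
 * length, and are rejected on encrypted files.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>	// fallocate(2), FALLOC_FL_*
 *
 *	// punch a 1 MiB hole without changing i_size:
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  off, 1 << 20);
 *	// preallocate past EOF, keeping i_size:
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, off, 1 << 20);
 */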
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should do a
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be
	 * done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* Is it a quota file? Do not allow users to mess with it. */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	fi->i_flags = iflags | (fi->i_flags & ~mask);

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
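/*
 * Illustrative userspace sketch (not part of the original file): these
 * are the generic attribute ioctls that chattr(1)/lsattr(1) drive.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>	// FS_IOC_GETFLAGS, FS_APPEND_FL, ...
 *
 *	int flags;
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
 *		flags |= FS_APPEND_FL;	// make the file append-only
 *		ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 */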
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait for end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
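/*
 * Illustrative userspace sketch (not part of the original file): the
 * atomic-write pair above targets database workloads such as SQLite on
 * Android. The F2FS_IOC_* values below are assumed to be copied from
 * the kernel's f2fs header, since no uapi header exported them at this
 * point.
 *
 *	#include <sys/ioctl.h>
 *	#define F2FS_IOCTL_MAGIC		0xf5
 *	#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
 *	#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	// ... a batch of write(fd, ...) calls, staged in memory ...
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);  // all-or-nothing
 */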
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
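/*
 * Illustrative userspace sketch (not part of the original file): this
 * mirrors xfs_io's "shutdown" command and is mainly used by power-fail
 * tests. The values below are assumed to be copied from the kernel's
 * f2fs header (the ioctl number is shared with XFS_IOC_GOINGDOWN).
 *
 *	#include <sys/ioctl.h>
 *	#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)
 *	#define F2FS_GOING_DOWN_NOSYNC	0x2	// drop everything in flight
 *
 *	__u32 how = F2FS_GOING_DOWN_NOSYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);	// requires CAP_SYS_ADMIN
 */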
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
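/*
 * Illustrative userspace sketch (not part of the original file): this
 * is the standard FITRIM interface that fstrim(8) drives.
 *
 *	#include <sys/ioctl.h>
 *	#include <limits.h>
 *	#include <linux/fs.h>	// FITRIM, struct fstrim_range
 *
 *	struct fstrim_range r = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	// whole filesystem
 *		.minlen = 0,		// raised to discard_granularity above
 *	};
 *	if (ioctl(fd, FITRIM, &r) == 0)
 *		printf("trimmed %llu bytes\n", (unsigned long long)r.len);
 */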
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

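/*
 * Illustrative userspace trigger for an on-demand checkpoint (a sketch,
 * not part of this file; the argument is unused by the handler below):
 *
 *	ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT, 0);
 */
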
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

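/*
 * Two-pass defragmenter: the first pass walks the block map to decide
 * whether the range is fragmented at all; the second redirties every
 * mapped page (at most one segment's worth per iteration) so that
 * writeback reallocates the blocks contiguously via out-of-place writes.
 */
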
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
				sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

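/*
 * Core of F2FS_IOC_MOVE_RANGE: exchanges the block mappings of two files
 * instead of copying data. Both files must be regular, unencrypted, and on
 * the same f2fs instance; the range must be block aligned, and overlapping
 * ranges within a single file are rejected.
 */
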
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					   pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					   pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

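/*
 * Illustrative userspace call (a sketch, not part of this file; assumes
 * src_fd was opened read-write and dst_fd refers to a writable file on the
 * same f2fs mount):
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 4096,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */
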
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

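/*
 * Illustrative userspace query of the on-disk feature mask (a sketch, not
 * part of this file):
 *
 *	__u32 feature;
 *	ioctl(fd, F2FS_IOC_GET_FEATURES, &feature);
 */
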
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}

#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

2864 */ 2865 2866 static const struct { 2867 u32 iflag; 2868 u32 xflag; 2869 } f2fs_xflags_map[] = { 2870 { F2FS_SYNC_FL, FS_XFLAG_SYNC }, 2871 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE }, 2872 { F2FS_APPEND_FL, FS_XFLAG_APPEND }, 2873 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP }, 2874 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME }, 2875 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT }, 2876 }; 2877 2878 #define F2FS_SUPPORTED_XFLAGS ( \ 2879 FS_XFLAG_SYNC | \ 2880 FS_XFLAG_IMMUTABLE | \ 2881 FS_XFLAG_APPEND | \ 2882 FS_XFLAG_NODUMP | \ 2883 FS_XFLAG_NOATIME | \ 2884 FS_XFLAG_PROJINHERIT) 2885 2886 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */ 2887 static inline u32 f2fs_iflags_to_xflags(u32 iflags) 2888 { 2889 u32 xflags = 0; 2890 int i; 2891 2892 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) 2893 if (iflags & f2fs_xflags_map[i].iflag) 2894 xflags |= f2fs_xflags_map[i].xflag; 2895 2896 return xflags; 2897 } 2898 2899 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */ 2900 static inline u32 f2fs_xflags_to_iflags(u32 xflags) 2901 { 2902 u32 iflags = 0; 2903 int i; 2904 2905 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) 2906 if (xflags & f2fs_xflags_map[i].xflag) 2907 iflags |= f2fs_xflags_map[i].iflag; 2908 2909 return iflags; 2910 } 2911 2912 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa) 2913 { 2914 struct f2fs_inode_info *fi = F2FS_I(inode); 2915 2916 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags)); 2917 2918 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) 2919 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); 2920 } 2921 2922 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) 2923 { 2924 struct inode *inode = file_inode(filp); 2925 struct fsxattr fa; 2926 2927 f2fs_fill_fsxattr(inode, &fa); 2928 2929 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) 2930 return -EFAULT; 2931 return 0; 2932 } 2933 2934 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) 2935 { 2936 struct inode *inode = file_inode(filp); 2937 struct fsxattr fa, old_fa; 2938 u32 iflags; 2939 int err; 2940 2941 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa))) 2942 return -EFAULT; 2943 2944 /* Make sure caller has proper permission */ 2945 if (!inode_owner_or_capable(inode)) 2946 return -EACCES; 2947 2948 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS) 2949 return -EOPNOTSUPP; 2950 2951 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags); 2952 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) 2953 return -EOPNOTSUPP; 2954 2955 err = mnt_want_write_file(filp); 2956 if (err) 2957 return err; 2958 2959 inode_lock(inode); 2960 2961 f2fs_fill_fsxattr(inode, &old_fa); 2962 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa); 2963 if (err) 2964 goto out; 2965 2966 err = f2fs_setflags_common(inode, iflags, 2967 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); 2968 if (err) 2969 goto out; 2970 2971 err = f2fs_ioc_setproject(filp, fa.fsx_projid); 2972 out: 2973 inode_unlock(inode); 2974 mnt_drop_write_file(filp); 2975 return err; 2976 } 2977 2978 int f2fs_pin_file_control(struct inode *inode, bool inc) 2979 { 2980 struct f2fs_inode_info *fi = F2FS_I(inode); 2981 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2982 2983 /* Use i_gc_failures for normal file as a risk signal. 
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}

int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	ret = f2fs_resize_fs(sbi, block_count);

	return ret;
}

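/*
 * Illustrative userspace enablement of fs-verity (a sketch, not part of
 * this file; struct fsverity_enable_arg comes from <linux/fsverity.h> and
 * unset fields are left zero):
 *
 *	struct fsverity_enable_arg varg = {
 *		.version = 1,
 *		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
 *		.block_size = 4096,
 *	};
 *	ioctl(fd, FS_IOC_ENABLE_VERITY, &varg);
 */
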
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}

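/*
 * Top-level ioctl dispatcher. Every command is refused with -EIO once a
 * checkpoint error has been detected, since no further metadata update can
 * be trusted to reach the disk.
 */
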
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) {
		ret = -EINVAL;
		goto out;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = err;
				goto out;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

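/*
 * 32-bit compatibility layer: the three legacy flag/version ioctls carry
 * distinct 32-bit command numbers and are remapped; every other listed
 * command is binary compatible and is forwarded unchanged after the
 * argument is converted with compat_ptr().
 */
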
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};