// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn = { .node_changed = false };
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_put_dnode(&dn);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_balance_fs(sbi, dn.node_changed);

	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
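
/*
 * need_do_checkpoint() below decides whether an fsync can rely on
 * roll-forward recovery alone (replaying the fsynced node chain after
 * a sudden power-off) or whether a full checkpoint is required.
 * Roughly, anything roll-forward recovery cannot reconstruct -- a
 * non-regular file, extra hard links, a stale parent ino, or too
 * little space left for the recovery log -- forces the checkpoint
 * path in f2fs_do_sync_file().
 */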

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we still need to detect pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered after a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, resulting
	 * in stopping roll-forward recovery. It means we'll recover all or
	 * none of the node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, no need to track this further */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
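
/*
 * Illustrative only (not part of the build): the SEEK_DATA/SEEK_HOLE
 * support below is reached from userspace via lseek(2), e.g.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * Dirty pages that have not been allocated on disk yet must still be
 * reported as data, which is why the first dirty page index is
 * consulted alongside the on-disk block addresses.
 */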

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find the first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			is_valid_data_blkaddr(sbi, blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle the inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* the direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in the dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
			continue;

		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddrs in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first partial data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check the inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
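
/*
 * Illustrative only (not part of the build): the creation time that
 * f2fs_getattr() reports below is visible to userspace through
 * statx(2), e.g.
 *
 *	struct statx stx;
 *	statx(AT_FDCWD, "/mnt/f2fs/file", 0, STATX_BTIME, &stx);
 *
 * STATX_BTIME is only set in stx.stx_mask when the on-disk inode
 * actually carries i_crtime in its extra attribute area.
 */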

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		bool to_smaller = (attr->ia_size <= i_size_read(inode));

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (to_smaller)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		if (err)
			return err;

		if (!to_smaller) {
			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* the file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* an inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
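
/*
 * Illustrative only (not part of the build): punch_hole() below backs
 * the hole-punching flavor of fallocate(2), e.g.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * Partial first/last pages are zeroed in place via fill_zero(), and
 * only the fully covered pages in between have their blocks freed.
 */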

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
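
/*
 * Illustrative only (not part of the build): f2fs_collapse_range()
 * below backs
 *
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
 *
 * which removes [offset, offset + len) and shifts the tail of the file
 * down, shrinking i_size by len. Both offset and len must be aligned
 * to F2FS_BLKSIZE, and the range must end before EOF, as checked below.
 */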

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
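
/*
 * Illustrative only (not part of the build): f2fs_insert_range() below
 * backs
 *
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, offset, len);
 *
 * which shifts everything from offset upward by len and grows i_size
 * accordingly, leaving a hole at [offset, offset + len). The shift is
 * performed from the tail of the file back toward offset via
 * __exchange_data_block(), so no block is overwritten before it has
 * been moved.
 */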

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should do a
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be
	 * done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags;

	if (f2fs_encrypted_inode(inode))
		flags |= F2FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		flags |= F2FS_INLINE_DATA_FL;

	flags &= F2FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & F2FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
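
/*
 * Illustrative only (not part of the build): the atomic-write ioctls
 * below are meant to be used in pairs, so that a batch of buffered
 * writes becomes visible on disk all-or-nothing, e.g.
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);	// staged as in-memory pages
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *
 * Databases (notably SQLite on Android) use this to avoid journaling
 * for small transactions.
 */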

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (!get_dirty_pages(inode))
		goto skip_flush;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}
skip_flush:
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
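
/*
 * Illustrative only (not part of the build): F2FS_IOC_SHUTDOWN below
 * takes a __u32 selecting how much state is made durable before the
 * filesystem stops accepting updates, e.g.
 *
 *	__u32 in = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &in);
 *
 * FULLSYNC freezes the block device first, METASYNC performs a
 * checkpoint, NOSYNC stops immediately, and METAFLUSH only writes the
 * dirty meta pages. All variants then stop checkpointing, GC, and
 * discard, as the code below shows.
 */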

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
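
/*
 * Illustrative only (not part of the build): defragmentation below
 * works by finding ranges whose physical blocks are discontiguous,
 * re-dirtying every page in them, and writing them back so the LFS
 * allocator lays the blocks out contiguously. From userspace, e.g.:
 *
 *	struct f2fs_defragment df = { .start = 0, .len = file_size };
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);
 *
 * It is refused when in-place-update policy would rewrite blocks at
 * their old locations anyway, and it bails out with -EAGAIN when too
 * few free sections remain for LFS-mode allocation.
 */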
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * look up mapping info in extent cache, skip defragmenting if the
	 * physical block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * look up mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids the defragmenter running in SSR mode when free sections are
	 * being consumed intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
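
/*
 * F2FS_IOC_DEFRAGMENT wrapper: validates the caller and the block alignment
 * of the request before handing off to f2fs_defragment_range().  Illustrative
 * userspace call (error handling omitted):
 *
 *	struct f2fs_defragment df = { .start = 0, .len = 1 << 20 };
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);
 */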
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
				sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);
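
	/*
	 * On success, grow the destination i_size if the move extended the
	 * file; otherwise restore the pre-exchange size, since the exchange
	 * works on whole blocks and may have disturbed a partial tail block.
	 */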
	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		/* f2fs_msg() appends its own newline, so don't add one here */
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
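
/*
 * F2FS_IOC_GET_FEATURES: report the on-disk feature bitmap to userspace.
 */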
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}

#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & F2FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & F2FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & F2FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & F2FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & F2FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & F2FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= F2FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= F2FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= F2FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= F2FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= F2FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= F2FS_PROJINHERIT_FL;

	return iflags;
}
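
/*
 * FS_IOC_FSGETXATTR: expose the user-visible inode flags (translated to
 * FS_XFLAG_* form) and, when project quota is enabled, the project ID.
 */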
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				F2FS_FL_USER_VISIBLE);

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
{
	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
		return -EINVAL;

	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
		if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
			return -EINVAL;
	} else {
		if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
			return -EINVAL;
	}

	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	err = f2fs_ioctl_check_project(inode, &fa);
	if (err)
		goto out;
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
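
/*
 * Pinned files must keep their on-disk location, so GC cannot migrate their
 * blocks.  i_gc_failures[GC_FAILURE_PIN] counts how often GC has had to skip
 * the file; once it exceeds gc_pin_file_threshold, the pin is dropped so
 * that GC can make progress again.
 */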
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		/* f2fs_msg() appends its own newline, so don't add one here */
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials",
			__func__, inode->i_ino,
			fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}

int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
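
/*
 * Top-level ioctl dispatcher.  Every command bails out early with -EIO once
 * a checkpoint error has been detected on the filesystem.
 */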
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}
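
/*
 * Write path: run the generic checks, preallocate blocks for the buffered
 * and direct cases (a NOWAIT direct write must already be a pure overwrite),
 * then hand off to __generic_file_write_iter().  Blocks preallocated beyond
 * the final i_size are truncated away if the write falls short.
 */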
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
			(iocb->ki_flags & IOCB_DIRECT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return -EAGAIN;
			}

		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};