/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed page writeback via META_MAPPING */
	if (f2fs_post_read_required(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
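/*
 * Decide whether an fsync can rely on roll-forward recovery of the node
 * chain alone, or must fall back to a full checkpoint: non-regular files,
 * hardlinked inodes, a wrong parent ino, too little space to roll forward,
 * and a few mount options all force the checkpoint path.
 */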
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we still need to check whether there are pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
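/*
 * Core fsync path: flush the data pages first, then either trigger a full
 * checkpoint (when need_do_checkpoint() says roll-forward recovery is not
 * possible) or write this inode's node pages with an fsync mark and issue a
 * device flush, which is enough for roll-forward recovery after a power
 * failure.
 */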
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered after a sudden
	 * power failure.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, which stops
	 * roll-forward recovery. That means we'll recover either all or none
	 * of the node blocks up to the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			is_valid_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);
		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddrs in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					      dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
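/*
 * Truncate all blocks at and beyond 'from': inline data is trimmed inside
 * the inode page, the dnode covering 'from' is truncated in place, deeper
 * node blocks are freed via f2fs_truncate_inode_blocks(), and finally the
 * now-partial last page is zeroed out.
 */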
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
			!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
			!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}
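	/*
	 * A size change is serialized against concurrent page faults:
	 * i_mmap_sem is taken for write around truncate_setsize(), while
	 * f2fs_filemap_fault() and f2fs_vm_page_mkwrite() take it for read,
	 * so no page can be faulted in or dirtied while the size changes.
	 */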
	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			f2fs_get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
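/*
 * FALLOC_FL_COLLAPSE_RANGE: shift everything from the end of the punched
 * range down to 'offset', then truncate the file by 'len' bytes. GC is
 * locked out via i_gc_rwsem[WRITE] for the whole exchange so that moved
 * blocks cannot be migrated underneath us.
 */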
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to the block size of f2fs */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out_unlock;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out_unlock;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out_unlock:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
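/*
 * FALLOC_FL_ZERO_RANGE: zero the partial head/tail pages through the page
 * cache and convert every fully covered block to NEW_ADDR (allocated but
 * unwritten), so the range reads back as zeroes without rewriting data
 * blocks.
 */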
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}
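/*
 * FALLOC_FL_INSERT_RANGE: make room at 'offset' by shifting blocks toward
 * the end of file, working backwards in chunks of at most 'delta' blocks so
 * that a source block is never overwritten before it has been moved.
 */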
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to the block size of f2fs */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file() is called on every close. So we should not
	 * drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction has crashed, we should do the
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this has
	 * to happen before the file lock is dropped, it must be done in
	 * ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags;

	if (file_is_encrypt(inode))
		flags |= F2FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		flags |= F2FS_INLINE_DATA_FL;

	flags &= F2FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it a quota file? Do not allow the user to mess with it. */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & F2FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
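/*
 * F2FS_IOC_START_ATOMIC_WRITE / F2FS_IOC_COMMIT_ATOMIC_WRITE: writes issued
 * between the two ioctls are staged as in-memory pages and either become
 * visible all at once on commit (followed by an fsync of the node chain) or
 * are dropped as a whole on abort or crash of the writing task.
 */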
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!get_dirty_pages(inode))
		goto skip_flush;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;
skip_flush:
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
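/*
 * F2FS_IOC_SHUTDOWN: take the filesystem down in a controlled way.
 * FULLSYNC freezes the block device and stops checkpointing, METASYNC
 * checkpoints first, NOSYNC stops immediately, and METAFLUSH only flushes
 * dirty meta pages before stopping. GC and discard threads are stopped in
 * every mode.
 */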
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
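/*
 * F2FS_IOC_DEFRAGMENT: first scan the range to see whether the mapped
 * blocks are already physically contiguous; if not, dirty every mapped
 * page (at most one segment's worth per pass) and write them back so the
 * allocator lays them out sequentially.
 */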
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are contiguous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are contiguous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragment in SSR mode when free sections are being
	 * allocated intensively.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragment in SSR mode when free sections are being
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
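/*
 * Illustrative userspace sketch: defragmenting the first 64MB of a file.
 * start and len must be F2FS_BLKSIZE-aligned; on success, len is rewritten
 * with the number of bytes actually moved:
 *
 *	struct f2fs_defragment df = { .start = 0, .len = 64 << 20 };
 *
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &df) == 0)
 *		printf("moved %llu bytes\n", (unsigned long long)df.len);
 */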
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) {
			inode_unlock(dst);
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst) {
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
		inode_unlock(dst);
	}
out:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
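/*
 * Illustrative userspace sketch: moving a block-aligned extent between two
 * files on the same f2fs mount with F2FS_IOC_MOVE_RANGE. The source fd
 * must be open read-write and the destination fd writable; pos_in, pos_out
 * and len must be F2FS_BLKSIZE-aligned (struct layout per the f2fs uapi):
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = 1 << 20,
 *	};
 *
 *	if (ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr) < 0)
 *		perror("F2FS_IOC_MOVE_RANGE");
 */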
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
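/*
 * Illustrative userspace sketch: probing the feature mask. The returned
 * word is the raw superblock feature field with ATOMIC_WRITE forced on,
 * apparently so SQLite on Android can detect atomic-write support.
 * (use_atomic_writes() below is a placeholder, not a real API.)
 *
 *	u32 features;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &features) == 0 &&
 *	    (features & F2FS_FEATURE_ATOMIC_WRITE))
 *		use_atomic_writes();
 */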
#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		goto out_unlock;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & F2FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & F2FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & F2FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & F2FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & F2FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & F2FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Transfer xflags to internal flags */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= F2FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= F2FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= F2FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= F2FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= F2FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= F2FS_PROJINHERIT_FL;

	return iflags;
}
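/*
 * Note: f2fs_iflags_to_xflags() and f2fs_xflags_to_iflags() must stay
 * mirror images of each other, and every flag translated here must also
 * be listed in F2FS_SUPPORTED_FS_XFLAGS, since f2fs_ioc_fssetxattr()
 * below rejects any xflag outside that mask with -EOPNOTSUPP.
 */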
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				F2FS_FL_USER_VISIBLE);

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}
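/*
 * Illustrative userspace sketch: assigning a project ID via the generic
 * fsxattr interface (needs the project_quota feature and extra inode
 * attributes; the project ID value 1000 is arbitrary):
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(fd, F2FS_IOC_FSGETXATTR, &fa) == 0) {
 *		fa.fsx_projid = 1000;
 *		if (ioctl(fd, F2FS_IOC_FSSETXATTR, &fa) < 0)
 *			perror("F2FS_IOC_FSSETXATTR");
 *	}
 */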
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures as a risk signal for a normal file. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino,
			fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
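/*
 * Illustrative userspace sketch: pinning a file so GC will not migrate its
 * blocks. Pass 1 to pin and 0 to unpin; while pinned,
 * F2FS_IOC_GET_PIN_FILE reports the accumulated GC-failure count:
 *
 *	__u32 pin = 1;
 *
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *		perror("F2FS_IOC_SET_PIN_FILE");
 */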
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err = 0;	/* initialized in case the loop below never runs */

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_f2fs_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
				(iocb->ki_flags & IOCB_DIRECT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
					f2fs_has_inline_data(inode) ||
					f2fs_force_buffered_io(inode, WRITE)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return -EAGAIN;
			}

		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
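/*
 * Note on the IOCB_NOWAIT handling above: non-blocking writes are honoured
 * only for direct I/O that strictly overwrites already-allocated blocks.
 * A write that would have to wait for the inode lock, or that needs
 * inline-data conversion or a buffered fallback, returns -EAGAIN so
 * callers such as io_submit()/pwritev2() with RWF_NOWAIT can retry from a
 * context that may block.
 */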
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};