// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;
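		/*
		 * Zero the tail of the last page beyond i_size so stale data
		 * is never exposed through the writable mapping.
		 */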
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, it's fine to keep the write ordering. So
	 * we don't need to wait for node write completion here, since we use
	 * a node chain which serializes node blocks. If one of the node
	 * writes is reordered, we simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks given the fsync mark.
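	 *
	 * seq_id identifies the last node write issued above, so in the
	 * non-atomic case f2fs_wait_on_node_pages_writeback() below only
	 * waits for writes up to that point.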
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) && whence == SEEK_HOLE) {
		data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

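	/* SEEK_DATA and SEEK_HOLE need an f2fs-specific block walk */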
	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
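		 * The extent cache entries covering this range are dropped
		 * below as well.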
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
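	 * The remaining partial cluster is handled by
	 * f2fs_truncate_partial_cluster() below.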
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
			!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
			!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
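		 * Extending only updates i_size here; new blocks are
		 * allocated lazily by later writes.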
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
							blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/*
	 * insert range should be aligned to block size of f2fs.
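	 * Both offset and len must be multiples of F2FS_BLKSIZE.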
	 */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
					sbi->log_blocks_per_seg;
		block_t done = 0;

		if (map.m_len % sbi->blocks_per_seg)
			len += sbi->blocks_per_seg;

		map.m_len = sbi->blocks_per_seg;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

		up_write(&sbi->pin_sem);

		done += map.m_len;
		len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && len)
			goto next_alloc;

		map.m_len = done;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file() is called on every close. So we should not
	 * drop any in-memory pages on a close issued by another process.
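	 * Only the last writer (i_writecount == 1) may drop them.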
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashed, we should do
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be done
	 * in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		}
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			if (S_ISREG(inode->i_mode) && inode->i_size)
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
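 *
 * The f2fs_fsflags_map[] table and the F2FS_{GET,SET}TABLE_FS_FL masks below
 * must be kept in sync with each other.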
1856 */ 1857 1858 static const struct { 1859 u32 iflag; 1860 u32 fsflag; 1861 } f2fs_fsflags_map[] = { 1862 { F2FS_COMPR_FL, FS_COMPR_FL }, 1863 { F2FS_SYNC_FL, FS_SYNC_FL }, 1864 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL }, 1865 { F2FS_APPEND_FL, FS_APPEND_FL }, 1866 { F2FS_NODUMP_FL, FS_NODUMP_FL }, 1867 { F2FS_NOATIME_FL, FS_NOATIME_FL }, 1868 { F2FS_NOCOMP_FL, FS_NOCOMP_FL }, 1869 { F2FS_INDEX_FL, FS_INDEX_FL }, 1870 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL }, 1871 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL }, 1872 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL }, 1873 }; 1874 1875 #define F2FS_GETTABLE_FS_FL ( \ 1876 FS_COMPR_FL | \ 1877 FS_SYNC_FL | \ 1878 FS_IMMUTABLE_FL | \ 1879 FS_APPEND_FL | \ 1880 FS_NODUMP_FL | \ 1881 FS_NOATIME_FL | \ 1882 FS_NOCOMP_FL | \ 1883 FS_INDEX_FL | \ 1884 FS_DIRSYNC_FL | \ 1885 FS_PROJINHERIT_FL | \ 1886 FS_ENCRYPT_FL | \ 1887 FS_INLINE_DATA_FL | \ 1888 FS_NOCOW_FL | \ 1889 FS_VERITY_FL | \ 1890 FS_CASEFOLD_FL) 1891 1892 #define F2FS_SETTABLE_FS_FL ( \ 1893 FS_COMPR_FL | \ 1894 FS_SYNC_FL | \ 1895 FS_IMMUTABLE_FL | \ 1896 FS_APPEND_FL | \ 1897 FS_NODUMP_FL | \ 1898 FS_NOATIME_FL | \ 1899 FS_NOCOMP_FL | \ 1900 FS_DIRSYNC_FL | \ 1901 FS_PROJINHERIT_FL | \ 1902 FS_CASEFOLD_FL) 1903 1904 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */ 1905 static inline u32 f2fs_iflags_to_fsflags(u32 iflags) 1906 { 1907 u32 fsflags = 0; 1908 int i; 1909 1910 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) 1911 if (iflags & f2fs_fsflags_map[i].iflag) 1912 fsflags |= f2fs_fsflags_map[i].fsflag; 1913 1914 return fsflags; 1915 } 1916 1917 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */ 1918 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags) 1919 { 1920 u32 iflags = 0; 1921 int i; 1922 1923 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) 1924 if (fsflags & f2fs_fsflags_map[i].fsflag) 1925 iflags |= f2fs_fsflags_map[i].iflag; 1926 1927 return iflags; 1928 } 1929 1930 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) 1931 { 1932 struct inode *inode = file_inode(filp); 1933 struct f2fs_inode_info *fi = F2FS_I(inode); 1934 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags); 1935 1936 if (IS_ENCRYPTED(inode)) 1937 fsflags |= FS_ENCRYPT_FL; 1938 if (IS_VERITY(inode)) 1939 fsflags |= FS_VERITY_FL; 1940 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) 1941 fsflags |= FS_INLINE_DATA_FL; 1942 if (is_inode_flag_set(inode, FI_PIN_FILE)) 1943 fsflags |= FS_NOCOW_FL; 1944 1945 fsflags &= F2FS_GETTABLE_FS_FL; 1946 1947 return put_user(fsflags, (int __user *)arg); 1948 } 1949 1950 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) 1951 { 1952 struct inode *inode = file_inode(filp); 1953 struct f2fs_inode_info *fi = F2FS_I(inode); 1954 u32 fsflags, old_fsflags; 1955 u32 iflags; 1956 int ret; 1957 1958 if (!inode_owner_or_capable(inode)) 1959 return -EACCES; 1960 1961 if (get_user(fsflags, (int __user *)arg)) 1962 return -EFAULT; 1963 1964 if (fsflags & ~F2FS_GETTABLE_FS_FL) 1965 return -EOPNOTSUPP; 1966 fsflags &= F2FS_SETTABLE_FS_FL; 1967 1968 iflags = f2fs_fsflags_to_iflags(fsflags); 1969 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) 1970 return -EOPNOTSUPP; 1971 1972 ret = mnt_want_write_file(filp); 1973 if (ret) 1974 return ret; 1975 1976 inode_lock(inode); 1977 1978 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags); 1979 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); 1980 if (ret) 1981 goto out; 1982 1983 ret = f2fs_setflags_common(inode, iflags, 1984 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); 
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

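	/*
	 * Volatile files buffer data that may be dropped without ever being
	 * committed; see f2fs_release_file() and
	 * f2fs_ioc_abort_volatile_write().
	 */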
ret = mnt_want_write_file(filp); 2121 if (ret) 2122 return ret; 2123 2124 inode_lock(inode); 2125 2126 if (f2fs_is_volatile_file(inode)) 2127 goto out; 2128 2129 ret = f2fs_convert_inline_inode(inode); 2130 if (ret) 2131 goto out; 2132 2133 stat_inc_volatile_write(inode); 2134 stat_update_max_volatile_write(inode); 2135 2136 set_inode_flag(inode, FI_VOLATILE_FILE); 2137 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2138 out: 2139 inode_unlock(inode); 2140 mnt_drop_write_file(filp); 2141 return ret; 2142 } 2143 2144 static int f2fs_ioc_release_volatile_write(struct file *filp) 2145 { 2146 struct inode *inode = file_inode(filp); 2147 int ret; 2148 2149 if (!inode_owner_or_capable(inode)) 2150 return -EACCES; 2151 2152 ret = mnt_want_write_file(filp); 2153 if (ret) 2154 return ret; 2155 2156 inode_lock(inode); 2157 2158 if (!f2fs_is_volatile_file(inode)) 2159 goto out; 2160 2161 if (!f2fs_is_first_block_written(inode)) { 2162 ret = truncate_partial_data_page(inode, 0, true); 2163 goto out; 2164 } 2165 2166 ret = punch_hole(inode, 0, F2FS_BLKSIZE); 2167 out: 2168 inode_unlock(inode); 2169 mnt_drop_write_file(filp); 2170 return ret; 2171 } 2172 2173 static int f2fs_ioc_abort_volatile_write(struct file *filp) 2174 { 2175 struct inode *inode = file_inode(filp); 2176 int ret; 2177 2178 if (!inode_owner_or_capable(inode)) 2179 return -EACCES; 2180 2181 ret = mnt_want_write_file(filp); 2182 if (ret) 2183 return ret; 2184 2185 inode_lock(inode); 2186 2187 if (f2fs_is_atomic_file(inode)) 2188 f2fs_drop_inmem_pages(inode); 2189 if (f2fs_is_volatile_file(inode)) { 2190 clear_inode_flag(inode, FI_VOLATILE_FILE); 2191 stat_dec_volatile_write(inode); 2192 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); 2193 } 2194 2195 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); 2196 2197 inode_unlock(inode); 2198 2199 mnt_drop_write_file(filp); 2200 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2201 return ret; 2202 } 2203 2204 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) 2205 { 2206 struct inode *inode = file_inode(filp); 2207 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2208 struct super_block *sb = sbi->sb; 2209 __u32 in; 2210 int ret = 0; 2211 2212 if (!capable(CAP_SYS_ADMIN)) 2213 return -EPERM; 2214 2215 if (get_user(in, (__u32 __user *)arg)) 2216 return -EFAULT; 2217 2218 if (in != F2FS_GOING_DOWN_FULLSYNC) { 2219 ret = mnt_want_write_file(filp); 2220 if (ret) { 2221 if (ret == -EROFS) { 2222 ret = 0; 2223 f2fs_stop_checkpoint(sbi, false); 2224 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2225 trace_f2fs_shutdown(sbi, in, ret); 2226 } 2227 return ret; 2228 } 2229 } 2230 2231 switch (in) { 2232 case F2FS_GOING_DOWN_FULLSYNC: 2233 sb = freeze_bdev(sb->s_bdev); 2234 if (IS_ERR(sb)) { 2235 ret = PTR_ERR(sb); 2236 goto out; 2237 } 2238 if (sb) { 2239 f2fs_stop_checkpoint(sbi, false); 2240 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2241 thaw_bdev(sb->s_bdev, sb); 2242 } 2243 break; 2244 case F2FS_GOING_DOWN_METASYNC: 2245 /* do checkpoint only */ 2246 ret = f2fs_sync_fs(sb, 1); 2247 if (ret) 2248 goto out; 2249 f2fs_stop_checkpoint(sbi, false); 2250 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2251 break; 2252 case F2FS_GOING_DOWN_NOSYNC: 2253 f2fs_stop_checkpoint(sbi, false); 2254 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2255 break; 2256 case F2FS_GOING_DOWN_METAFLUSH: 2257 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO); 2258 f2fs_stop_checkpoint(sbi, false); 2259 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2260 break; 2261 case F2FS_GOING_DOWN_NEED_FSCK: 2262 set_sbi_flag(sbi, SBI_NEED_FSCK); 2263 set_sbi_flag(sbi, 
SBI_CP_DISABLED_QUICK); 2264 set_sbi_flag(sbi, SBI_IS_DIRTY); 2265 /* do checkpoint only */ 2266 ret = f2fs_sync_fs(sb, 1); 2267 goto out; 2268 default: 2269 ret = -EINVAL; 2270 goto out; 2271 } 2272 2273 f2fs_stop_gc_thread(sbi); 2274 f2fs_stop_discard_thread(sbi); 2275 2276 f2fs_drop_discard_cmd(sbi); 2277 clear_opt(sbi, DISCARD); 2278 2279 f2fs_update_time(sbi, REQ_TIME); 2280 out: 2281 if (in != F2FS_GOING_DOWN_FULLSYNC) 2282 mnt_drop_write_file(filp); 2283 2284 trace_f2fs_shutdown(sbi, in, ret); 2285 2286 return ret; 2287 } 2288 2289 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) 2290 { 2291 struct inode *inode = file_inode(filp); 2292 struct super_block *sb = inode->i_sb; 2293 struct request_queue *q = bdev_get_queue(sb->s_bdev); 2294 struct fstrim_range range; 2295 int ret; 2296 2297 if (!capable(CAP_SYS_ADMIN)) 2298 return -EPERM; 2299 2300 if (!f2fs_hw_support_discard(F2FS_SB(sb))) 2301 return -EOPNOTSUPP; 2302 2303 if (copy_from_user(&range, (struct fstrim_range __user *)arg, 2304 sizeof(range))) 2305 return -EFAULT; 2306 2307 ret = mnt_want_write_file(filp); 2308 if (ret) 2309 return ret; 2310 2311 range.minlen = max((unsigned int)range.minlen, 2312 q->limits.discard_granularity); 2313 ret = f2fs_trim_fs(F2FS_SB(sb), &range); 2314 mnt_drop_write_file(filp); 2315 if (ret < 0) 2316 return ret; 2317 2318 if (copy_to_user((struct fstrim_range __user *)arg, &range, 2319 sizeof(range))) 2320 return -EFAULT; 2321 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2322 return 0; 2323 } 2324 2325 static bool uuid_is_nonzero(__u8 u[16]) 2326 { 2327 int i; 2328 2329 for (i = 0; i < 16; i++) 2330 if (u[i]) 2331 return true; 2332 return false; 2333 } 2334 2335 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) 2336 { 2337 struct inode *inode = file_inode(filp); 2338 2339 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode))) 2340 return -EOPNOTSUPP; 2341 2342 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2343 2344 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); 2345 } 2346 2347 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) 2348 { 2349 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2350 return -EOPNOTSUPP; 2351 return fscrypt_ioctl_get_policy(filp, (void __user *)arg); 2352 } 2353 2354 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) 2355 { 2356 struct inode *inode = file_inode(filp); 2357 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2358 int err; 2359 2360 if (!f2fs_sb_has_encrypt(sbi)) 2361 return -EOPNOTSUPP; 2362 2363 err = mnt_want_write_file(filp); 2364 if (err) 2365 return err; 2366 2367 down_write(&sbi->sb_lock); 2368 2369 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) 2370 goto got_it; 2371 2372 /* update superblock with uuid */ 2373 generate_random_uuid(sbi->raw_super->encrypt_pw_salt); 2374 2375 err = f2fs_commit_super(sbi, false); 2376 if (err) { 2377 /* undo new data */ 2378 memset(sbi->raw_super->encrypt_pw_salt, 0, 16); 2379 goto out_err; 2380 } 2381 got_it: 2382 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt, 2383 16)) 2384 err = -EFAULT; 2385 out_err: 2386 up_write(&sbi->sb_lock); 2387 mnt_drop_write_file(filp); 2388 return err; 2389 } 2390 2391 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp, 2392 unsigned long arg) 2393 { 2394 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2395 return -EOPNOTSUPP; 2396 2397 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg); 2398 } 2399 2400 static int 
f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg) 2401 { 2402 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2403 return -EOPNOTSUPP; 2404 2405 return fscrypt_ioctl_add_key(filp, (void __user *)arg); 2406 } 2407 2408 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg) 2409 { 2410 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2411 return -EOPNOTSUPP; 2412 2413 return fscrypt_ioctl_remove_key(filp, (void __user *)arg); 2414 } 2415 2416 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp, 2417 unsigned long arg) 2418 { 2419 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2420 return -EOPNOTSUPP; 2421 2422 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg); 2423 } 2424 2425 static int f2fs_ioc_get_encryption_key_status(struct file *filp, 2426 unsigned long arg) 2427 { 2428 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2429 return -EOPNOTSUPP; 2430 2431 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); 2432 } 2433 2434 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg) 2435 { 2436 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2437 return -EOPNOTSUPP; 2438 2439 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); 2440 } 2441 2442 static int f2fs_ioc_gc(struct file *filp, unsigned long arg) 2443 { 2444 struct inode *inode = file_inode(filp); 2445 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2446 __u32 sync; 2447 int ret; 2448 2449 if (!capable(CAP_SYS_ADMIN)) 2450 return -EPERM; 2451 2452 if (get_user(sync, (__u32 __user *)arg)) 2453 return -EFAULT; 2454 2455 if (f2fs_readonly(sbi->sb)) 2456 return -EROFS; 2457 2458 ret = mnt_want_write_file(filp); 2459 if (ret) 2460 return ret; 2461 2462 if (!sync) { 2463 if (!down_write_trylock(&sbi->gc_lock)) { 2464 ret = -EBUSY; 2465 goto out; 2466 } 2467 } else { 2468 down_write(&sbi->gc_lock); 2469 } 2470 2471 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO); 2472 out: 2473 mnt_drop_write_file(filp); 2474 return ret; 2475 } 2476 2477 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg) 2478 { 2479 struct inode *inode = file_inode(filp); 2480 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2481 struct f2fs_gc_range range; 2482 u64 end; 2483 int ret; 2484 2485 if (!capable(CAP_SYS_ADMIN)) 2486 return -EPERM; 2487 2488 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg, 2489 sizeof(range))) 2490 return -EFAULT; 2491 2492 if (f2fs_readonly(sbi->sb)) 2493 return -EROFS; 2494 2495 end = range.start + range.len; 2496 if (end < range.start || range.start < MAIN_BLKADDR(sbi) || 2497 end >= MAX_BLKADDR(sbi)) 2498 return -EINVAL; 2499 2500 ret = mnt_want_write_file(filp); 2501 if (ret) 2502 return ret; 2503 2504 do_more: 2505 if (!range.sync) { 2506 if (!down_write_trylock(&sbi->gc_lock)) { 2507 ret = -EBUSY; 2508 goto out; 2509 } 2510 } else { 2511 down_write(&sbi->gc_lock); 2512 } 2513 2514 ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start)); 2515 if (ret) { 2516 if (ret == -EBUSY) 2517 ret = -EAGAIN; 2518 goto out; 2519 } 2520 range.start += BLKS_PER_SEC(sbi); 2521 if (range.start <= end) 2522 goto do_more; 2523 out: 2524 mnt_drop_write_file(filp); 2525 return ret; 2526 } 2527 2528 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) 2529 { 2530 struct inode *inode = file_inode(filp); 2531 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2532 int ret; 2533 2534 if (!capable(CAP_SYS_ADMIN)) 2535 return -EPERM; 2536 2537 if (f2fs_readonly(sbi->sb)) 2538 
return -EROFS; 2539 2540 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2541 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled."); 2542 return -EINVAL; 2543 } 2544 2545 ret = mnt_want_write_file(filp); 2546 if (ret) 2547 return ret; 2548 2549 ret = f2fs_sync_fs(sbi->sb, 1); 2550 2551 mnt_drop_write_file(filp); 2552 return ret; 2553 } 2554 2555 static int f2fs_defragment_range(struct f2fs_sb_info *sbi, 2556 struct file *filp, 2557 struct f2fs_defragment *range) 2558 { 2559 struct inode *inode = file_inode(filp); 2560 struct f2fs_map_blocks map = { .m_next_extent = NULL, 2561 .m_seg_type = NO_CHECK_TYPE, 2562 .m_may_create = false }; 2563 struct extent_info ei = {0, 0, 0}; 2564 pgoff_t pg_start, pg_end, next_pgofs; 2565 unsigned int blk_per_seg = sbi->blocks_per_seg; 2566 unsigned int total = 0, sec_num; 2567 block_t blk_end = 0; 2568 bool fragmented = false; 2569 int err; 2570 2571 /* if the in-place-update policy is enabled, don't waste time here */ 2572 if (f2fs_should_update_inplace(inode, NULL)) 2573 return -EINVAL; 2574 2575 pg_start = range->start >> PAGE_SHIFT; 2576 pg_end = (range->start + range->len) >> PAGE_SHIFT; 2577 2578 f2fs_balance_fs(sbi, true); 2579 2580 inode_lock(inode); 2581 2582 /* write back all dirty pages in the range */ 2583 err = filemap_write_and_wait_range(inode->i_mapping, range->start, 2584 range->start + range->len - 1); 2585 if (err) 2586 goto out; 2587 2588 /* 2589 * Look up mapping info in the extent cache; skip defragmenting if the 2590 * physical block addresses are contiguous. 2591 */ 2592 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) { 2593 if (ei.fofs + ei.len >= pg_end) 2594 goto out; 2595 } 2596 2597 map.m_lblk = pg_start; 2598 map.m_next_pgofs = &next_pgofs; 2599 2600 /* 2601 * Look up mapping info in the dnode page cache; skip defragmenting if 2602 * all physical block addresses are contiguous, even if there are holes 2603 * in the logical block range.
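 * For example (illustrative numbers): logical blocks 0-1 mapped to
 * physical blocks 100-101, a hole at logical block 2, then logical
 * block 3 mapped to physical block 102 still counts as unfragmented
 * here, because the allocated blocks stay physically adjacent.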
2604 */ 2605 while (map.m_lblk < pg_end) { 2606 map.m_len = pg_end - map.m_lblk; 2607 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); 2608 if (err) 2609 goto out; 2610 2611 if (!(map.m_flags & F2FS_MAP_FLAGS)) { 2612 map.m_lblk = next_pgofs; 2613 continue; 2614 } 2615 2616 if (blk_end && blk_end != map.m_pblk) 2617 fragmented = true; 2618 2619 /* record the total count of blocks that we're going to move */ 2620 total += map.m_len; 2621 2622 blk_end = map.m_pblk + map.m_len; 2623 2624 map.m_lblk += map.m_len; 2625 } 2626 2627 if (!fragmented) { 2628 total = 0; 2629 goto out; 2630 } 2631 2632 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi)); 2633 2634 /* 2635 * make sure there are enough free sections for LFS allocation; this 2636 * avoids running defragmentation in SSR mode when free sections are 2637 * being consumed intensively 2638 */ 2639 if (has_not_enough_free_secs(sbi, 0, sec_num)) { 2640 err = -EAGAIN; 2641 goto out; 2642 } 2643 2644 map.m_lblk = pg_start; 2645 map.m_len = pg_end - pg_start; 2646 total = 0; 2647 2648 while (map.m_lblk < pg_end) { 2649 pgoff_t idx; 2650 int cnt = 0; 2651 2652 do_map: 2653 map.m_len = pg_end - map.m_lblk; 2654 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); 2655 if (err) 2656 goto clear_out; 2657 2658 if (!(map.m_flags & F2FS_MAP_FLAGS)) { 2659 map.m_lblk = next_pgofs; 2660 goto check; 2661 } 2662 2663 set_inode_flag(inode, FI_DO_DEFRAG); 2664 2665 idx = map.m_lblk; 2666 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) { 2667 struct page *page; 2668 2669 page = f2fs_get_lock_data_page(inode, idx, true); 2670 if (IS_ERR(page)) { 2671 err = PTR_ERR(page); 2672 goto clear_out; 2673 } 2674 2675 set_page_dirty(page); 2676 f2fs_put_page(page, 1); 2677 2678 idx++; 2679 cnt++; 2680 total++; 2681 } 2682 2683 map.m_lblk = idx; 2684 check: 2685 if (map.m_lblk < pg_end && cnt < blk_per_seg) 2686 goto do_map; 2687 2688 clear_inode_flag(inode, FI_DO_DEFRAG); 2689 2690 err = filemap_fdatawrite(inode->i_mapping); 2691 if (err) 2692 goto out; 2693 } 2694 clear_out: 2695 clear_inode_flag(inode, FI_DO_DEFRAG); 2696 out: 2697 inode_unlock(inode); 2698 if (!err) 2699 range->len = (u64)total << PAGE_SHIFT; 2700 return err; 2701 } 2702 2703 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg) 2704 { 2705 struct inode *inode = file_inode(filp); 2706 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2707 struct f2fs_defragment range; 2708 int err; 2709 2710 if (!capable(CAP_SYS_ADMIN)) 2711 return -EPERM; 2712 2713 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode)) 2714 return -EINVAL; 2715 2716 if (f2fs_readonly(sbi->sb)) 2717 return -EROFS; 2718 2719 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg, 2720 sizeof(range))) 2721 return -EFAULT; 2722 2723 /* verify alignment of offset & size */ 2724 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1)) 2725 return -EINVAL; 2726 2727 if (unlikely((range.start + range.len) >> PAGE_SHIFT > 2728 sbi->max_file_blocks)) 2729 return -EINVAL; 2730 2731 err = mnt_want_write_file(filp); 2732 if (err) 2733 return err; 2734 2735 err = f2fs_defragment_range(sbi, filp, &range); 2736 mnt_drop_write_file(filp); 2737 2738 f2fs_update_time(sbi, REQ_TIME); 2739 if (err < 0) 2740 return err; 2741 2742 if (copy_to_user((struct f2fs_defragment __user *)arg, &range, 2743 sizeof(range))) 2744 return -EFAULT; 2745 2746 return 0; 2747 } 2748 2749 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, 2750 struct file *file_out, loff_t pos_out, size_t len) 2751 {
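	/*
	 * Illustrative userspace sketch (not part of the kernel build),
	 * assuming struct f2fs_move_range from this tree's headers and
	 * the default 4KiB block size:
	 *
	 *	struct f2fs_move_range mr = {
	 *		.dst_fd  = dst_fd,	// opened with write access
	 *		.pos_in  = 0,		// block-aligned source offset
	 *		.pos_out = 0,		// block-aligned dest offset
	 *		.len     = 4096,	// bytes, block-aligned
	 *	};
	 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);  // src_fd: O_RDWR
	 *
	 * The checks below enforce exactly these constraints: same mount,
	 * regular non-encrypted files, and block-aligned offsets/length.
	 */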
2752 struct inode *src = file_inode(file_in); 2753 struct inode *dst = file_inode(file_out); 2754 struct f2fs_sb_info *sbi = F2FS_I_SB(src); 2755 size_t olen = len, dst_max_i_size = 0; 2756 size_t dst_osize; 2757 int ret; 2758 2759 if (file_in->f_path.mnt != file_out->f_path.mnt || 2760 src->i_sb != dst->i_sb) 2761 return -EXDEV; 2762 2763 if (unlikely(f2fs_readonly(src->i_sb))) 2764 return -EROFS; 2765 2766 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode)) 2767 return -EINVAL; 2768 2769 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst)) 2770 return -EOPNOTSUPP; 2771 2772 if (pos_out < 0 || pos_in < 0) 2773 return -EINVAL; 2774 2775 if (src == dst) { 2776 if (pos_in == pos_out) 2777 return 0; 2778 if (pos_out > pos_in && pos_out < pos_in + len) 2779 return -EINVAL; 2780 } 2781 2782 inode_lock(src); 2783 if (src != dst) { 2784 ret = -EBUSY; 2785 if (!inode_trylock(dst)) 2786 goto out; 2787 } 2788 2789 ret = -EINVAL; 2790 if (pos_in + len > src->i_size || pos_in + len < pos_in) 2791 goto out_unlock; 2792 if (len == 0) 2793 olen = len = src->i_size - pos_in; 2794 if (pos_in + len == src->i_size) 2795 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in; 2796 if (len == 0) { 2797 ret = 0; 2798 goto out_unlock; 2799 } 2800 2801 dst_osize = dst->i_size; 2802 if (pos_out + olen > dst->i_size) 2803 dst_max_i_size = pos_out + olen; 2804 2805 /* verify the end result is block aligned */ 2806 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) || 2807 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) || 2808 !IS_ALIGNED(pos_out, F2FS_BLKSIZE)) 2809 goto out_unlock; 2810 2811 ret = f2fs_convert_inline_inode(src); 2812 if (ret) 2813 goto out_unlock; 2814 2815 ret = f2fs_convert_inline_inode(dst); 2816 if (ret) 2817 goto out_unlock; 2818 2819 /* write out all dirty pages from offset */ 2820 ret = filemap_write_and_wait_range(src->i_mapping, 2821 pos_in, pos_in + len); 2822 if (ret) 2823 goto out_unlock; 2824 2825 ret = filemap_write_and_wait_range(dst->i_mapping, 2826 pos_out, pos_out + len); 2827 if (ret) 2828 goto out_unlock; 2829 2830 f2fs_balance_fs(sbi, true); 2831 2832 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2833 if (src != dst) { 2834 ret = -EBUSY; 2835 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) 2836 goto out_src; 2837 } 2838 2839 f2fs_lock_op(sbi); 2840 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS, 2841 pos_out >> F2FS_BLKSIZE_BITS, 2842 len >> F2FS_BLKSIZE_BITS, false); 2843 2844 if (!ret) { 2845 if (dst_max_i_size) 2846 f2fs_i_size_write(dst, dst_max_i_size); 2847 else if (dst_osize != dst->i_size) 2848 f2fs_i_size_write(dst, dst_osize); 2849 } 2850 f2fs_unlock_op(sbi); 2851 2852 if (src != dst) 2853 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); 2854 out_src: 2855 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2856 out_unlock: 2857 if (src != dst) 2858 inode_unlock(dst); 2859 out: 2860 inode_unlock(src); 2861 return ret; 2862 } 2863 2864 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg) 2865 { 2866 struct f2fs_move_range range; 2867 struct fd dst; 2868 int err; 2869 2870 if (!(filp->f_mode & FMODE_READ) || 2871 !(filp->f_mode & FMODE_WRITE)) 2872 return -EBADF; 2873 2874 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg, 2875 sizeof(range))) 2876 return -EFAULT; 2877 2878 dst = fdget(range.dst_fd); 2879 if (!dst.file) 2880 return -EBADF; 2881 2882 if (!(dst.file->f_mode & FMODE_WRITE)) { 2883 err = -EBADF; 2884 goto err_out; 2885 } 2886 2887 err = mnt_want_write_file(filp); 2888 if (err) 2889 goto err_out; 2890 2891 err = f2fs_move_file_range(filp, 
range.pos_in, dst.file, 2892 range.pos_out, range.len); 2893 2894 mnt_drop_write_file(filp); 2895 if (err) 2896 goto err_out; 2897 2898 if (copy_to_user((struct f2fs_move_range __user *)arg, 2899 &range, sizeof(range))) 2900 err = -EFAULT; 2901 err_out: 2902 fdput(dst); 2903 return err; 2904 } 2905 2906 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) 2907 { 2908 struct inode *inode = file_inode(filp); 2909 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2910 struct sit_info *sm = SIT_I(sbi); 2911 unsigned int start_segno = 0, end_segno = 0; 2912 unsigned int dev_start_segno = 0, dev_end_segno = 0; 2913 struct f2fs_flush_device range; 2914 int ret; 2915 2916 if (!capable(CAP_SYS_ADMIN)) 2917 return -EPERM; 2918 2919 if (f2fs_readonly(sbi->sb)) 2920 return -EROFS; 2921 2922 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2923 return -EINVAL; 2924 2925 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg, 2926 sizeof(range))) 2927 return -EFAULT; 2928 2929 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || 2930 __is_large_section(sbi)) { 2931 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1", 2932 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec); 2933 return -EINVAL; 2934 } 2935 2936 ret = mnt_want_write_file(filp); 2937 if (ret) 2938 return ret; 2939 2940 if (range.dev_num != 0) 2941 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk); 2942 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk); 2943 2944 start_segno = sm->last_victim[FLUSH_DEVICE]; 2945 if (start_segno < dev_start_segno || start_segno >= dev_end_segno) 2946 start_segno = dev_start_segno; 2947 end_segno = min(start_segno + range.segments, dev_end_segno); 2948 2949 while (start_segno < end_segno) { 2950 if (!down_write_trylock(&sbi->gc_lock)) { 2951 ret = -EBUSY; 2952 goto out; 2953 } 2954 sm->last_victim[GC_CB] = end_segno + 1; 2955 sm->last_victim[GC_GREEDY] = end_segno + 1; 2956 sm->last_victim[ALLOC_NEXT] = end_segno + 1; 2957 ret = f2fs_gc(sbi, true, true, start_segno); 2958 if (ret == -EAGAIN) 2959 ret = 0; 2960 else if (ret < 0) 2961 break; 2962 start_segno++; 2963 } 2964 out: 2965 mnt_drop_write_file(filp); 2966 return ret; 2967 } 2968 2969 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg) 2970 { 2971 struct inode *inode = file_inode(filp); 2972 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature); 2973 2974 /* Must validate to set it with SQLite behavior in Android. 
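 * (That is, the mask returned below always advertises
 * F2FS_FEATURE_ATOMIC_WRITE, regardless of the on-disk feature flags,
 * so SQLite on Android keeps using the atomic-write ioctls.)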
*/ 2975 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE; 2976 2977 return put_user(sb_feature, (u32 __user *)arg); 2978 } 2979 2980 #ifdef CONFIG_QUOTA 2981 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) 2982 { 2983 struct dquot *transfer_to[MAXQUOTAS] = {}; 2984 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2985 struct super_block *sb = sbi->sb; 2986 int err = 0; 2987 2988 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); 2989 if (!IS_ERR(transfer_to[PRJQUOTA])) { 2990 err = __dquot_transfer(inode, transfer_to); 2991 if (err) 2992 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 2993 dqput(transfer_to[PRJQUOTA]); 2994 } 2995 return err; 2996 } 2997 2998 static int f2fs_ioc_setproject(struct file *filp, __u32 projid) 2999 { 3000 struct inode *inode = file_inode(filp); 3001 struct f2fs_inode_info *fi = F2FS_I(inode); 3002 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3003 struct page *ipage; 3004 kprojid_t kprojid; 3005 int err; 3006 3007 if (!f2fs_sb_has_project_quota(sbi)) { 3008 if (projid != F2FS_DEF_PROJID) 3009 return -EOPNOTSUPP; 3010 else 3011 return 0; 3012 } 3013 3014 if (!f2fs_has_extra_attr(inode)) 3015 return -EOPNOTSUPP; 3016 3017 kprojid = make_kprojid(&init_user_ns, (projid_t)projid); 3018 3019 if (projid_eq(kprojid, F2FS_I(inode)->i_projid)) 3020 return 0; 3021 3022 err = -EPERM; 3023 /* Is it quota file? Do not allow user to mess with it */ 3024 if (IS_NOQUOTA(inode)) 3025 return err; 3026 3027 ipage = f2fs_get_node_page(sbi, inode->i_ino); 3028 if (IS_ERR(ipage)) 3029 return PTR_ERR(ipage); 3030 3031 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize, 3032 i_projid)) { 3033 err = -EOVERFLOW; 3034 f2fs_put_page(ipage, 1); 3035 return err; 3036 } 3037 f2fs_put_page(ipage, 1); 3038 3039 err = dquot_initialize(inode); 3040 if (err) 3041 return err; 3042 3043 f2fs_lock_op(sbi); 3044 err = f2fs_transfer_project_quota(inode, kprojid); 3045 if (err) 3046 goto out_unlock; 3047 3048 F2FS_I(inode)->i_projid = kprojid; 3049 inode->i_ctime = current_time(inode); 3050 f2fs_mark_inode_dirty_sync(inode, true); 3051 out_unlock: 3052 f2fs_unlock_op(sbi); 3053 return err; 3054 } 3055 #else 3056 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) 3057 { 3058 return 0; 3059 } 3060 3061 static int f2fs_ioc_setproject(struct file *filp, __u32 projid) 3062 { 3063 if (projid != F2FS_DEF_PROJID) 3064 return -EOPNOTSUPP; 3065 return 0; 3066 } 3067 #endif 3068 3069 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */ 3070 3071 /* 3072 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable 3073 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its 3074 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS. 
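 * For example, setting FS_XFLAG_NODUMP through FS_IOC_FSSETXATTR sets
 * F2FS_NODUMP_FL on the inode via the table below, and
 * f2fs_iflags_to_xflags() reports it back through FS_IOC_FSGETXATTR.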
3075 */ 3076 3077 static const struct { 3078 u32 iflag; 3079 u32 xflag; 3080 } f2fs_xflags_map[] = { 3081 { F2FS_SYNC_FL, FS_XFLAG_SYNC }, 3082 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE }, 3083 { F2FS_APPEND_FL, FS_XFLAG_APPEND }, 3084 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP }, 3085 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME }, 3086 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT }, 3087 }; 3088 3089 #define F2FS_SUPPORTED_XFLAGS ( \ 3090 FS_XFLAG_SYNC | \ 3091 FS_XFLAG_IMMUTABLE | \ 3092 FS_XFLAG_APPEND | \ 3093 FS_XFLAG_NODUMP | \ 3094 FS_XFLAG_NOATIME | \ 3095 FS_XFLAG_PROJINHERIT) 3096 3097 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */ 3098 static inline u32 f2fs_iflags_to_xflags(u32 iflags) 3099 { 3100 u32 xflags = 0; 3101 int i; 3102 3103 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) 3104 if (iflags & f2fs_xflags_map[i].iflag) 3105 xflags |= f2fs_xflags_map[i].xflag; 3106 3107 return xflags; 3108 } 3109 3110 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */ 3111 static inline u32 f2fs_xflags_to_iflags(u32 xflags) 3112 { 3113 u32 iflags = 0; 3114 int i; 3115 3116 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) 3117 if (xflags & f2fs_xflags_map[i].xflag) 3118 iflags |= f2fs_xflags_map[i].iflag; 3119 3120 return iflags; 3121 } 3122 3123 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa) 3124 { 3125 struct f2fs_inode_info *fi = F2FS_I(inode); 3126 3127 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags)); 3128 3129 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) 3130 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); 3131 } 3132 3133 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) 3134 { 3135 struct inode *inode = file_inode(filp); 3136 struct fsxattr fa; 3137 3138 f2fs_fill_fsxattr(inode, &fa); 3139 3140 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) 3141 return -EFAULT; 3142 return 0; 3143 } 3144 3145 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) 3146 { 3147 struct inode *inode = file_inode(filp); 3148 struct fsxattr fa, old_fa; 3149 u32 iflags; 3150 int err; 3151 3152 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa))) 3153 return -EFAULT; 3154 3155 /* Make sure caller has proper permission */ 3156 if (!inode_owner_or_capable(inode)) 3157 return -EACCES; 3158 3159 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS) 3160 return -EOPNOTSUPP; 3161 3162 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags); 3163 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) 3164 return -EOPNOTSUPP; 3165 3166 err = mnt_want_write_file(filp); 3167 if (err) 3168 return err; 3169 3170 inode_lock(inode); 3171 3172 f2fs_fill_fsxattr(inode, &old_fa); 3173 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa); 3174 if (err) 3175 goto out; 3176 3177 err = f2fs_setflags_common(inode, iflags, 3178 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); 3179 if (err) 3180 goto out; 3181 3182 err = f2fs_ioc_setproject(filp, fa.fsx_projid); 3183 out: 3184 inode_unlock(inode); 3185 mnt_drop_write_file(filp); 3186 return err; 3187 } 3188 3189 int f2fs_pin_file_control(struct inode *inode, bool inc) 3190 { 3191 struct f2fs_inode_info *fi = F2FS_I(inode); 3192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3193 3194 /* Use i_gc_failures for normal file as a risk signal. 
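 * A pinned file must keep its on-disk block addresses, so GC has to
 * skip it; once the failure count exceeds gc_pin_file_threshold, the
 * pin is dropped and -EAGAIN is returned (see below).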
*/ 3195 if (inc) 3196 f2fs_i_gc_failures_write(inode, 3197 fi->i_gc_failures[GC_FAILURE_PIN] + 1); 3198 3199 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { 3200 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials", 3201 __func__, inode->i_ino, 3202 fi->i_gc_failures[GC_FAILURE_PIN]); 3203 clear_inode_flag(inode, FI_PIN_FILE); 3204 return -EAGAIN; 3205 } 3206 return 0; 3207 } 3208 3209 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) 3210 { 3211 struct inode *inode = file_inode(filp); 3212 __u32 pin; 3213 int ret = 0; 3214 3215 if (get_user(pin, (__u32 __user *)arg)) 3216 return -EFAULT; 3217 3218 if (!S_ISREG(inode->i_mode)) 3219 return -EINVAL; 3220 3221 if (f2fs_readonly(F2FS_I_SB(inode)->sb)) 3222 return -EROFS; 3223 3224 ret = mnt_want_write_file(filp); 3225 if (ret) 3226 return ret; 3227 3228 inode_lock(inode); 3229 3230 if (f2fs_should_update_outplace(inode, NULL)) { 3231 ret = -EINVAL; 3232 goto out; 3233 } 3234 3235 if (!pin) { 3236 clear_inode_flag(inode, FI_PIN_FILE); 3237 f2fs_i_gc_failures_write(inode, 0); 3238 goto done; 3239 } 3240 3241 if (f2fs_pin_file_control(inode, false)) { 3242 ret = -EAGAIN; 3243 goto out; 3244 } 3245 3246 ret = f2fs_convert_inline_inode(inode); 3247 if (ret) 3248 goto out; 3249 3250 if (!f2fs_disable_compressed_file(inode)) { 3251 ret = -EOPNOTSUPP; 3252 goto out; 3253 } 3254 3255 set_inode_flag(inode, FI_PIN_FILE); 3256 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; 3257 done: 3258 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 3259 out: 3260 inode_unlock(inode); 3261 mnt_drop_write_file(filp); 3262 return ret; 3263 } 3264 3265 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg) 3266 { 3267 struct inode *inode = file_inode(filp); 3268 __u32 pin = 0; 3269 3270 if (is_inode_flag_set(inode, FI_PIN_FILE)) 3271 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; 3272 return put_user(pin, (u32 __user *)arg); 3273 } 3274 3275 int f2fs_precache_extents(struct inode *inode) 3276 { 3277 struct f2fs_inode_info *fi = F2FS_I(inode); 3278 struct f2fs_map_blocks map; 3279 pgoff_t m_next_extent; 3280 loff_t end; 3281 int err; 3282 3283 if (is_inode_flag_set(inode, FI_NO_EXTENT)) 3284 return -EOPNOTSUPP; 3285 3286 map.m_lblk = 0; 3287 map.m_next_pgofs = NULL; 3288 map.m_next_extent = &m_next_extent; 3289 map.m_seg_type = NO_CHECK_TYPE; 3290 map.m_may_create = false; 3291 end = F2FS_I_SB(inode)->max_file_blocks; 3292 3293 while (map.m_lblk < end) { 3294 map.m_len = end - map.m_lblk; 3295 3296 down_write(&fi->i_gc_rwsem[WRITE]); 3297 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE); 3298 up_write(&fi->i_gc_rwsem[WRITE]); 3299 if (err) 3300 return err; 3301 3302 map.m_lblk = m_next_extent; 3303 } 3304 3305 return err; 3306 } 3307 3308 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg) 3309 { 3310 return f2fs_precache_extents(file_inode(filp)); 3311 } 3312 3313 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) 3314 { 3315 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); 3316 __u64 block_count; 3317 3318 if (!capable(CAP_SYS_ADMIN)) 3319 return -EPERM; 3320 3321 if (f2fs_readonly(sbi->sb)) 3322 return -EROFS; 3323 3324 if (copy_from_user(&block_count, (void __user *)arg, 3325 sizeof(block_count))) 3326 return -EFAULT; 3327 3328 return f2fs_resize_fs(sbi, block_count); 3329 } 3330 3331 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) 3332 { 3333 struct inode *inode = file_inode(filp); 3334 3335 
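	/*
	 * Enabling fs-verity is delegated to the generic
	 * fsverity_ioctl_enable() helper below; f2fs itself only checks
	 * the superblock verity feature and refreshes REQ_TIME here.
	 */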
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 3336 3337 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) { 3338 f2fs_warn(F2FS_I_SB(inode), 3339 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.", 3340 inode->i_ino); 3341 return -EOPNOTSUPP; 3342 } 3343 3344 return fsverity_ioctl_enable(filp, (const void __user *)arg); 3345 } 3346 3347 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) 3348 { 3349 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) 3350 return -EOPNOTSUPP; 3351 3352 return fsverity_ioctl_measure(filp, (void __user *)arg); 3353 } 3354 3355 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg) 3356 { 3357 struct inode *inode = file_inode(filp); 3358 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3359 char *vbuf; 3360 int count; 3361 int err = 0; 3362 3363 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL); 3364 if (!vbuf) 3365 return -ENOMEM; 3366 3367 down_read(&sbi->sb_lock); 3368 count = utf16s_to_utf8s(sbi->raw_super->volume_name, 3369 ARRAY_SIZE(sbi->raw_super->volume_name), 3370 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME); 3371 up_read(&sbi->sb_lock); 3372 3373 if (copy_to_user((char __user *)arg, vbuf, 3374 min(FSLABEL_MAX, count))) 3375 err = -EFAULT; 3376 3377 kfree(vbuf); 3378 return err; 3379 } 3380 3381 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg) 3382 { 3383 struct inode *inode = file_inode(filp); 3384 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3385 char *vbuf; 3386 int err = 0; 3387 3388 if (!capable(CAP_SYS_ADMIN)) 3389 return -EPERM; 3390 3391 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX); 3392 if (IS_ERR(vbuf)) 3393 return PTR_ERR(vbuf); 3394 3395 err = mnt_want_write_file(filp); 3396 if (err) 3397 goto out; 3398 3399 down_write(&sbi->sb_lock); 3400 3401 memset(sbi->raw_super->volume_name, 0, 3402 sizeof(sbi->raw_super->volume_name)); 3403 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN, 3404 sbi->raw_super->volume_name, 3405 ARRAY_SIZE(sbi->raw_super->volume_name)); 3406 3407 err = f2fs_commit_super(sbi, false); 3408 3409 up_write(&sbi->sb_lock); 3410 3411 mnt_drop_write_file(filp); 3412 out: 3413 kfree(vbuf); 3414 return err; 3415 } 3416 3417 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg) 3418 { 3419 struct inode *inode = file_inode(filp); 3420 __u64 blocks; 3421 3422 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 3423 return -EOPNOTSUPP; 3424 3425 if (!f2fs_compressed_file(inode)) 3426 return -EINVAL; 3427 3428 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks); 3429 return put_user(blocks, (u64 __user *)arg); 3430 } 3431 3432 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count) 3433 { 3434 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 3435 unsigned int released_blocks = 0; 3436 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; 3437 block_t blkaddr; 3438 int i; 3439 3440 for (i = 0; i < count; i++) { 3441 blkaddr = data_blkaddr(dn->inode, dn->node_page, 3442 dn->ofs_in_node + i); 3443 3444 if (!__is_valid_data_blkaddr(blkaddr)) 3445 continue; 3446 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, 3447 DATA_GENERIC_ENHANCE))) 3448 return -EFSCORRUPTED; 3449 } 3450 3451 while (count) { 3452 int compr_blocks = 0; 3453 3454 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { 3455 blkaddr = f2fs_data_blkaddr(dn); 3456 3457 if (i == 0) { 3458 if (blkaddr == COMPRESS_ADDR) 3459 continue; 3460 dn->ofs_in_node += cluster_size; 3461 goto next; 3462 } 3463 3464 if
(__is_valid_data_blkaddr(blkaddr)) 3465 compr_blocks++; 3466 3467 if (blkaddr != NEW_ADDR) 3468 continue; 3469 3470 dn->data_blkaddr = NULL_ADDR; 3471 f2fs_set_data_blkaddr(dn); 3472 } 3473 3474 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false); 3475 dec_valid_block_count(sbi, dn->inode, 3476 cluster_size - compr_blocks); 3477 3478 released_blocks += cluster_size - compr_blocks; 3479 next: 3480 count -= cluster_size; 3481 } 3482 3483 return released_blocks; 3484 } 3485 3486 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) 3487 { 3488 struct inode *inode = file_inode(filp); 3489 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3490 pgoff_t page_idx = 0, last_idx; 3491 unsigned int released_blocks = 0; 3492 int ret; 3493 int writecount; 3494 3495 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 3496 return -EOPNOTSUPP; 3497 3498 if (!f2fs_compressed_file(inode)) 3499 return -EINVAL; 3500 3501 if (f2fs_readonly(sbi->sb)) 3502 return -EROFS; 3503 3504 ret = mnt_want_write_file(filp); 3505 if (ret) 3506 return ret; 3507 3508 f2fs_balance_fs(F2FS_I_SB(inode), true); 3509 3510 inode_lock(inode); 3511 3512 writecount = atomic_read(&inode->i_writecount); 3513 if ((filp->f_mode & FMODE_WRITE && writecount != 1) || 3514 (!(filp->f_mode & FMODE_WRITE) && writecount)) { 3515 ret = -EBUSY; 3516 goto out; 3517 } 3518 3519 if (IS_IMMUTABLE(inode)) { 3520 ret = -EINVAL; 3521 goto out; 3522 } 3523 3524 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 3525 if (ret) 3526 goto out; 3527 3528 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL; 3529 f2fs_set_inode_flags(inode); 3530 inode->i_ctime = current_time(inode); 3531 f2fs_mark_inode_dirty_sync(inode, true); 3532 3533 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) 3534 goto out; 3535 3536 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3537 down_write(&F2FS_I(inode)->i_mmap_sem); 3538 3539 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3540 3541 while (page_idx < last_idx) { 3542 struct dnode_of_data dn; 3543 pgoff_t end_offset, count; 3544 3545 set_new_dnode(&dn, inode, NULL, NULL, 0); 3546 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); 3547 if (ret) { 3548 if (ret == -ENOENT) { 3549 page_idx = f2fs_get_next_page_offset(&dn, 3550 page_idx); 3551 ret = 0; 3552 continue; 3553 } 3554 break; 3555 } 3556 3557 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 3558 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); 3559 count = round_up(count, F2FS_I(inode)->i_cluster_size); 3560 3561 ret = release_compress_blocks(&dn, count); 3562 3563 f2fs_put_dnode(&dn); 3564 3565 if (ret < 0) 3566 break; 3567 3568 page_idx += count; 3569 released_blocks += ret; 3570 } 3571 3572 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3573 up_write(&F2FS_I(inode)->i_mmap_sem); 3574 out: 3575 inode_unlock(inode); 3576 3577 mnt_drop_write_file(filp); 3578 3579 if (ret >= 0) { 3580 ret = put_user(released_blocks, (u64 __user *)arg); 3581 } else if (released_blocks && 3582 atomic_read(&F2FS_I(inode)->i_compr_blocks)) { 3583 set_sbi_flag(sbi, SBI_NEED_FSCK); 3584 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " 3585 "iblocks=%llu, released=%u, compr_blocks=%u, " 3586 "run fsck to fix.", 3587 __func__, inode->i_ino, inode->i_blocks, 3588 released_blocks, 3589 atomic_read(&F2FS_I(inode)->i_compr_blocks)); 3590 } 3591 3592 return ret; 3593 } 3594 3595 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count) 3596 { 3597 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 3598 unsigned int 
reserved_blocks = 0; 3599 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; 3600 block_t blkaddr; 3601 int i; 3602 3603 for (i = 0; i < count; i++) { 3604 blkaddr = data_blkaddr(dn->inode, dn->node_page, 3605 dn->ofs_in_node + i); 3606 3607 if (!__is_valid_data_blkaddr(blkaddr)) 3608 continue; 3609 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, 3610 DATA_GENERIC_ENHANCE))) 3611 return -EFSCORRUPTED; 3612 } 3613 3614 while (count) { 3615 int compr_blocks = 0; 3616 blkcnt_t reserved; 3617 int ret; 3618 3619 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { 3620 blkaddr = f2fs_data_blkaddr(dn); 3621 3622 if (i == 0) { 3623 if (blkaddr == COMPRESS_ADDR) 3624 continue; 3625 dn->ofs_in_node += cluster_size; 3626 goto next; 3627 } 3628 3629 if (__is_valid_data_blkaddr(blkaddr)) { 3630 compr_blocks++; 3631 continue; 3632 } 3633 3634 dn->data_blkaddr = NEW_ADDR; 3635 f2fs_set_data_blkaddr(dn); 3636 } 3637 3638 reserved = cluster_size - compr_blocks; 3639 ret = inc_valid_block_count(sbi, dn->inode, &reserved); 3640 if (ret) 3641 return ret; 3642 3643 if (reserved != cluster_size - compr_blocks) 3644 return -ENOSPC; 3645 3646 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true); 3647 3648 reserved_blocks += reserved; 3649 next: 3650 count -= cluster_size; 3651 } 3652 3653 return reserved_blocks; 3654 } 3655 3656 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) 3657 { 3658 struct inode *inode = file_inode(filp); 3659 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3660 pgoff_t page_idx = 0, last_idx; 3661 unsigned int reserved_blocks = 0; 3662 int ret; 3663 3664 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 3665 return -EOPNOTSUPP; 3666 3667 if (!f2fs_compressed_file(inode)) 3668 return -EINVAL; 3669 3670 if (f2fs_readonly(sbi->sb)) 3671 return -EROFS; 3672 3673 ret = mnt_want_write_file(filp); 3674 if (ret) 3675 return ret; 3676 3677 if (atomic_read(&F2FS_I(inode)->i_compr_blocks)) 3678 goto out; 3679 3680 f2fs_balance_fs(F2FS_I_SB(inode), true); 3681 3682 inode_lock(inode); 3683 3684 if (!IS_IMMUTABLE(inode)) { 3685 ret = -EINVAL; 3686 goto unlock_inode; 3687 } 3688 3689 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3690 down_write(&F2FS_I(inode)->i_mmap_sem); 3691 3692 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3693 3694 while (page_idx < last_idx) { 3695 struct dnode_of_data dn; 3696 pgoff_t end_offset, count; 3697 3698 set_new_dnode(&dn, inode, NULL, NULL, 0); 3699 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); 3700 if (ret) { 3701 if (ret == -ENOENT) { 3702 page_idx = f2fs_get_next_page_offset(&dn, 3703 page_idx); 3704 ret = 0; 3705 continue; 3706 } 3707 break; 3708 } 3709 3710 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 3711 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); 3712 count = round_up(count, F2FS_I(inode)->i_cluster_size); 3713 3714 ret = reserve_compress_blocks(&dn, count); 3715 3716 f2fs_put_dnode(&dn); 3717 3718 if (ret < 0) 3719 break; 3720 3721 page_idx += count; 3722 reserved_blocks += ret; 3723 } 3724 3725 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3726 up_write(&F2FS_I(inode)->i_mmap_sem); 3727 3728 if (ret >= 0) { 3729 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL; 3730 f2fs_set_inode_flags(inode); 3731 inode->i_ctime = current_time(inode); 3732 f2fs_mark_inode_dirty_sync(inode, true); 3733 } 3734 unlock_inode: 3735 inode_unlock(inode); 3736 out: 3737 mnt_drop_write_file(filp); 3738 3739 if (ret >= 0) { 3740 ret = put_user(reserved_blocks, (u64 __user *)arg); 3741 } else if 
(reserved_blocks && 3742 atomic_read(&F2FS_I(inode)->i_compr_blocks)) { 3743 set_sbi_flag(sbi, SBI_NEED_FSCK); 3744 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx " 3745 "iblocks=%llu, reserved=%u, compr_blocks=%u, " 3746 "run fsck to fix.", 3747 __func__, inode->i_ino, inode->i_blocks, 3748 reserved_blocks, 3749 atomic_read(&F2FS_I(inode)->i_compr_blocks)); 3750 } 3751 3752 return ret; 3753 } 3754 3755 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode, 3756 pgoff_t off, block_t block, block_t len, u32 flags) 3757 { 3758 struct request_queue *q = bdev_get_queue(bdev); 3759 sector_t sector = SECTOR_FROM_BLOCK(block); 3760 sector_t nr_sects = SECTOR_FROM_BLOCK(len); 3761 int ret = 0; 3762 3763 if (!q) 3764 return -ENXIO; 3765 3766 if (flags & F2FS_TRIM_FILE_DISCARD) 3767 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 3768 blk_queue_secure_erase(q) ? 3769 BLKDEV_DISCARD_SECURE : 0); 3770 3771 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) { 3772 if (IS_ENCRYPTED(inode)) 3773 ret = fscrypt_zeroout_range(inode, off, block, len); 3774 else 3775 ret = blkdev_issue_zeroout(bdev, sector, nr_sects, 3776 GFP_NOFS, 0); 3777 } 3778 3779 return ret; 3780 } 3781 3782 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) 3783 { 3784 struct inode *inode = file_inode(filp); 3785 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3786 struct address_space *mapping = inode->i_mapping; 3787 struct block_device *prev_bdev = NULL; 3788 struct f2fs_sectrim_range range; 3789 pgoff_t index, pg_end, prev_index = 0; 3790 block_t prev_block = 0, len = 0; 3791 loff_t end_addr; 3792 bool to_end = false; 3793 int ret = 0; 3794 3795 if (!(filp->f_mode & FMODE_WRITE)) 3796 return -EBADF; 3797 3798 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg, 3799 sizeof(range))) 3800 return -EFAULT; 3801 3802 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) || 3803 !S_ISREG(inode->i_mode)) 3804 return -EINVAL; 3805 3806 if (((range.flags & F2FS_TRIM_FILE_DISCARD) && 3807 !f2fs_hw_support_discard(sbi)) || 3808 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) && 3809 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi))) 3810 return -EOPNOTSUPP; 3811 3812 file_start_write(filp); 3813 inode_lock(inode); 3814 3815 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) || 3816 range.start >= inode->i_size) { 3817 ret = -EINVAL; 3818 goto err; 3819 } 3820 3821 if (range.len == 0) 3822 goto err; 3823 3824 if (inode->i_size - range.start > range.len) { 3825 end_addr = range.start + range.len; 3826 } else { 3827 end_addr = range.len == (u64)-1 ? 3828 sbi->sb->s_maxbytes : inode->i_size; 3829 to_end = true; 3830 } 3831 3832 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) || 3833 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) { 3834 ret = -EINVAL; 3835 goto err; 3836 } 3837 3838 index = F2FS_BYTES_TO_BLK(range.start); 3839 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE); 3840 3841 ret = f2fs_convert_inline_inode(inode); 3842 if (ret) 3843 goto err; 3844 3845 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3846 down_write(&F2FS_I(inode)->i_mmap_sem); 3847 3848 ret = filemap_write_and_wait_range(mapping, range.start, 3849 to_end ? LLONG_MAX : end_addr - 1); 3850 if (ret) 3851 goto out; 3852 3853 truncate_inode_pages_range(mapping, range.start, 3854 to_end ?
-1 : end_addr - 1); 3855 3856 while (index < pg_end) { 3857 struct dnode_of_data dn; 3858 pgoff_t end_offset, count; 3859 int i; 3860 3861 set_new_dnode(&dn, inode, NULL, NULL, 0); 3862 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); 3863 if (ret) { 3864 if (ret == -ENOENT) { 3865 index = f2fs_get_next_page_offset(&dn, index); 3866 continue; 3867 } 3868 goto out; 3869 } 3870 3871 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 3872 count = min(end_offset - dn.ofs_in_node, pg_end - index); 3873 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) { 3874 struct block_device *cur_bdev; 3875 block_t blkaddr = f2fs_data_blkaddr(&dn); 3876 3877 if (!__is_valid_data_blkaddr(blkaddr)) 3878 continue; 3879 3880 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 3881 DATA_GENERIC_ENHANCE)) { 3882 ret = -EFSCORRUPTED; 3883 f2fs_put_dnode(&dn); 3884 goto out; 3885 } 3886 3887 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL); 3888 if (f2fs_is_multi_device(sbi)) { 3889 int di = f2fs_target_device_index(sbi, blkaddr); 3890 3891 blkaddr -= FDEV(di).start_blk; 3892 } 3893 3894 if (len) { 3895 if (prev_bdev == cur_bdev && 3896 index == prev_index + len && 3897 blkaddr == prev_block + len) { 3898 len++; 3899 } else { 3900 ret = f2fs_secure_erase(prev_bdev, 3901 inode, prev_index, prev_block, 3902 len, range.flags); 3903 if (ret) { 3904 f2fs_put_dnode(&dn); 3905 goto out; 3906 } 3907 3908 len = 0; 3909 } 3910 } 3911 3912 if (!len) { 3913 prev_bdev = cur_bdev; 3914 prev_index = index; 3915 prev_block = blkaddr; 3916 len = 1; 3917 } 3918 } 3919 3920 f2fs_put_dnode(&dn); 3921 3922 if (fatal_signal_pending(current)) { 3923 ret = -EINTR; 3924 goto out; 3925 } 3926 cond_resched(); 3927 } 3928 3929 if (len) 3930 ret = f2fs_secure_erase(prev_bdev, inode, prev_index, 3931 prev_block, len, range.flags); 3932 out: 3933 up_write(&F2FS_I(inode)->i_mmap_sem); 3934 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3935 err: 3936 inode_unlock(inode); 3937 file_end_write(filp); 3938 3939 return ret; 3940 } 3941 3942 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 3943 { 3944 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) 3945 return -EIO; 3946 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp)))) 3947 return -ENOSPC; 3948 3949 switch (cmd) { 3950 case FS_IOC_GETFLAGS: 3951 return f2fs_ioc_getflags(filp, arg); 3952 case FS_IOC_SETFLAGS: 3953 return f2fs_ioc_setflags(filp, arg); 3954 case FS_IOC_GETVERSION: 3955 return f2fs_ioc_getversion(filp, arg); 3956 case F2FS_IOC_START_ATOMIC_WRITE: 3957 return f2fs_ioc_start_atomic_write(filp); 3958 case F2FS_IOC_COMMIT_ATOMIC_WRITE: 3959 return f2fs_ioc_commit_atomic_write(filp); 3960 case F2FS_IOC_START_VOLATILE_WRITE: 3961 return f2fs_ioc_start_volatile_write(filp); 3962 case F2FS_IOC_RELEASE_VOLATILE_WRITE: 3963 return f2fs_ioc_release_volatile_write(filp); 3964 case F2FS_IOC_ABORT_VOLATILE_WRITE: 3965 return f2fs_ioc_abort_volatile_write(filp); 3966 case F2FS_IOC_SHUTDOWN: 3967 return f2fs_ioc_shutdown(filp, arg); 3968 case FITRIM: 3969 return f2fs_ioc_fitrim(filp, arg); 3970 case FS_IOC_SET_ENCRYPTION_POLICY: 3971 return f2fs_ioc_set_encryption_policy(filp, arg); 3972 case FS_IOC_GET_ENCRYPTION_POLICY: 3973 return f2fs_ioc_get_encryption_policy(filp, arg); 3974 case FS_IOC_GET_ENCRYPTION_PWSALT: 3975 return f2fs_ioc_get_encryption_pwsalt(filp, arg); 3976 case FS_IOC_GET_ENCRYPTION_POLICY_EX: 3977 return f2fs_ioc_get_encryption_policy_ex(filp, arg); 3978 case FS_IOC_ADD_ENCRYPTION_KEY: 3979 return f2fs_ioc_add_encryption_key(filp, arg); 3980 case 
FS_IOC_REMOVE_ENCRYPTION_KEY: 3981 return f2fs_ioc_remove_encryption_key(filp, arg); 3982 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: 3983 return f2fs_ioc_remove_encryption_key_all_users(filp, arg); 3984 case FS_IOC_GET_ENCRYPTION_KEY_STATUS: 3985 return f2fs_ioc_get_encryption_key_status(filp, arg); 3986 case FS_IOC_GET_ENCRYPTION_NONCE: 3987 return f2fs_ioc_get_encryption_nonce(filp, arg); 3988 case F2FS_IOC_GARBAGE_COLLECT: 3989 return f2fs_ioc_gc(filp, arg); 3990 case F2FS_IOC_GARBAGE_COLLECT_RANGE: 3991 return f2fs_ioc_gc_range(filp, arg); 3992 case F2FS_IOC_WRITE_CHECKPOINT: 3993 return f2fs_ioc_write_checkpoint(filp, arg); 3994 case F2FS_IOC_DEFRAGMENT: 3995 return f2fs_ioc_defragment(filp, arg); 3996 case F2FS_IOC_MOVE_RANGE: 3997 return f2fs_ioc_move_range(filp, arg); 3998 case F2FS_IOC_FLUSH_DEVICE: 3999 return f2fs_ioc_flush_device(filp, arg); 4000 case F2FS_IOC_GET_FEATURES: 4001 return f2fs_ioc_get_features(filp, arg); 4002 case FS_IOC_FSGETXATTR: 4003 return f2fs_ioc_fsgetxattr(filp, arg); 4004 case FS_IOC_FSSETXATTR: 4005 return f2fs_ioc_fssetxattr(filp, arg); 4006 case F2FS_IOC_GET_PIN_FILE: 4007 return f2fs_ioc_get_pin_file(filp, arg); 4008 case F2FS_IOC_SET_PIN_FILE: 4009 return f2fs_ioc_set_pin_file(filp, arg); 4010 case F2FS_IOC_PRECACHE_EXTENTS: 4011 return f2fs_ioc_precache_extents(filp, arg); 4012 case F2FS_IOC_RESIZE_FS: 4013 return f2fs_ioc_resize_fs(filp, arg); 4014 case FS_IOC_ENABLE_VERITY: 4015 return f2fs_ioc_enable_verity(filp, arg); 4016 case FS_IOC_MEASURE_VERITY: 4017 return f2fs_ioc_measure_verity(filp, arg); 4018 case FS_IOC_GETFSLABEL: 4019 return f2fs_ioc_getfslabel(filp, arg); 4020 case FS_IOC_SETFSLABEL: 4021 return f2fs_ioc_setfslabel(filp, arg); 4022 case F2FS_IOC_GET_COMPRESS_BLOCKS: 4023 return f2fs_get_compress_blocks(filp, arg); 4024 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: 4025 return f2fs_release_compress_blocks(filp, arg); 4026 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: 4027 return f2fs_reserve_compress_blocks(filp, arg); 4028 case F2FS_IOC_SEC_TRIM_FILE: 4029 return f2fs_sec_trim_file(filp, arg); 4030 default: 4031 return -ENOTTY; 4032 } 4033 } 4034 4035 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 4036 { 4037 struct file *file = iocb->ki_filp; 4038 struct inode *inode = file_inode(file); 4039 int ret; 4040 4041 if (!f2fs_is_compress_backend_ready(inode)) 4042 return -EOPNOTSUPP; 4043 4044 ret = generic_file_read_iter(iocb, iter); 4045 4046 if (ret > 0) 4047 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret); 4048 4049 return ret; 4050 } 4051 4052 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 4053 { 4054 struct file *file = iocb->ki_filp; 4055 struct inode *inode = file_inode(file); 4056 ssize_t ret; 4057 4058 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { 4059 ret = -EIO; 4060 goto out; 4061 } 4062 4063 if (!f2fs_is_compress_backend_ready(inode)) { 4064 ret = -EOPNOTSUPP; 4065 goto out; 4066 } 4067 4068 if (iocb->ki_flags & IOCB_NOWAIT) { 4069 if (!inode_trylock(inode)) { 4070 ret = -EAGAIN; 4071 goto out; 4072 } 4073 } else { 4074 inode_lock(inode); 4075 } 4076 4077 ret = generic_write_checks(iocb, from); 4078 if (ret > 0) { 4079 bool preallocated = false; 4080 size_t target_size = 0; 4081 int err; 4082 4083 if (iov_iter_fault_in_readable(from, iov_iter_count(from))) 4084 set_inode_flag(inode, FI_NO_PREALLOC); 4085 4086 if ((iocb->ki_flags & IOCB_NOWAIT)) { 4087 if (!f2fs_overwrite_io(inode, iocb->ki_pos, 4088 iov_iter_count(from)) || 4089 f2fs_has_inline_data(inode) || 
4090 f2fs_force_buffered_io(inode, iocb, from)) { 4091 clear_inode_flag(inode, FI_NO_PREALLOC); 4092 inode_unlock(inode); 4093 ret = -EAGAIN; 4094 goto out; 4095 } 4096 goto write; 4097 } 4098 4099 if (is_inode_flag_set(inode, FI_NO_PREALLOC)) 4100 goto write; 4101 4102 if (iocb->ki_flags & IOCB_DIRECT) { 4103 /* 4104 * Convert inline data for Direct I/O before entering 4105 * f2fs_direct_IO(). 4106 */ 4107 err = f2fs_convert_inline_inode(inode); 4108 if (err) 4109 goto out_err; 4110 /* 4111 * If f2fs_force_buffered_io() is true, we have to allocate 4112 * blocks all the time, since f2fs_direct_IO will fall 4113 * back to buffered IO. 4114 */ 4115 if (!f2fs_force_buffered_io(inode, iocb, from) && 4116 allow_outplace_dio(inode, iocb, from)) 4117 goto write; 4118 } 4119 preallocated = true; 4120 target_size = iocb->ki_pos + iov_iter_count(from); 4121 4122 err = f2fs_preallocate_blocks(iocb, from); 4123 if (err) { 4124 out_err: 4125 clear_inode_flag(inode, FI_NO_PREALLOC); 4126 inode_unlock(inode); 4127 ret = err; 4128 goto out; 4129 } 4130 write: 4131 ret = __generic_file_write_iter(iocb, from); 4132 clear_inode_flag(inode, FI_NO_PREALLOC); 4133 4134 /* if we couldn't write the data, deallocate the preallocated blocks */ 4135 if (preallocated && i_size_read(inode) < target_size) 4136 f2fs_truncate(inode); 4137 4138 if (ret > 0) 4139 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret); 4140 } 4141 inode_unlock(inode); 4142 out: 4143 trace_f2fs_file_write_iter(inode, iocb->ki_pos, 4144 iov_iter_count(from), ret); 4145 if (ret > 0) 4146 ret = generic_write_sync(iocb, ret); 4147 return ret; 4148 } 4149 4150 #ifdef CONFIG_COMPAT 4151 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 4152 { 4153 switch (cmd) { 4154 case FS_IOC32_GETFLAGS: 4155 cmd = FS_IOC_GETFLAGS; 4156 break; 4157 case FS_IOC32_SETFLAGS: 4158 cmd = FS_IOC_SETFLAGS; 4159 break; 4160 case FS_IOC32_GETVERSION: 4161 cmd = FS_IOC_GETVERSION; 4162 break; 4163 case F2FS_IOC_START_ATOMIC_WRITE: 4164 case F2FS_IOC_COMMIT_ATOMIC_WRITE: 4165 case F2FS_IOC_START_VOLATILE_WRITE: 4166 case F2FS_IOC_RELEASE_VOLATILE_WRITE: 4167 case F2FS_IOC_ABORT_VOLATILE_WRITE: 4168 case F2FS_IOC_SHUTDOWN: 4169 case FITRIM: 4170 case FS_IOC_SET_ENCRYPTION_POLICY: 4171 case FS_IOC_GET_ENCRYPTION_PWSALT: 4172 case FS_IOC_GET_ENCRYPTION_POLICY: 4173 case FS_IOC_GET_ENCRYPTION_POLICY_EX: 4174 case FS_IOC_ADD_ENCRYPTION_KEY: 4175 case FS_IOC_REMOVE_ENCRYPTION_KEY: 4176 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: 4177 case FS_IOC_GET_ENCRYPTION_KEY_STATUS: 4178 case FS_IOC_GET_ENCRYPTION_NONCE: 4179 case F2FS_IOC_GARBAGE_COLLECT: 4180 case F2FS_IOC_GARBAGE_COLLECT_RANGE: 4181 case F2FS_IOC_WRITE_CHECKPOINT: 4182 case F2FS_IOC_DEFRAGMENT: 4183 case F2FS_IOC_MOVE_RANGE: 4184 case F2FS_IOC_FLUSH_DEVICE: 4185 case F2FS_IOC_GET_FEATURES: 4186 case FS_IOC_FSGETXATTR: 4187 case FS_IOC_FSSETXATTR: 4188 case F2FS_IOC_GET_PIN_FILE: 4189 case F2FS_IOC_SET_PIN_FILE: 4190 case F2FS_IOC_PRECACHE_EXTENTS: 4191 case F2FS_IOC_RESIZE_FS: 4192 case FS_IOC_ENABLE_VERITY: 4193 case FS_IOC_MEASURE_VERITY: 4194 case FS_IOC_GETFSLABEL: 4195 case FS_IOC_SETFSLABEL: 4196 case F2FS_IOC_GET_COMPRESS_BLOCKS: 4197 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: 4198 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: 4199 case F2FS_IOC_SEC_TRIM_FILE: 4200 break; 4201 default: 4202 return -ENOIOCTLCMD; 4203 } 4204 return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); 4205 } 4206 #endif 4207 4208 const struct file_operations f2fs_file_operations = { 4209 .llseek = f2fs_llseek, 4210
.read_iter = f2fs_file_read_iter, 4211 .write_iter = f2fs_file_write_iter, 4212 .open = f2fs_file_open, 4213 .release = f2fs_release_file, 4214 .mmap = f2fs_file_mmap, 4215 .flush = f2fs_file_flush, 4216 .fsync = f2fs_sync_file, 4217 .fallocate = f2fs_fallocate, 4218 .unlocked_ioctl = f2fs_ioctl, 4219 #ifdef CONFIG_COMPAT 4220 .compat_ioctl = f2fs_compat_ioctl, 4221 #endif 4222 .splice_read = generic_file_splice_read, 4223 .splice_write = iter_file_splice_write, 4224 }; 4225
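/*
 * Illustrative userspace sketch (not part of the kernel build) for the
 * shutdown ioctl dispatched by f2fs_ioctl() above, assuming the
 * F2FS_IOC_SHUTDOWN and F2FS_GOING_DOWN_* definitions from this tree's
 * headers; the caller needs CAP_SYS_ADMIN:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;	// checkpoint, then stop
 *
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 */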