// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

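/*
 * Read fault handler: serialize against truncation and hole punching by
 * taking i_mmap_sem shared around the generic filemap_fault().
 */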
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn = { .node_changed = false };
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_put_dnode(&dn);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_balance_fs(sbi, dn.node_changed);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

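/*
 * Decide whether this fsync can rely on roll-forward recovery alone or
 * must trigger a full checkpoint; returns the reason a checkpoint is
 * needed, or CP_NO_NEEDED when node-level fsync is sufficient.
 */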
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we still need to catch any pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

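/*
 * Core fsync/fdatasync implementation: write back dirty data pages,
 * then either trigger a checkpoint (when need_do_checkpoint() says the
 * inode cannot be recovered by roll-forward) or persist the node chain
 * and issue a flush so the file is recoverable after power loss.
 */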
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, always make it recoverable */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* write_inode may have been called just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() must be recoverable after a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if a checkpoint error occurred, avoid an infinite loop here */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, keeping write ordering is enough, so we
	 * don't need to wait for node write completion here: the node chain
	 * serializes node blocks. If one of the node writes is reordered, we
	 * simply see a broken chain and stop roll-forward recovery there,
	 * which means we recover either all or none of the node blocks
	 * covered by the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			is_valid_data_blkaddr(sbi, blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

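/*
 * Implement SEEK_DATA/SEEK_HOLE by walking dnode blocks and inspecting
 * per-block addresses; a dirty page in the page cache (NEW_ADDR) also
 * counts as data even before its block is allocated on disk.
 */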
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

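/*
 * Free 'count' block addresses starting at dn->ofs_in_node in the given
 * dnode: clear each valid blkaddr, invalidate the blocks, and shrink the
 * extent cache and the inode's valid block count accordingly.
 */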
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
			continue;

		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate a valid blkaddr in the range
		 * [ofs, ofs + count], we invalidate all blkaddrs in the
		 * whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key with which to truncate the last page */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

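/*
 * Drop all blocks at and beyond the 'from' byte offset: handle inline
 * data, free whole blocks via the dnode walk, and finally zero the tail
 * of the partially-truncated page.
 */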
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
							bool buf_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;
	int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		__do_map_lock(sbi, flag, true);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		__do_map_lock(sbi, flag, false);
free_partial:
	/* lastly zero out the tail of the partially-truncated data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		bool to_smaller = (attr->ia_size <= i_size_read(inode));

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (to_smaller)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		if (err)
			return err;

		if (!to_smaller) {
			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* the file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

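/*
 * Zero 'len' bytes of the page at 'index' starting at 'start', allocating
 * a new data page if necessary; used by hole punching and zero range to
 * clear the partial head/tail pages of a request.
 */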
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

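/*
 * FALLOC_FL_PUNCH_HOLE helper: zero the partial pages at both ends of
 * the range with fill_zero(), then drop the page cache and free the
 * fully-covered blocks in between via f2fs_truncate_hole().
 */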
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

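/*
 * Move 'len' blocks from 'src' in src_inode to 'dst' in dst_inode,
 * batching at most four dnodes' worth of addresses at a time: read out
 * the source blkaddrs, replace or copy them at the destination, and
 * roll the source back if anything fails mid-way.
 */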
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true, false);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

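/*
 * FALLOC_FL_ZERO_RANGE helper: for each block in [start, end), reserve a
 * new block if it is a hole, or invalidate the old block and remap it to
 * NEW_ADDR so subsequent reads see zeroes.
 */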
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks does not guarantee that the entire
		 * range gets allocated.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

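/*
 * Default fallocate path: map and allocate blocks for the requested
 * range, then either extend i_size or, with FALLOC_FL_KEEP_SIZE, mark
 * the inode with keep-isize instead.
 */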
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

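/*
 * Top-level fallocate dispatcher. Exactly one of the punch-hole,
 * collapse, zero-range, and insert-range modes is handled per call;
 * anything else falls through to plain preallocation.
 */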
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should not drop
	 * any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close their files. Since this should be done
	 * before the file lock is dropped, it has to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags;

	if (f2fs_encrypted_inode(inode))
		flags |= F2FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		flags |= F2FS_INLINE_DATA_FL;

	flags &= F2FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & F2FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

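/*
 * Atomic write ioctls. A hypothetical userspace caller (a sketch only,
 * not taken from this file) pairs them like:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);			// data staged in memory
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	// all-or-nothing commit
 *
 * Until commit, the staged pages are tracked as in-memory pages and can
 * be dropped as a unit on abort or crash.
 */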
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (!get_dirty_pages(inode))
		goto skip_flush;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}
skip_flush:
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

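/*
 * F2FS_IOC_SHUTDOWN: bring the filesystem down in one of several modes
 * (full sync via freeze_bdev, checkpoint-only, no sync, meta flush, or
 * mark-for-fsck), then stop the GC and discard threads. A hypothetical
 * caller (sketch only, not from this file):
 *
 *	__u32 mode = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &mode);
 */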
1919 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
1920 {
1921 	struct inode *inode = file_inode(filp);
1922 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1923 	struct super_block *sb = sbi->sb;
1924 	__u32 in;
1925 	int ret = 0;
1926 
1927 	if (!capable(CAP_SYS_ADMIN))
1928 		return -EPERM;
1929 
1930 	if (get_user(in, (__u32 __user *)arg))
1931 		return -EFAULT;
1932 
1933 	if (in != F2FS_GOING_DOWN_FULLSYNC) {
1934 		ret = mnt_want_write_file(filp);
1935 		if (ret)
1936 			return ret;
1937 	}
1938 
1939 	switch (in) {
1940 	case F2FS_GOING_DOWN_FULLSYNC:
1941 		sb = freeze_bdev(sb->s_bdev);
1942 		if (IS_ERR(sb)) {
1943 			ret = PTR_ERR(sb);
1944 			goto out;
1945 		}
1946 		if (sb) {
1947 			f2fs_stop_checkpoint(sbi, false);
1948 			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
1949 			thaw_bdev(sb->s_bdev, sb);
1950 		}
1951 		break;
1952 	case F2FS_GOING_DOWN_METASYNC:
1953 		/* do checkpoint only */
1954 		ret = f2fs_sync_fs(sb, 1);
1955 		if (ret)
1956 			goto out;
1957 		f2fs_stop_checkpoint(sbi, false);
1958 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
1959 		break;
1960 	case F2FS_GOING_DOWN_NOSYNC:
1961 		f2fs_stop_checkpoint(sbi, false);
1962 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
1963 		break;
1964 	case F2FS_GOING_DOWN_METAFLUSH:
1965 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
1966 		f2fs_stop_checkpoint(sbi, false);
1967 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
1968 		break;
1969 	case F2FS_GOING_DOWN_NEED_FSCK:
1970 		set_sbi_flag(sbi, SBI_NEED_FSCK);
1971 		/* do checkpoint only */
1972 		ret = f2fs_sync_fs(sb, 1);
1973 		if (ret)
1974 			goto out;
1975 		break;
1976 	default:
1977 		ret = -EINVAL;
1978 		goto out;
1979 	}
1980 
1981 	f2fs_stop_gc_thread(sbi);
1982 	f2fs_stop_discard_thread(sbi);
1983 
1984 	f2fs_drop_discard_cmd(sbi);
1985 	clear_opt(sbi, DISCARD);
1986 
1987 	f2fs_update_time(sbi, REQ_TIME);
1988 out:
1989 	if (in != F2FS_GOING_DOWN_FULLSYNC)
1990 		mnt_drop_write_file(filp);
1991 	return ret;
1992 }
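
/*
 * Illustrative userspace sketch, an assumption rather than an excerpt from
 * any real tool: triggering the NOSYNC shutdown path above, which stops
 * checkpointing without flushing anything (effectively simulating a crash).
 * The mount point is hypothetical.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	__u32 how = F2FS_GOING_DOWN_NOSYNC;
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_SHUTDOWN, &how) < 0)
 *		perror("F2FS_IOC_SHUTDOWN");
 */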
1993 
1994 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
1995 {
1996 	struct inode *inode = file_inode(filp);
1997 	struct super_block *sb = inode->i_sb;
1998 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
1999 	struct fstrim_range range;
2000 	int ret;
2001 
2002 	if (!capable(CAP_SYS_ADMIN))
2003 		return -EPERM;
2004 
2005 	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2006 		return -EOPNOTSUPP;
2007 
2008 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2009 				sizeof(range)))
2010 		return -EFAULT;
2011 
2012 	ret = mnt_want_write_file(filp);
2013 	if (ret)
2014 		return ret;
2015 
2016 	range.minlen = max((unsigned int)range.minlen,
2017 				q->limits.discard_granularity);
2018 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2019 	mnt_drop_write_file(filp);
2020 	if (ret < 0)
2021 		return ret;
2022 
2023 	if (copy_to_user((struct fstrim_range __user *)arg, &range,
2024 				sizeof(range)))
2025 		return -EFAULT;
2026 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2027 	return 0;
2028 }
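
/*
 * Illustrative userspace sketch (assumption): FITRIM is the generic VFS
 * discard interface, so the same call works here as on other filesystems.
 * The mount point is hypothetical; ULLONG_MAX needs <limits.h>.
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	int fd = open("/mnt/f2fs", O_RDONLY);
 *	if (fd < 0 || ioctl(fd, FITRIM, &r) < 0)
 *		perror("FITRIM");
 *
 * On success, r.len is updated with the number of bytes actually trimmed.
 */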
2029 
2030 static bool uuid_is_nonzero(__u8 u[16])
2031 {
2032 	int i;
2033 
2034 	for (i = 0; i < 16; i++)
2035 		if (u[i])
2036 			return true;
2037 	return false;
2038 }
2039 
2040 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2041 {
2042 	struct inode *inode = file_inode(filp);
2043 
2044 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2045 		return -EOPNOTSUPP;
2046 
2047 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2048 
2049 	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2050 }
2051 
2052 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2053 {
2054 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2055 		return -EOPNOTSUPP;
2056 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2057 }
2058 
2059 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2060 {
2061 	struct inode *inode = file_inode(filp);
2062 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2063 	int err;
2064 
2065 	if (!f2fs_sb_has_encrypt(sbi))
2066 		return -EOPNOTSUPP;
2067 
2068 	err = mnt_want_write_file(filp);
2069 	if (err)
2070 		return err;
2071 
2072 	down_write(&sbi->sb_lock);
2073 
2074 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2075 		goto got_it;
2076 
2077 	/* update superblock with uuid */
2078 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2079 
2080 	err = f2fs_commit_super(sbi, false);
2081 	if (err) {
2082 		/* undo new data */
2083 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2084 		goto out_err;
2085 	}
2086 got_it:
2087 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2088 									16))
2089 		err = -EFAULT;
2090 out_err:
2091 	up_write(&sbi->sb_lock);
2092 	mnt_drop_write_file(filp);
2093 	return err;
2094 }
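
/*
 * Illustrative userspace sketch (assumption): the ioctl copies out the
 * 16-byte per-filesystem password salt generated above.
 *
 *	__u8 salt[16];
 *	if (ioctl(fd, F2FS_IOC_GET_ENCRYPTION_PWSALT, salt) < 0)
 *		perror("F2FS_IOC_GET_ENCRYPTION_PWSALT");
 */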
2095 
2096 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2097 {
2098 	struct inode *inode = file_inode(filp);
2099 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2100 	__u32 sync;
2101 	int ret;
2102 
2103 	if (!capable(CAP_SYS_ADMIN))
2104 		return -EPERM;
2105 
2106 	if (get_user(sync, (__u32 __user *)arg))
2107 		return -EFAULT;
2108 
2109 	if (f2fs_readonly(sbi->sb))
2110 		return -EROFS;
2111 
2112 	ret = mnt_want_write_file(filp);
2113 	if (ret)
2114 		return ret;
2115 
2116 	if (!sync) {
2117 		if (!mutex_trylock(&sbi->gc_mutex)) {
2118 			ret = -EBUSY;
2119 			goto out;
2120 		}
2121 	} else {
2122 		mutex_lock(&sbi->gc_mutex);
2123 	}
2124 
2125 	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2126 out:
2127 	mnt_drop_write_file(filp);
2128 	return ret;
2129 }
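
/*
 * Illustrative userspace sketch (assumption): forcing one garbage-collection
 * pass. A non-zero argument waits on gc_mutex and runs synchronously; zero
 * maps to the trylock/-EBUSY behavior above.
 *
 *	__u32 sync = 1;
 *	if (ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync) < 0)
 *		perror("F2FS_IOC_GARBAGE_COLLECT");
 */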
2130 
2131 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2132 {
2133 	struct inode *inode = file_inode(filp);
2134 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2135 	struct f2fs_gc_range range;
2136 	u64 end;
2137 	int ret;
2138 
2139 	if (!capable(CAP_SYS_ADMIN))
2140 		return -EPERM;
2141 
2142 	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2143 							sizeof(range)))
2144 		return -EFAULT;
2145 
2146 	if (f2fs_readonly(sbi->sb))
2147 		return -EROFS;
2148 
2149 	end = range.start + range.len;
2150 	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi))
2151 		return -EINVAL;
2153 
2154 	ret = mnt_want_write_file(filp);
2155 	if (ret)
2156 		return ret;
2157 
2158 do_more:
2159 	if (!range.sync) {
2160 		if (!mutex_trylock(&sbi->gc_mutex)) {
2161 			ret = -EBUSY;
2162 			goto out;
2163 		}
2164 	} else {
2165 		mutex_lock(&sbi->gc_mutex);
2166 	}
2167 
2168 	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
2169 	range.start += BLKS_PER_SEC(sbi);
2170 	if (range.start <= end)
2171 		goto do_more;
2172 out:
2173 	mnt_drop_write_file(filp);
2174 	return ret;
2175 }
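
/*
 * Illustrative userspace sketch (assumption): struct f2fs_gc_range describes
 * the region in block addresses within the main area, matching the
 * MAIN_BLKADDR()/MAX_BLKADDR() bounds check above; blkaddr and nblocks are
 * placeholders.
 *
 *	struct f2fs_gc_range gr = {
 *		.sync = 1, .start = blkaddr, .len = nblocks,
 *	};
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &gr);
 */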
2176 
2177 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2178 {
2179 	struct inode *inode = file_inode(filp);
2180 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2181 	int ret;
2182 
2183 	if (!capable(CAP_SYS_ADMIN))
2184 		return -EPERM;
2185 
2186 	if (f2fs_readonly(sbi->sb))
2187 		return -EROFS;
2188 
2189 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2190 		f2fs_msg(sbi->sb, KERN_INFO,
2191 			"Skipping checkpoint: checkpoints are currently disabled.");
2192 		return -EINVAL;
2193 	}
2194 
2195 	ret = mnt_want_write_file(filp);
2196 	if (ret)
2197 		return ret;
2198 
2199 	ret = f2fs_sync_fs(sbi->sb, 1);
2200 
2201 	mnt_drop_write_file(filp);
2202 	return ret;
2203 }
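
/*
 * Illustrative userspace sketch (assumption): F2FS_IOC_WRITE_CHECKPOINT
 * takes no argument and simply forces the synchronous f2fs_sync_fs() above.
 *
 *	if (ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT, 0) < 0)
 *		perror("F2FS_IOC_WRITE_CHECKPOINT");
 */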
2204 
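/*
 * Defragmentation works in two passes. Pass one walks the block mapping of
 * the range to decide whether it is fragmented at all and counts the blocks
 * that would move; pass two redirties every mapped page, one segment's worth
 * at a time under FI_DO_DEFRAG, so that writeback reallocates the blocks
 * contiguously.
 */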
2205 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2206 					struct file *filp,
2207 					struct f2fs_defragment *range)
2208 {
2209 	struct inode *inode = file_inode(filp);
2210 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
2211 					.m_seg_type = NO_CHECK_TYPE,
2212 					.m_may_create = false };
2213 	struct extent_info ei = {0, 0, 0};
2214 	pgoff_t pg_start, pg_end, next_pgofs;
2215 	unsigned int blk_per_seg = sbi->blocks_per_seg;
2216 	unsigned int total = 0, sec_num;
2217 	block_t blk_end = 0;
2218 	bool fragmented = false;
2219 	int err;
2220 
2221 	/* if in-place-update policy is enabled, don't waste time here */
2222 	if (f2fs_should_update_inplace(inode, NULL))
2223 		return -EINVAL;
2224 
2225 	pg_start = range->start >> PAGE_SHIFT;
2226 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
2227 
2228 	f2fs_balance_fs(sbi, true);
2229 
2230 	inode_lock(inode);
2231 
2232 	/* writeback all dirty pages in the range */
2233 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2234 						range->start + range->len - 1);
2235 	if (err)
2236 		goto out;
2237 
2238 	/*
2239 	 * look up mapping info in the extent cache, and skip defragmenting if
2240 	 * the physical block addresses are already contiguous.
2241 	 */
2242 	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2243 		if (ei.fofs + ei.len >= pg_end)
2244 			goto out;
2245 	}
2246 
2247 	map.m_lblk = pg_start;
2248 	map.m_next_pgofs = &next_pgofs;
2249 
2250 	/*
2251 	 * look up mapping info in the dnode page cache, and skip defragmenting
2252 	 * if all physical block addresses are contiguous, even if there are
2253 	 * holes in the logical blocks.
2254 	 */
2255 	while (map.m_lblk < pg_end) {
2256 		map.m_len = pg_end - map.m_lblk;
2257 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2258 		if (err)
2259 			goto out;
2260 
2261 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2262 			map.m_lblk = next_pgofs;
2263 			continue;
2264 		}
2265 
2266 		if (blk_end && blk_end != map.m_pblk)
2267 			fragmented = true;
2268 
2269 		/* record the total count of blocks that we're going to move */
2270 		total += map.m_len;
2271 
2272 		blk_end = map.m_pblk + map.m_len;
2273 
2274 		map.m_lblk += map.m_len;
2275 	}
2276 
2277 	if (!fragmented)
2278 		goto out;
2279 
2280 	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
2281 
2282 	/*
2283 	 * make sure there are enough free sections for LFS allocation; this
2284 	 * avoids running defragmentation in SSR mode when free sections are
2285 	 * being consumed intensively
2286 	 */
2287 	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2288 		err = -EAGAIN;
2289 		goto out;
2290 	}
2291 
2292 	map.m_lblk = pg_start;
2293 	map.m_len = pg_end - pg_start;
2294 	total = 0;
2295 
2296 	while (map.m_lblk < pg_end) {
2297 		pgoff_t idx;
2298 		int cnt = 0;
2299 
2300 do_map:
2301 		map.m_len = pg_end - map.m_lblk;
2302 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2303 		if (err)
2304 			goto clear_out;
2305 
2306 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2307 			map.m_lblk = next_pgofs;
2308 			continue;
2309 		}
2310 
2311 		set_inode_flag(inode, FI_DO_DEFRAG);
2312 
2313 		idx = map.m_lblk;
2314 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2315 			struct page *page;
2316 
2317 			page = f2fs_get_lock_data_page(inode, idx, true);
2318 			if (IS_ERR(page)) {
2319 				err = PTR_ERR(page);
2320 				goto clear_out;
2321 			}
2322 
2323 			set_page_dirty(page);
2324 			f2fs_put_page(page, 1);
2325 
2326 			idx++;
2327 			cnt++;
2328 			total++;
2329 		}
2330 
2331 		map.m_lblk = idx;
2332 
2333 		if (idx < pg_end && cnt < blk_per_seg)
2334 			goto do_map;
2335 
2336 		clear_inode_flag(inode, FI_DO_DEFRAG);
2337 
2338 		err = filemap_fdatawrite(inode->i_mapping);
2339 		if (err)
2340 			goto out;
2341 	}
2342 clear_out:
2343 	clear_inode_flag(inode, FI_DO_DEFRAG);
2344 out:
2345 	inode_unlock(inode);
2346 	if (!err)
2347 		range->len = (u64)total << PAGE_SHIFT;
2348 	return err;
2349 }
2350 
2351 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2352 {
2353 	struct inode *inode = file_inode(filp);
2354 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2355 	struct f2fs_defragment range;
2356 	int err;
2357 
2358 	if (!capable(CAP_SYS_ADMIN))
2359 		return -EPERM;
2360 
2361 	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2362 		return -EINVAL;
2363 
2364 	if (f2fs_readonly(sbi->sb))
2365 		return -EROFS;
2366 
2367 	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2368 							sizeof(range)))
2369 		return -EFAULT;
2370 
2371 	/* verify alignment of offset & size */
2372 	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2373 		return -EINVAL;
2374 
2375 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2376 					sbi->max_file_blocks))
2377 		return -EINVAL;
2378 
2379 	err = mnt_want_write_file(filp);
2380 	if (err)
2381 		return err;
2382 
2383 	err = f2fs_defragment_range(sbi, filp, &range);
2384 	mnt_drop_write_file(filp);
2385 
2386 	f2fs_update_time(sbi, REQ_TIME);
2387 	if (err < 0)
2388 		return err;
2389 
2390 	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2391 							sizeof(range)))
2392 		return -EFAULT;
2393 
2394 	return 0;
2395 }
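
/*
 * Illustrative userspace sketch (assumption): both fields are byte values
 * and must be F2FS_BLKSIZE-aligned per the check above; on return, len is
 * rewritten with the number of bytes the kernel actually redirtied.
 *
 *	struct f2fs_defragment df = { .start = 0, .len = 64 << 20 };
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &df) < 0)
 *		perror("F2FS_IOC_DEFRAGMENT");
 */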
2396 
2397 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2398 			struct file *file_out, loff_t pos_out, size_t len)
2399 {
2400 	struct inode *src = file_inode(file_in);
2401 	struct inode *dst = file_inode(file_out);
2402 	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2403 	size_t olen = len, dst_max_i_size = 0;
2404 	size_t dst_osize;
2405 	int ret;
2406 
2407 	if (file_in->f_path.mnt != file_out->f_path.mnt ||
2408 				src->i_sb != dst->i_sb)
2409 		return -EXDEV;
2410 
2411 	if (unlikely(f2fs_readonly(src->i_sb)))
2412 		return -EROFS;
2413 
2414 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2415 		return -EINVAL;
2416 
2417 	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
2418 		return -EOPNOTSUPP;
2419 
2420 	if (src == dst) {
2421 		if (pos_in == pos_out)
2422 			return 0;
2423 		if (pos_out > pos_in && pos_out < pos_in + len)
2424 			return -EINVAL;
2425 	}
2426 
2427 	inode_lock(src);
2428 	if (src != dst) {
2429 		ret = -EBUSY;
2430 		if (!inode_trylock(dst))
2431 			goto out;
2432 	}
2433 
2434 	ret = -EINVAL;
2435 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
2436 		goto out_unlock;
2437 	if (len == 0)
2438 		olen = len = src->i_size - pos_in;
2439 	if (pos_in + len == src->i_size)
2440 		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2441 	if (len == 0) {
2442 		ret = 0;
2443 		goto out_unlock;
2444 	}
2445 
2446 	dst_osize = dst->i_size;
2447 	if (pos_out + olen > dst->i_size)
2448 		dst_max_i_size = pos_out + olen;
2449 
2450 	/* verify the end result is block aligned */
2451 	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2452 			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2453 			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2454 		goto out_unlock;
2455 
2456 	ret = f2fs_convert_inline_inode(src);
2457 	if (ret)
2458 		goto out_unlock;
2459 
2460 	ret = f2fs_convert_inline_inode(dst);
2461 	if (ret)
2462 		goto out_unlock;
2463 
2464 	/* write out all dirty pages from offset */
2465 	ret = filemap_write_and_wait_range(src->i_mapping,
2466 					pos_in, pos_in + len);
2467 	if (ret)
2468 		goto out_unlock;
2469 
2470 	ret = filemap_write_and_wait_range(dst->i_mapping,
2471 					pos_out, pos_out + len);
2472 	if (ret)
2473 		goto out_unlock;
2474 
2475 	f2fs_balance_fs(sbi, true);
2476 
2477 	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2478 	if (src != dst) {
2479 		ret = -EBUSY;
2480 		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2481 			goto out_src;
2482 	}
2483 
2484 	f2fs_lock_op(sbi);
2485 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2486 				pos_out >> F2FS_BLKSIZE_BITS,
2487 				len >> F2FS_BLKSIZE_BITS, false);
2488 
2489 	if (!ret) {
2490 		if (dst_max_i_size)
2491 			f2fs_i_size_write(dst, dst_max_i_size);
2492 		else if (dst_osize != dst->i_size)
2493 			f2fs_i_size_write(dst, dst_osize);
2494 	}
2495 	f2fs_unlock_op(sbi);
2496 
2497 	if (src != dst)
2498 		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2499 out_src:
2500 	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2501 out_unlock:
2502 	if (src != dst)
2503 		inode_unlock(dst);
2504 out:
2505 	inode_unlock(src);
2506 	return ret;
2507 }
2508 
2509 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2510 {
2511 	struct f2fs_move_range range;
2512 	struct fd dst;
2513 	int err;
2514 
2515 	if (!(filp->f_mode & FMODE_READ) ||
2516 			!(filp->f_mode & FMODE_WRITE))
2517 		return -EBADF;
2518 
2519 	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2520 							sizeof(range)))
2521 		return -EFAULT;
2522 
2523 	dst = fdget(range.dst_fd);
2524 	if (!dst.file)
2525 		return -EBADF;
2526 
2527 	if (!(dst.file->f_mode & FMODE_WRITE)) {
2528 		err = -EBADF;
2529 		goto err_out;
2530 	}
2531 
2532 	err = mnt_want_write_file(filp);
2533 	if (err)
2534 		goto err_out;
2535 
2536 	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2537 					range.pos_out, range.len);
2538 
2539 	mnt_drop_write_file(filp);
2540 	if (err)
2541 		goto err_out;
2542 
2543 	if (copy_to_user((struct f2fs_move_range __user *)arg,
2544 						&range, sizeof(range)))
2545 		err = -EFAULT;
2546 err_out:
2547 	fdput(dst);
2548 	return err;
2549 }
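
/*
 * Illustrative userspace sketch (assumption): moving a block-aligned range
 * from one regular, unencrypted f2fs file into another on the same mount.
 * src_fd and dst_fd are hypothetical descriptors.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 1 << 20,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */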
2550 
2551 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2552 {
2553 	struct inode *inode = file_inode(filp);
2554 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2555 	struct sit_info *sm = SIT_I(sbi);
2556 	unsigned int start_segno = 0, end_segno = 0;
2557 	unsigned int dev_start_segno = 0, dev_end_segno = 0;
2558 	struct f2fs_flush_device range;
2559 	int ret;
2560 
2561 	if (!capable(CAP_SYS_ADMIN))
2562 		return -EPERM;
2563 
2564 	if (f2fs_readonly(sbi->sb))
2565 		return -EROFS;
2566 
2567 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2568 		return -EINVAL;
2569 
2570 	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2571 							sizeof(range)))
2572 		return -EFAULT;
2573 
2574 	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
2575 			__is_large_section(sbi)) {
2576 		f2fs_msg(sbi->sb, KERN_WARNING,
2577 			"Can't flush %u in %d for segs_per_sec %u != 1",
2578 				range.dev_num, sbi->s_ndevs,
2579 				sbi->segs_per_sec);
2580 		return -EINVAL;
2581 	}
2582 
2583 	ret = mnt_want_write_file(filp);
2584 	if (ret)
2585 		return ret;
2586 
2587 	if (range.dev_num != 0)
2588 		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2589 	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2590 
2591 	start_segno = sm->last_victim[FLUSH_DEVICE];
2592 	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2593 		start_segno = dev_start_segno;
2594 	end_segno = min(start_segno + range.segments, dev_end_segno);
2595 
2596 	while (start_segno < end_segno) {
2597 		if (!mutex_trylock(&sbi->gc_mutex)) {
2598 			ret = -EBUSY;
2599 			goto out;
2600 		}
2601 		sm->last_victim[GC_CB] = end_segno + 1;
2602 		sm->last_victim[GC_GREEDY] = end_segno + 1;
2603 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2604 		ret = f2fs_gc(sbi, true, true, start_segno);
2605 		if (ret == -EAGAIN)
2606 			ret = 0;
2607 		else if (ret < 0)
2608 			break;
2609 		start_segno++;
2610 	}
2611 out:
2612 	mnt_drop_write_file(filp);
2613 	return ret;
2614 }
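
/*
 * Illustrative userspace sketch (assumption): asking GC to migrate up to
 * 512 segments off device 1 of a multi-device f2fs volume.
 *
 *	struct f2fs_flush_device fr = { .dev_num = 1, .segments = 512 };
 *	ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fr);
 */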
2615 
2616 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2617 {
2618 	struct inode *inode = file_inode(filp);
2619 	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2620 
2621 	/* Always advertise atomic-write support; SQLite on Android probes
2622 	 * this feature bit before deciding to use the atomic-write ioctls. */
2622 	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2623 
2624 	return put_user(sb_feature, (u32 __user *)arg);
2625 }
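
/*
 * Illustrative userspace sketch (assumption):
 *
 *	__u32 feat;
 *	ioctl(fd, F2FS_IOC_GET_FEATURES, &feat);
 *	...test feat against the F2FS_FEATURE_* bits...
 */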
2626 
2627 #ifdef CONFIG_QUOTA
2628 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2629 {
2630 	struct dquot *transfer_to[MAXQUOTAS] = {};
2631 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2632 	struct super_block *sb = sbi->sb;
2633 	int err = 0;
2634 
2635 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2636 	if (!IS_ERR(transfer_to[PRJQUOTA])) {
2637 		err = __dquot_transfer(inode, transfer_to);
2638 		if (err)
2639 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2640 		dqput(transfer_to[PRJQUOTA]);
2641 	}
2642 	return err;
2643 }
2644 
2645 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2646 {
2647 	struct inode *inode = file_inode(filp);
2648 	struct f2fs_inode_info *fi = F2FS_I(inode);
2649 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2650 	struct page *ipage;
2651 	kprojid_t kprojid;
2652 	int err;
2653 
2654 	if (!f2fs_sb_has_project_quota(sbi)) {
2655 		if (projid != F2FS_DEF_PROJID)
2656 			return -EOPNOTSUPP;
2657 		return 0;
2659 	}
2660 
2661 	if (!f2fs_has_extra_attr(inode))
2662 		return -EOPNOTSUPP;
2663 
2664 	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2665 
2666 	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
2667 		return 0;
2668 
2669 	err = -EPERM;
2670 	/* Is it quota file? Do not allow user to mess with it */
2671 	if (IS_NOQUOTA(inode))
2672 		return err;
2673 
2674 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
2675 	if (IS_ERR(ipage))
2676 		return PTR_ERR(ipage);
2677 
2678 	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
2679 								i_projid)) {
2680 		err = -EOVERFLOW;
2681 		f2fs_put_page(ipage, 1);
2682 		return err;
2683 	}
2684 	f2fs_put_page(ipage, 1);
2685 
2686 	err = dquot_initialize(inode);
2687 	if (err)
2688 		return err;
2689 
2690 	f2fs_lock_op(sbi);
2691 	err = f2fs_transfer_project_quota(inode, kprojid);
2692 	if (err)
2693 		goto out_unlock;
2694 
2695 	F2FS_I(inode)->i_projid = kprojid;
2696 	inode->i_ctime = current_time(inode);
2697 	f2fs_mark_inode_dirty_sync(inode, true);
2698 out_unlock:
2699 	f2fs_unlock_op(sbi);
2700 	return err;
2701 }
2702 #else
2703 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2704 {
2705 	return 0;
2706 }
2707 
2708 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2709 {
2710 	if (projid != F2FS_DEF_PROJID)
2711 		return -EOPNOTSUPP;
2712 	return 0;
2713 }
2714 #endif
2715 
2716 /* Transfer internal flags to xflags */
2717 static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
2718 {
2719 	__u32 xflags = 0;
2720 
2721 	if (iflags & F2FS_SYNC_FL)
2722 		xflags |= FS_XFLAG_SYNC;
2723 	if (iflags & F2FS_IMMUTABLE_FL)
2724 		xflags |= FS_XFLAG_IMMUTABLE;
2725 	if (iflags & F2FS_APPEND_FL)
2726 		xflags |= FS_XFLAG_APPEND;
2727 	if (iflags & F2FS_NODUMP_FL)
2728 		xflags |= FS_XFLAG_NODUMP;
2729 	if (iflags & F2FS_NOATIME_FL)
2730 		xflags |= FS_XFLAG_NOATIME;
2731 	if (iflags & F2FS_PROJINHERIT_FL)
2732 		xflags |= FS_XFLAG_PROJINHERIT;
2733 	return xflags;
2734 }
2735 
2736 #define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
2737 				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
2738 				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
2739 
2740 /* Transfer xflags to internal flags */
2741 static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
2742 {
2743 	unsigned long iflags = 0;
2744 
2745 	if (xflags & FS_XFLAG_SYNC)
2746 		iflags |= F2FS_SYNC_FL;
2747 	if (xflags & FS_XFLAG_IMMUTABLE)
2748 		iflags |= F2FS_IMMUTABLE_FL;
2749 	if (xflags & FS_XFLAG_APPEND)
2750 		iflags |= F2FS_APPEND_FL;
2751 	if (xflags & FS_XFLAG_NODUMP)
2752 		iflags |= F2FS_NODUMP_FL;
2753 	if (xflags & FS_XFLAG_NOATIME)
2754 		iflags |= F2FS_NOATIME_FL;
2755 	if (xflags & FS_XFLAG_PROJINHERIT)
2756 		iflags |= F2FS_PROJINHERIT_FL;
2757 
2758 	return iflags;
2759 }
2760 
2761 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
2762 {
2763 	struct inode *inode = file_inode(filp);
2764 	struct f2fs_inode_info *fi = F2FS_I(inode);
2765 	struct fsxattr fa;
2766 
2767 	memset(&fa, 0, sizeof(struct fsxattr));
2768 	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
2769 				F2FS_FL_USER_VISIBLE);
2770 
2771 	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
2772 		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
2773 							fi->i_projid);
2774 
2775 	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
2776 		return -EFAULT;
2777 	return 0;
2778 }
2779 
2780 static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
2781 {
2782 	/*
2783 	 * Project Quota ID state is only allowed to change from within the init
2784 	 * namespace. Enforce that restriction only if we are trying to change
2785 	 * the quota ID state. Everything else is allowed in user namespaces.
2786 	 */
2787 	if (current_user_ns() == &init_user_ns)
2788 		return 0;
2789 
2790 	if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
2791 		return -EINVAL;
2792 
2793 	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
2794 		if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
2795 			return -EINVAL;
2796 	} else {
2797 		if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
2798 			return -EINVAL;
2799 	}
2800 
2801 	return 0;
2802 }
2803 
2804 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2805 {
2806 	struct inode *inode = file_inode(filp);
2807 	struct f2fs_inode_info *fi = F2FS_I(inode);
2808 	struct fsxattr fa;
2809 	unsigned int flags;
2810 	int err;
2811 
2812 	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
2813 		return -EFAULT;
2814 
2815 	/* Make sure caller has proper permission */
2816 	if (!inode_owner_or_capable(inode))
2817 		return -EACCES;
2818 
2819 	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
2820 		return -EOPNOTSUPP;
2821 
2822 	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
2823 	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
2824 		return -EOPNOTSUPP;
2825 
2826 	err = mnt_want_write_file(filp);
2827 	if (err)
2828 		return err;
2829 
2830 	inode_lock(inode);
2831 	err = f2fs_ioctl_check_project(inode, &fa);
2832 	if (err)
2833 		goto out;
2834 	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
2835 				(flags & F2FS_FL_XFLAG_VISIBLE);
2836 	err = __f2fs_ioc_setflags(inode, flags);
2837 	if (err)
2838 		goto out;
2839 
2840 	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
2841 out:
2842 	inode_unlock(inode);
2843 	mnt_drop_write_file(filp);
2844 	return err;
2845 }
2846 
2847 int f2fs_pin_file_control(struct inode *inode, bool inc)
2848 {
2849 	struct f2fs_inode_info *fi = F2FS_I(inode);
2850 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2851 
2852 	/* Use i_gc_failures for normal file as a risk signal. */
2853 	/* Use i_gc_failures as a risk signal for a normal file. */
2854 		f2fs_i_gc_failures_write(inode,
2855 				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
2856 
2857 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
2858 		f2fs_msg(sbi->sb, KERN_WARNING,
2859 			"%s: Enable GC = ino %lx after %x GC trials",
2860 			__func__, inode->i_ino,
2861 			fi->i_gc_failures[GC_FAILURE_PIN]);
2862 		clear_inode_flag(inode, FI_PIN_FILE);
2863 		return -EAGAIN;
2864 	}
2865 	return 0;
2866 }
2867 
2868 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
2869 {
2870 	struct inode *inode = file_inode(filp);
2871 	__u32 pin;
2872 	int ret = 0;
2873 
2874 	if (!inode_owner_or_capable(inode))
2875 		return -EACCES;
2876 
2877 	if (get_user(pin, (__u32 __user *)arg))
2878 		return -EFAULT;
2879 
2880 	if (!S_ISREG(inode->i_mode))
2881 		return -EINVAL;
2882 
2883 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
2884 		return -EROFS;
2885 
2886 	ret = mnt_want_write_file(filp);
2887 	if (ret)
2888 		return ret;
2889 
2890 	inode_lock(inode);
2891 
2892 	if (f2fs_should_update_outplace(inode, NULL)) {
2893 		ret = -EINVAL;
2894 		goto out;
2895 	}
2896 
2897 	if (!pin) {
2898 		clear_inode_flag(inode, FI_PIN_FILE);
2899 		f2fs_i_gc_failures_write(inode, 0);
2900 		goto done;
2901 	}
2902 
2903 	if (f2fs_pin_file_control(inode, false)) {
2904 		ret = -EAGAIN;
2905 		goto out;
2906 	}
2907 	ret = f2fs_convert_inline_inode(inode);
2908 	if (ret)
2909 		goto out;
2910 
2911 	set_inode_flag(inode, FI_PIN_FILE);
2912 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
2913 done:
2914 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2915 out:
2916 	inode_unlock(inode);
2917 	mnt_drop_write_file(filp);
2918 	return ret;
2919 }
2920 
2921 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
2922 {
2923 	struct inode *inode = file_inode(filp);
2924 	__u32 pin = 0;
2925 
2926 	if (is_inode_flag_set(inode, FI_PIN_FILE))
2927 		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
2928 	return put_user(pin, (u32 __user *)arg);
2929 }
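
/*
 * Illustrative userspace sketch (assumption): pinning keeps a file's blocks
 * from being migrated by GC until too many GC trials fail (see
 * f2fs_pin_file_control() above); the GET variant reports the current
 * GC-failure count for a pinned file, or 0 otherwise.
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);
 */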
2930 
2931 int f2fs_precache_extents(struct inode *inode)
2932 {
2933 	struct f2fs_inode_info *fi = F2FS_I(inode);
2934 	struct f2fs_map_blocks map;
2935 	pgoff_t m_next_extent;
2936 	loff_t end;
2937 	int err = 0;
2938 
2939 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
2940 		return -EOPNOTSUPP;
2941 
2942 	map.m_lblk = 0;
2943 	map.m_next_pgofs = NULL;
2944 	map.m_next_extent = &m_next_extent;
2945 	map.m_seg_type = NO_CHECK_TYPE;
2946 	map.m_may_create = false;
2947 	end = F2FS_I_SB(inode)->max_file_blocks;
2948 
2949 	while (map.m_lblk < end) {
2950 		map.m_len = end - map.m_lblk;
2951 
2952 		down_write(&fi->i_gc_rwsem[WRITE]);
2953 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
2954 		up_write(&fi->i_gc_rwsem[WRITE]);
2955 		if (err)
2956 			return err;
2957 
2958 		map.m_lblk = m_next_extent;
2959 	}
2960 
2961 	return err;
2962 }
2963 
2964 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
2965 {
2966 	return f2fs_precache_extents(file_inode(filp));
2967 }
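
/*
 * Illustrative userspace sketch (assumption): warming the extent cache for a
 * whole file before latency-sensitive reads; the ioctl takes no argument.
 *
 *	ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS, 0);
 */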
2968 
2969 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2970 {
2971 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
2972 		return -EIO;
2973 
2974 	switch (cmd) {
2975 	case F2FS_IOC_GETFLAGS:
2976 		return f2fs_ioc_getflags(filp, arg);
2977 	case F2FS_IOC_SETFLAGS:
2978 		return f2fs_ioc_setflags(filp, arg);
2979 	case F2FS_IOC_GETVERSION:
2980 		return f2fs_ioc_getversion(filp, arg);
2981 	case F2FS_IOC_START_ATOMIC_WRITE:
2982 		return f2fs_ioc_start_atomic_write(filp);
2983 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
2984 		return f2fs_ioc_commit_atomic_write(filp);
2985 	case F2FS_IOC_START_VOLATILE_WRITE:
2986 		return f2fs_ioc_start_volatile_write(filp);
2987 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
2988 		return f2fs_ioc_release_volatile_write(filp);
2989 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
2990 		return f2fs_ioc_abort_volatile_write(filp);
2991 	case F2FS_IOC_SHUTDOWN:
2992 		return f2fs_ioc_shutdown(filp, arg);
2993 	case FITRIM:
2994 		return f2fs_ioc_fitrim(filp, arg);
2995 	case F2FS_IOC_SET_ENCRYPTION_POLICY:
2996 		return f2fs_ioc_set_encryption_policy(filp, arg);
2997 	case F2FS_IOC_GET_ENCRYPTION_POLICY:
2998 		return f2fs_ioc_get_encryption_policy(filp, arg);
2999 	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3000 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
3001 	case F2FS_IOC_GARBAGE_COLLECT:
3002 		return f2fs_ioc_gc(filp, arg);
3003 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3004 		return f2fs_ioc_gc_range(filp, arg);
3005 	case F2FS_IOC_WRITE_CHECKPOINT:
3006 		return f2fs_ioc_write_checkpoint(filp, arg);
3007 	case F2FS_IOC_DEFRAGMENT:
3008 		return f2fs_ioc_defragment(filp, arg);
3009 	case F2FS_IOC_MOVE_RANGE:
3010 		return f2fs_ioc_move_range(filp, arg);
3011 	case F2FS_IOC_FLUSH_DEVICE:
3012 		return f2fs_ioc_flush_device(filp, arg);
3013 	case F2FS_IOC_GET_FEATURES:
3014 		return f2fs_ioc_get_features(filp, arg);
3015 	case F2FS_IOC_FSGETXATTR:
3016 		return f2fs_ioc_fsgetxattr(filp, arg);
3017 	case F2FS_IOC_FSSETXATTR:
3018 		return f2fs_ioc_fssetxattr(filp, arg);
3019 	case F2FS_IOC_GET_PIN_FILE:
3020 		return f2fs_ioc_get_pin_file(filp, arg);
3021 	case F2FS_IOC_SET_PIN_FILE:
3022 		return f2fs_ioc_set_pin_file(filp, arg);
3023 	case F2FS_IOC_PRECACHE_EXTENTS:
3024 		return f2fs_ioc_precache_extents(filp, arg);
3025 	default:
3026 		return -ENOTTY;
3027 	}
3028 }
3029 
3030 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3031 {
3032 	struct file *file = iocb->ki_filp;
3033 	struct inode *inode = file_inode(file);
3034 	ssize_t ret;
3035 
3036 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3037 		return -EIO;
3038 
3039 	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
3040 		return -EINVAL;
3041 
3042 	if (!inode_trylock(inode)) {
3043 		if (iocb->ki_flags & IOCB_NOWAIT)
3044 			return -EAGAIN;
3045 		inode_lock(inode);
3046 	}
3047 
3048 	ret = generic_write_checks(iocb, from);
3049 	if (ret > 0) {
3050 		bool preallocated = false;
3051 		size_t target_size = 0;
3052 		int err;
3053 
3054 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
3055 			set_inode_flag(inode, FI_NO_PREALLOC);
3056 
3057 		if ((iocb->ki_flags & IOCB_NOWAIT) &&
3058 				(iocb->ki_flags & IOCB_DIRECT)) {
3059 			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
3060 						iov_iter_count(from)) ||
3061 					f2fs_has_inline_data(inode) ||
3062 					f2fs_force_buffered_io(inode, iocb, from)) {
3063 				clear_inode_flag(inode, FI_NO_PREALLOC);
3064 				inode_unlock(inode);
3065 				return -EAGAIN;
3066 			}
3067 		} else {
3071 			preallocated = true;
3072 			target_size = iocb->ki_pos + iov_iter_count(from);
3073 
3074 			err = f2fs_preallocate_blocks(iocb, from);
3075 			if (err) {
3076 				clear_inode_flag(inode, FI_NO_PREALLOC);
3077 				inode_unlock(inode);
3078 				return err;
3079 			}
3080 		}
3081 		ret = __generic_file_write_iter(iocb, from);
3082 		clear_inode_flag(inode, FI_NO_PREALLOC);
3083 
3084 		/* if we couldn't write data, we should deallocate blocks. */
3085 		if (preallocated && i_size_read(inode) < target_size)
3086 			f2fs_truncate(inode);
3087 
3088 		if (ret > 0)
3089 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
3090 	}
3091 	inode_unlock(inode);
3092 
3093 	if (ret > 0)
3094 		ret = generic_write_sync(iocb, ret);
3095 	return ret;
3096 }
3097 
3098 #ifdef CONFIG_COMPAT
3099 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3100 {
3101 	switch (cmd) {
3102 	case F2FS_IOC32_GETFLAGS:
3103 		cmd = F2FS_IOC_GETFLAGS;
3104 		break;
3105 	case F2FS_IOC32_SETFLAGS:
3106 		cmd = F2FS_IOC_SETFLAGS;
3107 		break;
3108 	case F2FS_IOC32_GETVERSION:
3109 		cmd = F2FS_IOC_GETVERSION;
3110 		break;
3111 	case F2FS_IOC_START_ATOMIC_WRITE:
3112 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3113 	case F2FS_IOC_START_VOLATILE_WRITE:
3114 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3115 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
3116 	case F2FS_IOC_SHUTDOWN:
3117 	case F2FS_IOC_SET_ENCRYPTION_POLICY:
3118 	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3119 	case F2FS_IOC_GET_ENCRYPTION_POLICY:
3120 	case F2FS_IOC_GARBAGE_COLLECT:
3121 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3122 	case F2FS_IOC_WRITE_CHECKPOINT:
3123 	case F2FS_IOC_DEFRAGMENT:
3124 	case F2FS_IOC_MOVE_RANGE:
3125 	case F2FS_IOC_FLUSH_DEVICE:
3126 	case F2FS_IOC_GET_FEATURES:
3127 	case F2FS_IOC_FSGETXATTR:
3128 	case F2FS_IOC_FSSETXATTR:
3129 	case F2FS_IOC_GET_PIN_FILE:
3130 	case F2FS_IOC_SET_PIN_FILE:
3131 	case F2FS_IOC_PRECACHE_EXTENTS:
3132 		break;
3133 	default:
3134 		return -ENOIOCTLCMD;
3135 	}
3136 	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3137 }
3138 #endif
3139 
3140 const struct file_operations f2fs_file_operations = {
3141 	.llseek		= f2fs_llseek,
3142 	.read_iter	= generic_file_read_iter,
3143 	.write_iter	= f2fs_file_write_iter,
3144 	.open		= f2fs_file_open,
3145 	.release	= f2fs_release_file,
3146 	.mmap		= f2fs_file_mmap,
3147 	.flush		= f2fs_file_flush,
3148 	.fsync		= f2fs_sync_file,
3149 	.fallocate	= f2fs_fallocate,
3150 	.unlocked_ioctl	= f2fs_ioctl,
3151 #ifdef CONFIG_COMPAT
3152 	.compat_ioctl	= f2fs_compat_ioctl,
3153 #endif
3154 	.splice_read	= generic_file_splice_read,
3155 	.splice_write	= iter_file_splice_write,
3156 };
3157