xref: /openbmc/linux/fs/f2fs/file.c (revision e1a3e724)
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

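/*
 * Handle a write fault on a mmapped page: reserve the data block if
 * needed, zero any part of the page beyond EOF, and dirty the page so
 * writeback picks it up.
 */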
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

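/*
 * Look up the parent inode number through any cached dentry alias of
 * @inode. Returns 1 and sets *pino on success, 0 otherwise.
 */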
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

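/*
 * Decide whether fsync must fall back to a full checkpoint instead of
 * relying on roll-forward recovery of the fsynced node chain.
 */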
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* we still need to catch any pending inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

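/*
 * After a checkpoint has secured consistency, repair a stale parent
 * inode hint (i_pino) so later fsyncs can recover without another
 * checkpoint.
 */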
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

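/*
 * fsync/fdatasync entry point: flush dirty data pages, then either run
 * a full checkpoint or write just the node chain needed for
 * roll-forward recovery, and finally issue a cache flush.
 */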
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * If there is no written data, don't waste time writing
	 * recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	/*
	 * Both fdatasync() and fsync() can be recovered after a sudden
	 * power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was set, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, we don't need to track this */
	remove_dirty_inode(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_dirty_inode(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

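/* for SEEK_DATA, dirty pages in the page cache also count as data */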
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

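/*
 * Walk the direct node blocks from @offset looking for the first data
 * block (SEEK_DATA) or hole (SEEK_HOLE).
 */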
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (f2fs_encrypted_inode(inode)) {
		int err = f2fs_get_encryption_info(inode);
		if (err)
			return 0;
	}

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = f2fs_get_encryption_info(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}

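/*
 * Invalidate up to @count block addresses in the current dnode and
 * return how many valid blocks were actually freed.
 */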
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * Once we invalidate a valid blkaddr in the range
		 * [ofs, ofs + count], we invalidate all blkaddrs in the
		 * whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
						F2FS_I(dn->inode)) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = grab_cache_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

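/*
 * Free all data blocks from @from up to EOF, handling inline inodes
 * and the partially-truncated page at the boundary.
 */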
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the partially-truncated data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}

int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode));
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

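/* zero @len bytes of the page at @index, allocating the block if needed */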
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}

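/*
 * FALLOC_FL_PUNCH_HOLE: zero out the partial pages at both edges and
 * deallocate every whole block inside the range.
 */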
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

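/*
 * Shift every block after the collapsed range forward by (end - start)
 * pages, moving block addresses one dnode entry at a time.
 */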
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			new_addr = NULL_ADDR;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		if (new_addr == NULL_ADDR) {
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
			if (ret && ret != -ENOENT) {
				goto out;
			} else if (ret == -ENOENT) {
				f2fs_unlock_op(sbi);
				continue;
			}

			if (dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_unlock_op(sbi);
				continue;
			} else {
				truncate_data_blocks_range(&dn, 1);
			}

			f2fs_put_dnode(&dn);
		} else {
			struct page *ipage;

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, start);
			if (ret)
				goto out;

			old_addr = dn.data_blkaddr;
			if (old_addr != NEW_ADDR && new_addr == NEW_ADDR) {
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
				invalidate_blocks(sbi, old_addr);

				dn.data_blkaddr = new_addr;
				set_data_blkaddr(&dn);
			} else if (new_addr != NEW_ADDR) {
				struct node_info ni;

				get_node_info(sbi, dn.nid, &ni);
				f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
			}

			f2fs_put_dnode(&dn);
		}
		f2fs_unlock_op(sbi);
	}
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	new_size = i_size_read(inode) - len;

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		i_size_write(inode, new_size);

	return ret;
}

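/*
 * FALLOC_FL_ZERO_RANGE: zero the partial edges in place and remap every
 * whole block in the range to NEW_ADDR (preallocated but unwritten).
 */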
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
						pg_start << PAGE_CACHE_SHIFT);
		}

		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);

				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);

				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
					(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}

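/*
 * FALLOC_FL_INSERT_RANGE: make room at @offset by shifting all blocks
 * from the end of file backward by the length of the inserted range.
 */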
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		struct dnode_of_data dn;
		struct page *ipage;
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, idx, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			goto next;
		} else if (dn.data_blkaddr == NULL_ADDR) {
			f2fs_put_dnode(&dn);
			goto next;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		ipage = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, idx + delta);
		if (ret)
			goto out;

		old_addr = dn.data_blkaddr;
		f2fs_bug_on(sbi, old_addr != NEW_ADDR);

		if (new_addr != NEW_ADDR) {
			struct node_info ni;

			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
		}
		f2fs_put_dnode(&dn);
next:
		f2fs_unlock_op(sbi);
	}

	i_size_write(inode, new_size);
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}

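/*
 * Default fallocate: preallocate blocks for the range and, unless
 * FALLOC_FL_KEEP_SIZE is set, extend i_size to cover it.
 */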
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	f2fs_balance_fs(sbi);

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	f2fs_lock_op(sbi);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)
			goto noalloc;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);
		if (ret)
			break;
noalloc:
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}
	f2fs_unlock_op(sbi);

	return ret;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}

out:
	mutex_unlock(&inode->i_mutex);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	return put_user(flags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!inode_owner_or_capable(inode)) {
		ret = -EACCES;
		goto out;
	}

	if (get_user(flags, (int __user *)arg)) {
		ret = -EFAULT;
		goto out;
	}

	flags = f2fs_mask_flags(inode->i_mode, flags);

	mutex_lock(&inode->i_mutex);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	mutex_unlock(&inode->i_mutex);

	f2fs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

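/*
 * Atomic writes buffer dirty pages in memory until commit; volatile
 * writes may be dropped entirely on a crash. Both require the inode to
 * be converted out of inline data first.
 */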
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode))
		return 0;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
	return 0;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		ret = commit_inmem_pages(inode, false);
		if (ret)
			goto err_out;
	}

	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
err_out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	return 0;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!f2fs_is_volatile_file(inode))
		return 0;

	if (!f2fs_is_first_block_written(inode))
		return truncate_partial_data_page(inode, 0, true);

	return punch_hole(inode, 0, F2FS_BLKSIZE);
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		commit_inmem_pages(inode, true);
	}

	if (f2fs_is_volatile_file(inode))
		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);

	mnt_drop_write_file(filp);
	return ret;
}

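/*
 * Take the filesystem down for testing/emergency: optionally freeze or
 * checkpoint first, then stop further checkpoints.
 */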
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	struct f2fs_encryption_policy policy;
	struct inode *inode = file_inode(filp);

	if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg,
				sizeof(policy)))
		return -EFAULT;

	return f2fs_process_policy(&policy, inode);
#else
	return -EOPNOTSUPP;
#endif
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	struct f2fs_encryption_policy policy;
	struct inode *inode = file_inode(filp);
	int err;

	err = f2fs_get_policy(inode, &policy);
	if (err)
		return err;

	if (copy_to_user((struct f2fs_encryption_policy __user *)arg, &policy,
							sizeof(policy)))
		return -EFAULT;
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);

	mnt_drop_write_file(filp);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		return err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}

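/*
 * Run up to the requested number of foreground GC passes, stopping
 * early if the GC mutex is contended or garbage collection cannot
 * proceed; the number of passes actually run is written back to
 * userspace.
 */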
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 i, count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(count, (__u32 __user *)arg))
		return -EFAULT;

	if (!count || count > F2FS_BATCH_GC_MAX_NUM)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		if (!mutex_trylock(&sbi->gc_mutex))
			break;

		if (f2fs_gc(sbi))
			break;
	}

	if (put_user(i, (__u32 __user *)arg))
		return -EFAULT;

	return 0;
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (f2fs_encrypted_inode(inode) &&
				!f2fs_has_encryption_key(inode) &&
				f2fs_get_encryption_info(inode))
		return -EACCES;

	return generic_file_write_iter(iocb, from);
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
1725