/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>

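/*
 * Make a mmapped page writable: reserve the data block backing the
 * faulting page if it is still a hole, zero any part of the page beyond
 * EOF, and mark the page dirty and uptodate.
 */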
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err, ilock;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	/* block allocation */
	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;

	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, ilock);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) >= i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out;

	/* fill the page */
	wait_on_page_writeback(page);

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	file_update_time(vma->vm_file);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

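/*
 * fsync()/fdatasync() entry point.  After writing back dirty data pages,
 * either trigger a full checkpoint (when this inode cannot be recovered by
 * roll-forward) or just flush its node pages and issue a disk flush.
 */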
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (inode->i_sb->s_flags & MS_RDONLY)
		return 0;

	trace_f2fs_sync_file_enter(inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	mutex_lock(&inode->i_mutex);

	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (is_cp_file(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
	} else {
		/* if there is no written node page, write its inode page */
		while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
			ret = f2fs_write_inode(inode, NULL);
			if (ret)
				goto out;
		}
		filemap_fdatawait_range(sbi->node_inode->i_mapping,
							0, LONG_MAX);
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	}
out:
	mutex_unlock(&inode->i_mutex);
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	return ret;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

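/*
 * Release up to @count block addresses starting at @dn->ofs_in_node in the
 * current node page, invalidating each allocated block and updating the
 * inode's valid block count.  Returns the number of blocks freed.
 */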
static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = page_address(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		update_extent_cache(NULL_ADDR, dn);
		invalidate_blocks(sbi, blkaddr);
		dec_valid_block_count(sbi, dn->inode, 1);
		nr_free++;
	}
	if (nr_free) {
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

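/* Zero the tail of the page containing the new EOF, if @from is not page-aligned. */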
static void truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
	if (IS_ERR(page))
		return;

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		f2fs_put_page(page, 1);
		return;
	}
	wait_on_page_writeback(page);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

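/*
 * Truncate all data blocks at or beyond @from: free the remaining addresses
 * in the dnode that maps @from, drop every node block past it, and finally
 * zero the partial page at the new EOF.
 */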
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, ilock = -1;
	int err;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)
			((from + blocksize - 1) >> (sbi->log_blocksize));

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		mutex_unlock_op(sbi, ilock);
		trace_f2fs_truncate_blocks_exit(inode, err);
		return err;
	}

	if (IS_INODE(dn.node_page))
		count = ADDRS_PER_INODE;
	else
		count = ADDRS_PER_BLOCK;

	count -= dn.ofs_in_node;
	BUG_ON(count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
	mutex_unlock_op(sbi, ilock);

	/* lastly zero out the first data page */
	truncate_partial_data_page(inode, from);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

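/* Truncate the inode to its current i_size (regular files, directories and symlinks only). */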
void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	trace_f2fs_truncate(inode);

	if (!truncate_blocks(inode, i_size_read(inode))) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
}

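/*
 * i_blocks is accounted here in filesystem blocks (4KB), while st_blocks is
 * reported in 512-byte units, hence the shift by 3 after generic_fillattr().
 */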
static int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

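/*
 * Like setattr_copy(), except that with ACLs enabled a new mode is stashed
 * in the f2fs inode info (FI_ACL_MODE) so that f2fs_setattr() can apply it
 * after f2fs_acl_chmod().
 */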
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

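/*
 * ->setattr callback: apply size changes via f2fs_truncate(), copy the
 * remaining attributes, and let the ACL code rebuild i_mode if needed.
 */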
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
			attr->ia_size != i_size_read(inode)) {
		truncate_setsize(inode, attr->ia_size);
		f2fs_truncate(inode);
		f2fs_balance_fs(F2FS_SB(inode->i_sb));
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = f2fs_acl_chmod(inode);
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

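/* Zero @len bytes at @start within the data page at @index, allocating the page if needed. */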
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	int ilock;

	if (!len)
		return;

	f2fs_balance_fs(sbi);

	ilock = mutex_lock_op(sbi);
	page = get_new_data_page(inode, index, false);
	mutex_unlock_op(sbi, ilock);

	if (!IS_ERR(page)) {
		wait_on_page_writeback(page);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

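/* Free the data blocks backing page indexes [@pg_start, @pg_end); holes are skipped. */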
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}

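/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the range,
 * drop the whole pages in between from the page cache and free their blocks.
 */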
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
			int ilock;

			f2fs_balance_fs(sbi);

			blk_start = pg_start << PAGE_CACHE_SHIFT;
			blk_end = pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			ilock = mutex_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			mutex_unlock_op(sbi, ilock);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) <= (offset + len)) {
		i_size_write(inode, offset);
		mark_inode_dirty(inode);
	}

	return ret;
}

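/*
 * Preallocation path of fallocate(): reserve a block for every page in the
 * range and, unless FALLOC_FL_KEEP_SIZE is set, extend i_size to cover it.
 */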
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;
		int ilock;

		ilock = mutex_lock_op(sbi);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
		if (ret) {
			mutex_unlock_op(sbi, ilock);
			break;
		}

		if (dn.data_blkaddr == NULL_ADDR) {
			ret = reserve_new_block(&dn);
			if (ret) {
				f2fs_put_dnode(&dn);
				mutex_unlock_op(sbi, ilock);
				break;
			}
		}
		f2fs_put_dnode(&dn);
		mutex_unlock_op(sbi, ilock);

		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
	}

	return ret;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len, mode);
	else
		ret = expand_inode_data(inode, offset, len, mode);

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

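/* Mask out inode flags that are not meaningful for the given file type. */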
static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

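/* Handle FS_IOC_GETFLAGS/FS_IOC_SETFLAGS; any other command returns -ENOTTY. */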
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;
	int ret;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		flags = fi->i_flags & FS_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case FS_IOC_SETFLAGS:
	{
		unsigned int oldflags;

		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;

		if (!inode_owner_or_capable(inode)) {
			ret = -EACCES;
			goto out;
		}

		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto out;
		}

		flags = f2fs_mask_flags(inode->i_mode, flags);

		mutex_lock(&inode->i_mutex);

		oldflags = fi->i_flags;

		if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto out;
			}
		}

		flags = flags & FS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
		fi->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		f2fs_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
out:
		mnt_drop_write_file(filp);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.open		= generic_file_open,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};