xref: /openbmc/linux/fs/f2fs/inode.c (revision f1288bdb)
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

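/*
 * Translate on-disk F2FS_*_FL flags into in-core VFS S_* flags.
 * Note that inode_set_flags() only touches the bits named in its mask
 * argument, so unrelated i_flags bits are preserved; the effect is
 * roughly (illustrative sketch only):
 *
 *	inode->i_flags = (inode->i_flags & ~mask) | (new_fl & mask);
 *
 * done with atomic bitops so concurrent readers never see a torn value.
 */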
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

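/*
 * Device numbers live in the first data-address slots of the raw inode.
 * A sketch of the convention used by the rdev helpers below: if
 * i_addr[extra_size] is non-zero, it holds the legacy 16-bit encoding
 * (old_encode_dev/old_decode_dev); otherwise the 32-bit encoding
 * (new_encode_dev/new_decode_dev) is stored in the following slot,
 * i_addr[extra_size + 1].
 */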
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

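/*
 * Tri-state return: 1 means the first block has no valid data address
 * yet (nothing written), 0 means it points at a written, in-range
 * block, and -EFSCORRUPTED means the address fails the
 * DATA_GENERIC_ENHANCE validity check. The caller sets
 * FI_FIRST_BLOCK_WRITTEN only in the 0 case.
 */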
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

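/*
 * After a sudden power-off, FI_DATA_EXIST may be lost even though
 * inline data was written. Scan the inline area word by word; any
 * non-zero word means data exists, so restore the flag both in memory
 * and in the raw inode, and dirty the node page.
 */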
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

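/*
 * The inode checksum is seeded with the node's ino and i_generation and
 * then covers the entire node block, with the checksum field itself
 * replaced by a zero dummy. A sketch of the three ranges being summed:
 *
 *	[0, offset)                 raw inode up to i_inode_checksum
 *	[offset, offset + 4)        4-byte zero dummy
 *	[offset + 4, F2FS_BLKSIZE)  rest of the node block
 *
 * where offset = offsetof(struct f2fs_inode, i_inode_checksum).
 */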
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

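	/*
	 * Without CONFIG_F2FS_CHECK_FS, dirty and writeback pages are
	 * skipped as well: their contents may have changed since the
	 * checksum was last computed, and f2fs_inode_chksum_set() will
	 * refresh it at write time anyway.
	 */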
#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

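/*
 * Cross-check the raw inode against the superblock's features and
 * against itself (footer, sizes, extent info, compression fields). Any
 * inconsistency sets SBI_NEED_FSCK and returns false, which
 * do_read_inode() turns into -EFSCORRUPTED.
 */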
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (fi->extent_tree) {
		struct extent_info *ei = &fi->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}

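/*
 * Read the on-disk inode block and populate both the generic VFS inode
 * and the f2fs-private f2fs_inode_info. Returns 0 on success or a
 * negative errno (-EINVAL for an out-of-range ino, -EFSCORRUPTED when
 * sanity checks fail, or whatever f2fs_get_node_page() reported).
 */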
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	f2fs_init_extent_tree(inode, node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previously, inline data and inline dentries always
		 * reserved 200 bytes (DEFAULT_INLINE_XATTR_ADDRS, i.e.
		 * 50 four-byte address slots) in the inode layout, even
		 * when inline_xattr was disabled. To keep inline_dentry's
		 * on-disk structure backward compatible, we reclaim that
		 * space only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check whether inline data exists */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode->i_ctime;
	fi->i_disk_time[2] = inode->i_mtime;
	fi->i_disk_time[3] = fi->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

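/*
 * Standard iget_locked() pattern: a cached inode is returned as-is,
 * while a freshly allocated one comes back locked with I_NEW set and
 * must be filled from disk, then published with unlock_new_inode() or
 * discarded with iget_failed(). A minimal caller sketch (illustrative
 * only):
 *
 *	struct inode *inode = f2fs_iget(sb, ino);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	...
 *	iput(inode);
 */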
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (ino == F2FS_COMPRESS_INO(sbi))
		goto make_now;
#endif

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page() only truncates pages of
		 * regular inodes, so pretend this is a regular file.
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode) &&
			!is_sbi_flag_set(sbi, SBI_POR_DOING)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

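/*
 * Same as f2fs_iget(), but transient -ENOMEM failures are retried
 * indefinitely after backing off in memalloc_retry_wait(). All other
 * errors are still returned to the caller.
 */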
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			ri->i_compress_flag =
				cpu_to_le16(F2FS_I(inode)->i_compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * In lazytime mode, atime can be updated without dirtying the
	 * f2fs inode.
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance the fs here to avoid producing dirty node
	 * pages during urgent cleaning when free sections run out.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called when the inode is evicted; the on-disk inode is deleted here
 * only at the last iput() with i_nlink == 0.
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the
			 * same block address as this inode's; if that
			 * node was truncated previously, truncating the
			 * inode node fails here with -ENOENT.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give it more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* if f2fs_new_inode() failed, i_ino is zero; skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
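/*
 * A sketch of the expected caller pattern (illustrative only; the label
 * name is made up):
 *
 *	f2fs_lock_op(sbi);
 *	...
 *	if (err)
 *		goto release_out;
 *	...
 *	f2fs_unlock_op(sbi);
 *	return 0;
 * release_out:
 *	f2fs_handle_failed_inode(inode);
 *	return err;
 *
 * Note that the error path must NOT call f2fs_unlock_op() itself:
 * f2fs_handle_failed_inode() drops the lock on every path.
 */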
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear the inode's nlink so that its resources are released
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to keep the inode from remaining dirty,
	 * which would otherwise trigger a panic when flushing dirty
	 * inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make this a bad inode, since it becomes a regular file */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that this orphan is not lost when a
	 * checkpoint is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
926