xref: /openbmc/linux/fs/f2fs/inode.c (revision 39f555fb)
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

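/*
 * Mark the inode dirty for the VFS, unless it is still being initialized
 * (FI_NEW_INODE) or f2fs_inode_dirtied() reports it was already dirtied.
 */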
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

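/* Propagate the on-disk F2FS_*_FL flags into the generic VFS S_* inode flags. */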
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

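/*
 * For special inodes (chr/blk/fifo/sock), decode the device number stored
 * in the raw inode's address slots: the old encoding lives in the first
 * slot, the new one in the second.
 */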
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

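/*
 * Return 0 if the file's first data block has been written, 1 if it has
 * not, and -EFSCORRUPTED if the recorded block address is invalid.
 */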
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) {
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}
	return 0;
}

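/*
 * Encode i_rdev into the raw inode's address slots, using the old format
 * when the device number fits and the new format otherwise.
 */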
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

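/*
 * If the inline data area still holds any non-zero word, the data exists:
 * set FI_DATA_EXIST again and redirty the inode page.
 */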
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

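/*
 * An inode checksum is only maintained when the feature is enabled and the
 * inode's extra attribute area is large enough to hold i_inode_checksum.
 */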
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

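/*
 * Compute the inode checksum over the footer ino, i_generation, and the
 * whole inode block, with the i_inode_checksum field itself treated as
 * zero.
 */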
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

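/*
 * Validate the on-disk compression fields: algorithm, compressed block
 * count, log cluster size, and the per-algorithm compress level.
 */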
static bool sanity_check_compress_inode(struct inode *inode,
			struct f2fs_inode *ri)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned char clevel;

	if (ri->i_compress_algorithm >= COMPRESS_MAX) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_compress_algorithm);
		return false;
	}
	if (le64_to_cpu(ri->i_compr_blocks) >
			SECTOR_TO_BLOCK(inode->i_blocks)) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
			__func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
			SECTOR_TO_BLOCK(inode->i_blocks));
		return false;
	}
	if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
		ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_log_cluster_size);
		return false;
	}

	clevel = le16_to_cpu(ri->i_compress_flag) >>
				COMPRESS_LEVEL_OFFSET;
	switch (ri->i_compress_algorithm) {
	case COMPRESS_LZO:
#ifdef CONFIG_F2FS_FS_LZO
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZORLE:
#ifdef CONFIG_F2FS_FS_LZORLE
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZ4:
#ifdef CONFIG_F2FS_FS_LZ4
#ifdef CONFIG_F2FS_FS_LZ4HC
		if (clevel &&
		   (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
			goto err_level;
#else
		if (clevel)
			goto err_level;
#endif
#endif
		break;
	case COMPRESS_ZSTD:
#ifdef CONFIG_F2FS_FS_ZSTD
		if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
			goto err_level;
#endif
		break;
	default:
		goto err_level;
	}

	return true;
err_level:
	f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
		  __func__, inode->i_ino, clevel);
	return false;
}

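/*
 * Cross-check the on-disk inode against the superblock features and reject
 * obviously corrupted fields before the inode is exposed to the rest of
 * the filesystem.
 */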
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_has_extra_attr(inode)) {
		if (!f2fs_sb_has_extra_attr(sbi)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
				  __func__, inode->i_ino);
			return false;
		}
		if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_extra_isize,
				  F2FS_TOTAL_EXTRA_ATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			f2fs_has_inline_xattr(inode) &&
			(!fi->i_inline_xattr_size ||
			fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_inline_xattr_size,
				  MAX_INLINE_XATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_compress_flag)) {
			if (!sanity_check_compress_inode(inode, ri))
				return false;
		}
	} else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (!f2fs_sb_has_extra_attr(sbi)) {
		if (f2fs_sb_has_project_quota(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
			return false;
		}
		if (f2fs_sb_has_inode_chksum(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
			return false;
		}
		if (f2fs_sb_has_inode_crtime(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
			return false;
		}
		if (f2fs_sb_has_compression(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
			return false;
		}
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	return true;
}

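/*
 * Remember the timestamps that match the on-disk inode, so a later
 * f2fs_is_time_consistent() check can tell whether they have changed.
 */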
static void init_idisk_time(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode_get_ctime(inode);
	fi->i_disk_time[2] = inode->i_mtime;
}

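/* Read the on-disk inode block and fill in the in-memory inode from it. */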
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
			le32_to_cpu(ri->i_ctime_nsec));
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previously, inline data and directories always reserved
		 * 200 bytes in the inode layout, even if inline_xattr was
		 * disabled. To keep inline_dentry's structure for backward
		 * compatibility, we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_compress_flag)) {
			unsigned short compress_flag;

			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_compress_level = compress_flag >>
						COMPRESS_LEVEL_OFFSET;
			fi->i_compress_flag = compress_flag &
					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	init_idisk_time(inode);

	/* Need all the flag bits */
	f2fs_init_read_extent_tree(inode, node_page);
	f2fs_init_age_extent_tree(inode);

	if (!sanity_check_extent_cache(inode)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
{
	return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
		ino == F2FS_COMPRESS_INO(sbi);
}

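/*
 * Look up (or read from disk) the inode for the given ino and wire up the
 * address-space and inode operations that match its type.
 */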
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (is_meta_ino(sbi, ino)) {
			f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			ret = -EFSCORRUPTED;
			trace_f2fs_iget_exit(inode, ret);
			iput(inode);
			f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
			return ERR_PTR(ret);
		}

		trace_f2fs_iget(inode);
		return inode;
	}

	if (is_meta_ino(sbi, ino))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inodes
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode) &&
			!is_sbi_flag_set(sbi, SBI_POR_DOING)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

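/* Same as f2fs_iget(), but keep retrying while the failure is -ENOMEM. */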
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

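/*
 * Copy the in-memory inode state back into its on-disk inode block and
 * mark the node page dirty so it gets written out.
 */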
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (!f2fs_is_atomic_file(inode) ||
			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		ri->i_size = cpu_to_le64(i_size_read(inode));

	if (et) {
		read_lock(&et->lock);
		set_raw_read_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_compress_flag)) {
			unsigned short compress_flag;

			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			compress_flag = F2FS_I(inode)->i_compress_flag |
				F2FS_I(inode)->i_compress_level <<
						COMPRESS_LEVEL_OFFSET;
			ri->i_compress_flag = cpu_to_le16(compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

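/*
 * Grab the inode's node page and sync the in-memory inode into it,
 * retrying on -ENOMEM and up to DEFAULT_RETRY_IO_COUNT times on other
 * failures.
 */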
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int count = 0;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		/* The node block was truncated. */
		if (err == -ENOENT)
			return;

		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

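/*
 * ->write_inode callback: write the inode into its node page unless
 * nothing relevant has changed, then balance the filesystem when writeback
 * requested actual writes.
 */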
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during urgent cleaning when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	if (fi->cow_inode) {
		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
		iput(fi->cow_inode);
		fi->cow_inode = NULL;
	}

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		err = -EIO;

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if that node was
			 * truncated previously, truncation of the inode node
			 * will fail.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&fi->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* if f2fs_new_inode() failed, i_ino is zero, so skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach an error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can prevent losing this orphan when
	 * encountering a checkpoint followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}