xref: /openbmc/linux/fs/f2fs/inode.c (revision 081e8df6)
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

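/*
 * Special inodes (char/block devices, FIFOs, sockets) keep their device
 * number in the first data block address slots, just past the extra
 * attribute area: the legacy 16-bit encoding sits in the first slot when
 * non-zero, otherwise the 32-bit encoding is read from the second slot.
 */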
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

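/*
 * Returns 0 if the file's first block holds a valid, written data block
 * address, 1 if nothing is allocated there yet, and -EFSCORRUPTED if the
 * recorded address is out of range.  The 0 case lets the caller set
 * FI_FIRST_BLOCK_WRITTEN.
 */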
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) {
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}
	return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

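/*
 * An inline-data inode can lose its FI_DATA_EXIST flag, e.g. across a
 * sudden power cut between writing the inline data and the flag.  Scan
 * the inline area word by word: if any word is non-zero, data does exist,
 * so restore the flag in memory and in the raw inode and redirty the
 * node page.
 */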
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

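/*
 * The checksum covers the whole node block (F2FS_BLKSIZE bytes) except
 * the i_inode_checksum field itself: a seed is derived from the per-fs
 * checksum seed, the inode number and the generation, then the block is
 * checksummed up to the field, over a zeroed placeholder dword, and from
 * just past the field to the end of the block.
 */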
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

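/*
 * Verification is skipped once the filesystem is shut down and, unless
 * CONFIG_F2FS_CHECK_FS is enabled, for dirty or writeback node pages,
 * whose in-memory contents may legitimately differ from the checksum
 * written at the last flush.
 */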
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

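/*
 * Cross-check the raw inode against enabled superblock features before
 * exposing it to the VFS: i_blocks must be non-zero (on-disk i_blocks
 * counts the inode's own node block), the footer ino/nid must match, and
 * the extra-attribute, inline, casefold and compression fields must be
 * self-consistent.  Any failure flags the image for fsck and rejects the
 * inode as corrupted.
 */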
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}

static void init_idisk_time(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode->i_ctime;
	fi->i_disk_time[2] = inode->i_mtime;
}

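/*
 * Fill the in-memory inode from its on-disk node page: raw attributes,
 * inline and extra-attribute state, device numbers, project quota,
 * creation time and compression fields, followed by extent tree setup
 * and the sanity checks above.
 */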
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	/* on-disk i_blocks counts the inode block itself, hence the -1 */
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Older inline data and directory inodes always reserved
		 * 200 bytes in the inode layout, even when inline_xattr was
		 * disabled. To keep inline_dentry's structure for backward
		 * compatibility, we reclaim that space only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover the cold bit for a non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			unsigned short compress_flag;

			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			compress_flag = le16_to_cpu(ri->i_compress_flag);
			/* the compress level lives in the high bits of i_compress_flag */
			fi->i_compress_level = compress_flag >>
						COMPRESS_LEVEL_OFFSET;
			fi->i_compress_flag = compress_flag &
					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	init_idisk_time(inode);

	/* Need all the flag bits */
	f2fs_init_read_extent_tree(inode, node_page);
	f2fs_init_age_extent_tree(inode);

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	if (!sanity_check_extent_cache(inode)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
{
	return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
		ino == F2FS_COMPRESS_INO(sbi);
}

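/*
 * Look up (or instantiate) the inode for @ino.  Meta inodes (node, meta
 * and compress cache) are set up directly; everything else is read from
 * disk via do_read_inode().  Finding a meta ino already cached through
 * this path is treated as corruption.  Typical use, roughly:
 *
 *	inode = f2fs_iget(sb, ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */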
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (is_meta_ino(sbi, ino)) {
			f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			ret = -EFSCORRUPTED;
			trace_f2fs_iget_exit(inode, ret);
			iput(inode);
			f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
			return ERR_PTR(ret);
		}

		trace_f2fs_iget(inode);
		return inode;
	}

	if (is_meta_ino(sbi, ino))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page() only truncates pages of a
		 * regular inode
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode) &&
			!is_sbi_flag_set(sbi, SBI_POR_DOING)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

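/*
 * Same as f2fs_iget(), but transparently retries after waiting for
 * memory reclaim if the lookup fails with -ENOMEM; any other error is
 * returned as-is.
 */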
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

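/*
 * Copy the in-memory inode state back into its raw node page (obtained
 * by the caller, e.g. via f2fs_get_node_page()).  The page is redirtied
 * and the inode is marked synced, so any later modification must dirty
 * the inode again.
 */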
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	/* on-disk i_blocks counts the inode block itself, hence the +1 */
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (!f2fs_is_atomic_file(inode) ||
			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		ri->i_size = cpu_to_le64(i_size_read(inode));

	if (et) {
		read_lock(&et->lock);
		set_raw_read_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			unsigned short compress_flag;

			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			compress_flag = F2FS_I(inode)->i_compress_flag |
				F2FS_I(inode)->i_compress_level <<
						COMPRESS_LEVEL_OFFSET;
			ri->i_compress_flag = cpu_to_le16(compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

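/*
 * Write the inode into its node page, retrying the page lookup without
 * limit on -ENOMEM and up to DEFAULT_RETRY_IO_COUNT times on other
 * errors.  -ENOENT means the node block was already truncated, so there
 * is nothing to update; persistent failure stops checkpointing rather
 * than silently dropping the update.
 */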
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int count = 0;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		/* The node block was truncated. */
		if (err == -ENOENT)
			return;

		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

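/*
 * ->write_inode() entry point.  Node and meta inodes never need this,
 * and a clean inode whose times still match the on-disk copy (the
 * lazytime atime case) is skipped without I/O.
 */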
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying the f2fs inode in
	 * lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * Balance the fs here to avoid producing dirty node pages during
	 * urgent cleaning when we are running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	if (fi->cow_inode) {
		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
		iput(fi->cow_inode);
		fi->cow_inode = NULL;
	}

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		err = -EIO;

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if that node was
			 * truncated previously, truncation of the inode node
			 * will fail here.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&fi->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* if f2fs_new_inode() failed, i_ino is zero; skip the invalidation */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear the inode's nlink so that its resources are released
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to keep the inode from remaining dirty, which
	 * would cause a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make this a bad inode, since it becomes a regular file */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that we don't lose this orphan if a
	 * checkpoint is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
956