// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

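/*
 * Mark the VFS inode dirty so that ->write_inode() will eventually be
 * called, unless the inode is brand new, the filesystem is read-only,
 * or the dirty state was already recorded in f2fs' own dirty list.
 */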
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	/* only atomic file w/ FI_ATOMIC_COMMITTED can be set vfs dirty */
	if (f2fs_is_atomic_file(inode) &&
			!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		return;

	mark_inode_dirty_sync(inode);
}

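/*
 * Translate the on-disk F2FS_*_FL attribute flags into their generic
 * VFS S_* counterparts on inode->i_flags.
 */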
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

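/*
 * For special inodes, the device number lives in the first data slots
 * of the inode block: a non-zero addr[0] holds the old 16-bit encoding,
 * otherwise addr[1] holds the new 32-bit encoding.
 */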
static void __get_inode_rdev(struct inode *inode, struct page *node_page)
{
	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (addr[0])
			inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
		else
			inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
	}
}

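/*
 * Mirror of __get_inode_rdev(): store the device number in the old
 * encoding when it fits, in the new encoding otherwise, zeroing the
 * unused slot so the reader can tell which encoding is in use.
 */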
static void __set_inode_rdev(struct inode *inode, struct page *node_page)
{
	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
			addr[1] = 0;
		} else {
			addr[0] = 0;
			addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
			addr[2] = 0;
		}
	}
}

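/*
 * If any word of the inline data area is non-zero, data exists but the
 * FI_DATA_EXIST flag was lost (e.g. across a sudden power cut), so
 * restore the flag and redirty the inode page.
 */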
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

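/*
 * An inode checksum is only meaningful when the superblock feature is
 * enabled, the page really is an inode, and its extra attribute area is
 * large enough to contain the i_inode_checksum field.
 */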
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

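/*
 * Compute the inode checksum: seed it with the inode number and
 * generation, then checksum the whole inode block while substituting a
 * zero dword for the i_inode_checksum field itself.
 */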
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

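/*
 * Verify the stored inode checksum against a freshly computed one.
 * Without CONFIG_F2FS_CHECK_FS, dirty or writeback pages are skipped,
 * since their in-memory contents may legitimately differ from the
 * checksum computed at the last write-out.
 */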
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

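/*
 * Validate the compression metadata of an inode: algorithm, compressed
 * block count, log cluster size, and (per algorithm) the stored
 * compression level.
 */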
static bool sanity_check_compress_inode(struct inode *inode,
					struct f2fs_inode *ri)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned char clevel;

	if (ri->i_compress_algorithm >= COMPRESS_MAX) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_compress_algorithm);
		return false;
	}
	if (le64_to_cpu(ri->i_compr_blocks) >
			SECTOR_TO_BLOCK(inode->i_blocks)) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
			__func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
			SECTOR_TO_BLOCK(inode->i_blocks));
		return false;
	}
	if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
		ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_log_cluster_size);
		return false;
	}

	clevel = le16_to_cpu(ri->i_compress_flag) >>
				COMPRESS_LEVEL_OFFSET;
	switch (ri->i_compress_algorithm) {
	case COMPRESS_LZO:
#ifdef CONFIG_F2FS_FS_LZO
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZORLE:
#ifdef CONFIG_F2FS_FS_LZORLE
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZ4:
#ifdef CONFIG_F2FS_FS_LZ4
#ifdef CONFIG_F2FS_FS_LZ4HC
		if (clevel &&
		   (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
			goto err_level;
#else
		if (clevel)
			goto err_level;
#endif
#endif
		break;
	case COMPRESS_ZSTD:
#ifdef CONFIG_F2FS_FS_ZSTD
		if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
			goto err_level;
#endif
		break;
	default:
		goto err_level;
	}

	return true;
err_level:
	f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
		  __func__, inode->i_ino, clevel);
	return false;
}

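/*
 * Cross-check the on-disk inode against itself and against the enabled
 * superblock features; returning false makes the caller reject the
 * inode as corrupted and request an fsck.
 */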
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (ino_of_node(node_page) == fi->i_xattr_nid) {
		f2fs_warn(sbi, "%s: corrupted inode i_ino=%lx, xnid=%x, run fsck to fix.",
			  __func__, inode->i_ino, fi->i_xattr_nid);
		return false;
	}

	if (f2fs_has_extra_attr(inode)) {
		if (!f2fs_sb_has_extra_attr(sbi)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
				  __func__, inode->i_ino);
			return false;
		}
		if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_extra_isize,
				  F2FS_TOTAL_EXTRA_ATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			f2fs_has_inline_xattr(inode) &&
			(!fi->i_inline_xattr_size ||
			fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_inline_xattr_size,
				  MAX_INLINE_XATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_compress_flag)) {
			if (!sanity_check_compress_inode(inode, ri))
				return false;
		}
	} else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (!f2fs_sb_has_extra_attr(sbi)) {
		if (f2fs_sb_has_project_quota(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
			return false;
		}
		if (f2fs_sb_has_inode_chksum(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
			return false;
		}
		if (f2fs_sb_has_inode_crtime(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
			return false;
		}
		if (f2fs_sb_has_compression(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
			return false;
		}
	}

	if (f2fs_sanity_check_inline_data(inode, node_page)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
			  __func__, inode->i_ino, fi->i_xattr_nid);
		return false;
	}

	return true;
}

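/*
 * Remember the timestamps that are now on disk, so that later lazytime
 * updates can be detected via f2fs_is_time_consistent().
 */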
static void init_idisk_time(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode_get_ctime(inode);
	fi->i_disk_time[2] = inode->i_mtime;
}

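/*
 * Read the raw inode from its node page and populate the VFS inode and
 * f2fs_inode_info, performing sanity checks and recovering inline-data
 * status and the cold bit where needed.
 */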
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
			le32_to_cpu(ri->i_ctime_nsec));
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, node_page);

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_compress_flag)) {
			unsigned short compress_flag;

			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_compress_level = compress_flag >>
						COMPRESS_LEVEL_OFFSET;
			fi->i_compress_flag = compress_flag &
					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	init_idisk_time(inode);

	if (!sanity_check_extent_cache(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	/* Need all the flag bits */
	f2fs_init_read_extent_tree(inode, node_page);
	f2fs_init_age_extent_tree(inode);

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
{
	return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
		ino == F2FS_COMPRESS_INO(sbi);
}

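/*
 * Look up (or read in) the inode for @ino and wire up the proper
 * inode/file/address-space operations for its type. Meta inodes (node,
 * meta, compress cache) are set up without reading an on-disk inode.
 */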
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (is_meta_ino(sbi, ino)) {
			f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			ret = -EFSCORRUPTED;
			trace_f2fs_iget_exit(inode, ret);
			iput(inode);
			f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
			return ERR_PTR(ret);
		}

		trace_f2fs_iget(inode);
		return inode;
	}

	if (is_meta_ino(sbi, ino))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inodes
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

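/*
 * Copy the in-core inode state back into the raw inode in @node_page.
 * The caller holds the locked node page; the page is marked dirty here
 * so the change reaches disk.
 */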
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (!f2fs_is_atomic_file(inode) ||
			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		ri->i_size = cpu_to_le64(i_size_read(inode));

	if (et) {
		read_lock(&et->lock);
		set_raw_read_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_compress_flag)) {
			unsigned short compress_flag;

			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			compress_flag = F2FS_I(inode)->i_compress_flag |
				F2FS_I(inode)->i_compress_level <<
						COMPRESS_LEVEL_OFFSET;
			ri->i_compress_flag = cpu_to_le16(compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

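/*
 * Fetch the inode's node page and sync the inode into it, retrying a
 * bounded number of times on transient failures; -ENOENT means the node
 * block was already truncated, while persistent errors stop
 * checkpointing since the update would otherwise be lost.
 */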
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int count = 0;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		/* The node block was truncated. */
		if (err == -ENOENT)
			return;

		if (err == -EFSCORRUPTED)
			goto stop_checkpoint;

		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
stop_checkpoint:
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		return -ENOSPC;
	}

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when we are running out of free
	 * sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) {
		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
		F2FS_I(fi->cow_inode)->atomic_inode = NULL;
		iput(fi->cow_inode);
		fi->cow_inode = NULL;
	}

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		err = -EIO;

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if that node was
			 * truncated previously, truncating the inode node
			 * here will fail.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&fi->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* if f2fs_new_inode() failed, i_ino is zero; skip the invalidation */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to avoid the inode remaining dirty, which would
	 * cause a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that we do not lose this orphan if a
	 * checkpoint is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}