// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

static void __get_inode_rdev(struct inode *inode, struct page *node_page)
{
	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	    S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (addr[0])
			inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
		else
			inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
	}
}

static void __set_inode_rdev(struct inode *inode, struct page *node_page)
{
	__le32 *addr = get_dnode_addr(inode, node_page);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
			addr[1] = 0;
		} else {
			addr[0] = 0;
			addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
			addr[2] = 0;
		}
	}
}

static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

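/*
 * Compute the inode checksum: the crc is seeded with the inode number
 * and generation, then taken over the raw inode with the
 * i_inode_checksum field itself treated as zero.
 */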
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
				sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
				F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

static bool sanity_check_compress_inode(struct inode *inode,
					struct f2fs_inode *ri)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned char clevel;

	if (ri->i_compress_algorithm >= COMPRESS_MAX) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_compress_algorithm);
		return false;
	}
	if (le64_to_cpu(ri->i_compr_blocks) >
			SECTOR_TO_BLOCK(inode->i_blocks)) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
			__func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
			SECTOR_TO_BLOCK(inode->i_blocks));
		return false;
	}
	if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
		ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
		f2fs_warn(sbi,
			"%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
			__func__, inode->i_ino, ri->i_log_cluster_size);
		return false;
	}

	clevel = le16_to_cpu(ri->i_compress_flag) >>
				COMPRESS_LEVEL_OFFSET;
	switch (ri->i_compress_algorithm) {
	case COMPRESS_LZO:
#ifdef CONFIG_F2FS_FS_LZO
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZORLE:
#ifdef CONFIG_F2FS_FS_LZORLE
		if (clevel)
			goto err_level;
#endif
		break;
	case COMPRESS_LZ4:
#ifdef CONFIG_F2FS_FS_LZ4
#ifdef CONFIG_F2FS_FS_LZ4HC
		if (clevel &&
		    (clevel < LZ4HC_MIN_CLEVEL || clevel > LZ4HC_MAX_CLEVEL))
			goto err_level;
#else
		if (clevel)
			goto err_level;
#endif
#endif
		break;
	case COMPRESS_ZSTD:
#ifdef CONFIG_F2FS_FS_ZSTD
		if (clevel < zstd_min_clevel() || clevel > zstd_max_clevel())
			goto err_level;
#endif
		break;
	default:
		goto err_level;
	}

	return true;
err_level:
	f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
		  __func__, inode->i_ino, clevel);
	return false;
}

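/*
 * Validate the on-disk inode fields (i_blocks, node footer, extra
 * attributes, inline flags, xattr nid) before the in-memory inode is
 * published; returns false so the caller can reject the inode as
 * corrupted.
 */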
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_has_extra_attr(inode)) {
		if (!f2fs_sb_has_extra_attr(sbi)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
				  __func__, inode->i_ino);
			return false;
		}
		if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_extra_isize,
				  F2FS_TOTAL_EXTRA_ATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			f2fs_has_inline_xattr(inode) &&
			(!fi->i_inline_xattr_size ||
			fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
			f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
				  __func__, inode->i_ino, fi->i_inline_xattr_size,
				  MAX_INLINE_XATTR_SIZE);
			return false;
		}
		if (f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_compress_flag)) {
			if (!sanity_check_compress_inode(inode, ri))
				return false;
		}
	} else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (!f2fs_sb_has_extra_attr(sbi)) {
		if (f2fs_sb_has_project_quota(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
			return false;
		}
		if (f2fs_sb_has_inode_chksum(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
			return false;
		}
		if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
			return false;
		}
		if (f2fs_sb_has_inode_crtime(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
			return false;
		}
		if (f2fs_sb_has_compression(sbi)) {
			f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
				  __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
			return false;
		}
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
			  __func__, inode->i_ino, fi->i_xattr_nid);
		return false;
	}

	return true;
}

static void init_idisk_time(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	fi->i_disk_time[0] = inode->i_atime;
	fi->i_disk_time[1] = inode_get_ctime(inode);
	fi->i_disk_time[2] = inode->i_mtime;
}

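/*
 * Read the raw inode from its node page and initialize the generic
 * inode and f2fs_inode_info fields from it.
 */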
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
			le32_to_cpu(ri->i_ctime_nsec));
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, node_page);

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_compress_flag)) {
			unsigned short compress_flag;

			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_compress_level = compress_flag >>
						COMPRESS_LEVEL_OFFSET;
			fi->i_compress_flag = compress_flag &
					GENMASK(COMPRESS_LEVEL_OFFSET - 1, 0);
			fi->i_cluster_size = BIT(fi->i_log_cluster_size);
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	init_idisk_time(inode);

	/* Need all the flag bits */
	f2fs_init_read_extent_tree(inode, node_page);
	f2fs_init_age_extent_tree(inode);

	if (!sanity_check_extent_cache(inode)) {
		f2fs_put_page(node_page, 1);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

static bool is_meta_ino(struct f2fs_sb_info *sbi, unsigned int ino)
{
	return ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi) ||
		ino == F2FS_COMPRESS_INO(sbi);
}

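/*
 * Look up an inode by number, reading it from disk if it is not cached
 * yet, and wire up the inode and address-space operations matching its
 * type.
 */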
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (is_meta_ino(sbi, ino)) {
			f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			ret = -EFSCORRUPTED;
			trace_f2fs_iget_exit(inode, ret);
			iput(inode);
			f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
			return ERR_PTR(ret);
		}

		trace_f2fs_iget(inode);
		return inode;
	}

	if (is_meta_ino(sbi, ino))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inode
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
				GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

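/*
 * Copy the in-memory inode state back into its raw node page and mark
 * the page dirty; the caller passes a locked node page.
 */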
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (!f2fs_is_atomic_file(inode) ||
			is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
		ri->i_size = cpu_to_le64(i_size_read(inode));

	if (et) {
		read_lock(&et->lock);
		set_raw_read_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_compress_flag)) {
			unsigned short compress_flag;

			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			compress_flag = F2FS_I(inode)->i_compress_flag |
				F2FS_I(inode)->i_compress_level <<
						COMPRESS_LEVEL_OFFSET;
			ri->i_compress_flag = cpu_to_le16(compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	init_idisk_time(inode);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

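/*
 * Grab the inode's node page and write the inode back through
 * f2fs_update_inode(), retrying transient failures and stopping
 * checkpointing if the node page cannot be read back.
 */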
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int count = 0;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		/* The node block was truncated. */
		if (err == -ENOENT)
			return;

		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

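/*
 * ->write_inode() callback: flush a dirty inode into its node page, and
 * balance dirty node pages when called from writeback.
 */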
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent from producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	f2fs_abort_atomic_write(inode, true);

	if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) {
		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
		F2FS_I(fi->cow_inode)->atomic_inode = NULL;
		iput(fi->cow_inode);
		fi->cow_inode = NULL;
	}

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		err = -EIO;

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if it was truncated
			 * previously, truncation of the inode node will fail.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&fi->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
		!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
					 inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

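/*
 * Tear down a half-created inode after a failure in the create path:
 * drop its link count, flush the raw inode, and either register it as
 * an orphan or give the preallocated nid back. The caller should have
 * called f2fs_lock_op(); it is released here via f2fs_unlock_op().
 */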
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of inode in order to release resource of inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid inode being remained as dirty, resulting
	 * in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that we don't lose this orphan if a
	 * checkpoint is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}