// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

#include <trace/events/f2fs.h>

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (f2fs_encrypted_inode(inode))
		new_fl |= S_ENCRYPTED;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED);
}

static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
		return -EFAULT;
	return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi->sb))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

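/*
 * Compute the on-disk inode checksum: the crc is seeded with the inode
 * number and i_generation, and then covers the whole inode block with
 * the i_inode_checksum field itself treated as zero.
 */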
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_msg(sbi->sb, KERN_WARNING,
			"checksum invalid, ino = %x, %x vs. %x",
			ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
			"run fsck to fix.",
			__func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
			"[%u, %u] run fsck to fix.",
			__func__, inode->i_ino,
			ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode ino=%lx, run fsck to fix.",
			__func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi->sb)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) is with extra_attr, "
			"but extra_attr feature is off",
			__func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
			"max: %zu",
			__func__, inode->i_ino, fi->i_extra_isize,
			F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

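	/* the cached largest extent must map to valid data block addresses */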
	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
							DATA_GENERIC))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
				"is incorrect, run fsck to fix",
				__func__, inode->i_ino,
				ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_data, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_dentry, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	return true;
}

static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
			le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled.
		 * In order to keep inline_dentry's structure for backward
		 * compatibility, we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EINVAL;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		f2fs_update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG) &&
			!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
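	/*
	 * The inode is evicted from cache but not deleted: preserve its
	 * pending append/update write marks in the per-sb ino lists.
	 */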
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear the nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can prevent losing this orphan when
	 * encountering a checkpoint followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}