// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED);
}

static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
					le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}
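/*
 * Compute the inode block checksum: the per-filesystem seed is folded with
 * the inode number and generation, and the block is checksummed with the
 * on-disk i_inode_checksum field treated as zero.
 */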
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}
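	/*
	 * With flexible inline xattr enabled, the per-inode reserved inline
	 * xattr size must be non-zero and within the supported maximum.
	 */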
	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_flexible_inline_xattr(sbi) &&
			f2fs_has_inline_xattr(inode) &&
			(!fi->i_inline_xattr_size ||
			 fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	return true;
}
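/*
 * Read the raw on-disk inode from its node page and initialize the VFS inode
 * and f2fs-specific in-memory fields from it.
 */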
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}
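/*
 * Look up the in-memory inode for @ino, reading it from disk if it is not
 * cached yet, and set up inode and address_space operations by inode type.
 */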
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}
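/*
 * Copy the in-memory inode state back into the raw on-disk inode in
 * @node_page and mark the node page dirty.
 */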
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}
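/*
 * Write back a dirty inode into its node page; called from VFS inode
 * writeback via ->write_inode().
 */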
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the period of urgent cleaning when running out of free
	 * sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of inode in order to release resource of inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering checkpoint
	 * and following sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}