17c1a000dSChao Yu // SPDX-License-Identifier: GPL-2.0 20a8165d7SJaegeuk Kim /* 3d624c96fSJaegeuk Kim * fs/f2fs/recovery.c 4d624c96fSJaegeuk Kim * 5d624c96fSJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6d624c96fSJaegeuk Kim * http://www.samsung.com/ 7d624c96fSJaegeuk Kim */ 87ad08a58SDaniel Rosenberg #include <asm/unaligned.h> 9d624c96fSJaegeuk Kim #include <linux/fs.h> 10d624c96fSJaegeuk Kim #include <linux/f2fs_fs.h> 114034247aSNeilBrown #include <linux/sched/mm.h> 12d624c96fSJaegeuk Kim #include "f2fs.h" 13d624c96fSJaegeuk Kim #include "node.h" 14d624c96fSJaegeuk Kim #include "segment.h" 15d624c96fSJaegeuk Kim 16441ac5cbSJaegeuk Kim /* 17441ac5cbSJaegeuk Kim * Roll forward recovery scenarios. 18441ac5cbSJaegeuk Kim * 19441ac5cbSJaegeuk Kim * [Term] F: fsync_mark, D: dentry_mark 20441ac5cbSJaegeuk Kim * 21441ac5cbSJaegeuk Kim * 1. inode(x) | CP | inode(x) | dnode(F) 22441ac5cbSJaegeuk Kim * -> Update the latest inode(x). 23441ac5cbSJaegeuk Kim * 24441ac5cbSJaegeuk Kim * 2. inode(x) | CP | inode(F) | dnode(F) 25441ac5cbSJaegeuk Kim * -> No problem. 26441ac5cbSJaegeuk Kim * 27441ac5cbSJaegeuk Kim * 3. inode(x) | CP | dnode(F) | inode(x) 28441ac5cbSJaegeuk Kim * -> Recover to the latest dnode(F), and drop the last inode(x) 29441ac5cbSJaegeuk Kim * 30441ac5cbSJaegeuk Kim * 4. inode(x) | CP | dnode(F) | inode(F) 31441ac5cbSJaegeuk Kim * -> No problem. 32441ac5cbSJaegeuk Kim * 33441ac5cbSJaegeuk Kim * 5. CP | inode(x) | dnode(F) 34441ac5cbSJaegeuk Kim * -> The inode(DF) was missing. Should drop this dnode(F). 35441ac5cbSJaegeuk Kim * 36441ac5cbSJaegeuk Kim * 6. CP | inode(DF) | dnode(F) 37441ac5cbSJaegeuk Kim * -> No problem. 38441ac5cbSJaegeuk Kim * 39441ac5cbSJaegeuk Kim * 7. CP | dnode(F) | inode(DF) 40441ac5cbSJaegeuk Kim * -> If f2fs_iget fails, then goto next to find inode(DF). 41441ac5cbSJaegeuk Kim * 42441ac5cbSJaegeuk Kim * 8. CP | dnode(F) | inode(x) 43441ac5cbSJaegeuk Kim * -> If f2fs_iget fails, then goto next to find inode(DF). 
44441ac5cbSJaegeuk Kim * But it will fail due to no inode(DF). 45441ac5cbSJaegeuk Kim */ 46441ac5cbSJaegeuk Kim 47d624c96fSJaegeuk Kim static struct kmem_cache *fsync_entry_slab; 48d624c96fSJaegeuk Kim 495298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 504d9a2bb1SChao Yu extern struct kmem_cache *f2fs_cf_name_slab; 514d9a2bb1SChao Yu #endif 524d9a2bb1SChao Yu 534d57b86dSChao Yu bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi) 54d624c96fSJaegeuk Kim { 5541382ec4SJaegeuk Kim s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count); 5641382ec4SJaegeuk Kim 5741382ec4SJaegeuk Kim if (sbi->last_valid_block_count + nalloc > sbi->user_block_count) 58d624c96fSJaegeuk Kim return false; 5947c8ebccSJaegeuk Kim if (NM_I(sbi)->max_rf_node_blocks && 6047c8ebccSJaegeuk Kim percpu_counter_sum_positive(&sbi->rf_node_block_count) >= 6147c8ebccSJaegeuk Kim NM_I(sbi)->max_rf_node_blocks) 6247c8ebccSJaegeuk Kim return false; 63d624c96fSJaegeuk Kim return true; 64d624c96fSJaegeuk Kim } 65d624c96fSJaegeuk Kim 66d624c96fSJaegeuk Kim static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, 67d624c96fSJaegeuk Kim nid_t ino) 68d624c96fSJaegeuk Kim { 69d624c96fSJaegeuk Kim struct fsync_inode_entry *entry; 70d624c96fSJaegeuk Kim 712d7b822aSChao Yu list_for_each_entry(entry, head, list) 72d624c96fSJaegeuk Kim if (entry->inode->i_ino == ino) 73d624c96fSJaegeuk Kim return entry; 742d7b822aSChao Yu 75d624c96fSJaegeuk Kim return NULL; 76d624c96fSJaegeuk Kim } 77d624c96fSJaegeuk Kim 78f4702d61SJaegeuk Kim static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi, 794b2414d0SChao Yu struct list_head *head, nid_t ino, bool quota_inode) 803f8ab270SChao Yu { 81e8ea9b3dSJaegeuk Kim struct inode *inode; 823f8ab270SChao Yu struct fsync_inode_entry *entry; 834b2414d0SChao Yu int err; 843f8ab270SChao Yu 85e8ea9b3dSJaegeuk Kim inode = f2fs_iget_retry(sbi->sb, ino); 86f4702d61SJaegeuk Kim if (IS_ERR(inode)) 87f4702d61SJaegeuk Kim return 
ERR_CAST(inode); 88f4702d61SJaegeuk Kim 8910a26878SChao Yu err = f2fs_dquot_initialize(inode); 904b2414d0SChao Yu if (err) 914b2414d0SChao Yu goto err_out; 924b2414d0SChao Yu 934b2414d0SChao Yu if (quota_inode) { 944b2414d0SChao Yu err = dquot_alloc_inode(inode); 954b2414d0SChao Yu if (err) 964b2414d0SChao Yu goto err_out; 974b2414d0SChao Yu } 984b2414d0SChao Yu 9932410577SChao Yu entry = f2fs_kmem_cache_alloc(fsync_entry_slab, 10032410577SChao Yu GFP_F2FS_ZERO, true, NULL); 1013f8ab270SChao Yu entry->inode = inode; 1023f8ab270SChao Yu list_add_tail(&entry->list, head); 1033f8ab270SChao Yu 1043f8ab270SChao Yu return entry; 1054b2414d0SChao Yu err_out: 1064b2414d0SChao Yu iput(inode); 1074b2414d0SChao Yu return ERR_PTR(err); 1083f8ab270SChao Yu } 1093f8ab270SChao Yu 11026b5a079SSheng Yong static void del_fsync_inode(struct fsync_inode_entry *entry, int drop) 1113f8ab270SChao Yu { 11226b5a079SSheng Yong if (drop) { 11326b5a079SSheng Yong /* inode should not be recovered, drop it */ 11426b5a079SSheng Yong f2fs_inode_synced(entry->inode); 11526b5a079SSheng Yong } 1163f8ab270SChao Yu iput(entry->inode); 1173f8ab270SChao Yu list_del(&entry->list); 1183f8ab270SChao Yu kmem_cache_free(fsync_entry_slab, entry); 1193f8ab270SChao Yu } 1203f8ab270SChao Yu 12143c780baSEric Biggers static int init_recovered_filename(const struct inode *dir, 12243c780baSEric Biggers struct f2fs_inode *raw_inode, 12343c780baSEric Biggers struct f2fs_filename *fname, 12443c780baSEric Biggers struct qstr *usr_fname) 12543c780baSEric Biggers { 12643c780baSEric Biggers int err; 12743c780baSEric Biggers 12843c780baSEric Biggers memset(fname, 0, sizeof(*fname)); 12943c780baSEric Biggers fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen); 13043c780baSEric Biggers fname->disk_name.name = raw_inode->i_name; 13143c780baSEric Biggers 13243c780baSEric Biggers if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN)) 13343c780baSEric Biggers return -ENAMETOOLONG; 13443c780baSEric Biggers 13543c780baSEric 
Biggers if (!IS_ENCRYPTED(dir)) { 13643c780baSEric Biggers usr_fname->name = fname->disk_name.name; 13743c780baSEric Biggers usr_fname->len = fname->disk_name.len; 13843c780baSEric Biggers fname->usr_fname = usr_fname; 13943c780baSEric Biggers } 14043c780baSEric Biggers 14143c780baSEric Biggers /* Compute the hash of the filename */ 1427ad08a58SDaniel Rosenberg if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) { 1437ad08a58SDaniel Rosenberg /* 1447ad08a58SDaniel Rosenberg * In this case the hash isn't computable without the key, so it 1457ad08a58SDaniel Rosenberg * was saved on-disk. 1467ad08a58SDaniel Rosenberg */ 1477ad08a58SDaniel Rosenberg if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN) 1487ad08a58SDaniel Rosenberg return -EINVAL; 1497ad08a58SDaniel Rosenberg fname->hash = get_unaligned((f2fs_hash_t *) 1507ad08a58SDaniel Rosenberg &raw_inode->i_name[fname->disk_name.len]); 1517ad08a58SDaniel Rosenberg } else if (IS_CASEFOLDED(dir)) { 15243c780baSEric Biggers err = f2fs_init_casefolded_name(dir, fname); 15343c780baSEric Biggers if (err) 15443c780baSEric Biggers return err; 15543c780baSEric Biggers f2fs_hash_filename(dir, fname); 1565298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 15743c780baSEric Biggers /* Case-sensitive match is fine for recovery */ 1584d9a2bb1SChao Yu kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name); 15943c780baSEric Biggers fname->cf_name.name = NULL; 16043c780baSEric Biggers #endif 16143c780baSEric Biggers } else { 16243c780baSEric Biggers f2fs_hash_filename(dir, fname); 16343c780baSEric Biggers } 16443c780baSEric Biggers return 0; 16543c780baSEric Biggers } 16643c780baSEric Biggers 167f61cce5bSChao Yu static int recover_dentry(struct inode *inode, struct page *ipage, 168f61cce5bSChao Yu struct list_head *dir_list) 169d624c96fSJaegeuk Kim { 17058bfaf44SJaegeuk Kim struct f2fs_inode *raw_inode = F2FS_INODE(ipage); 17174d0b917SJaegeuk Kim nid_t pino = le32_to_cpu(raw_inode->i_pino); 1726b8213d9SJaegeuk Kim struct 
f2fs_dir_entry *de; 17343c780baSEric Biggers struct f2fs_filename fname; 17443c780baSEric Biggers struct qstr usr_fname; 175d624c96fSJaegeuk Kim struct page *page; 1766b8213d9SJaegeuk Kim struct inode *dir, *einode; 177f61cce5bSChao Yu struct fsync_inode_entry *entry; 178d624c96fSJaegeuk Kim int err = 0; 179e7ba108aSShuoran Liu char *name; 180d624c96fSJaegeuk Kim 181f61cce5bSChao Yu entry = get_fsync_inode(dir_list, pino); 182f61cce5bSChao Yu if (!entry) { 1834b2414d0SChao Yu entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, 1844b2414d0SChao Yu pino, false); 185f4702d61SJaegeuk Kim if (IS_ERR(entry)) { 186f4702d61SJaegeuk Kim dir = ERR_CAST(entry); 187f4702d61SJaegeuk Kim err = PTR_ERR(entry); 188f61cce5bSChao Yu goto out; 189e7d55452SJaegeuk Kim } 190f61cce5bSChao Yu } 191f61cce5bSChao Yu 192f61cce5bSChao Yu dir = entry->inode; 19343c780baSEric Biggers err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname); 19443c780baSEric Biggers if (err) 195f61cce5bSChao Yu goto out; 1966b8213d9SJaegeuk Kim retry: 197e7ba108aSShuoran Liu de = __f2fs_find_entry(dir, &fname, &page); 198418f6c27SJaegeuk Kim if (de && inode->i_ino == le32_to_cpu(de->ino)) 199bdbc90faSYunlong Song goto out_put; 200418f6c27SJaegeuk Kim 2016b8213d9SJaegeuk Kim if (de) { 202e8ea9b3dSJaegeuk Kim einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino)); 2036b8213d9SJaegeuk Kim if (IS_ERR(einode)) { 2046b8213d9SJaegeuk Kim WARN_ON(1); 2055c1f9927SChao Yu err = PTR_ERR(einode); 2065c1f9927SChao Yu if (err == -ENOENT) 2076b8213d9SJaegeuk Kim err = -EEXIST; 208bdbc90faSYunlong Song goto out_put; 2092e5558f4SRuss W. Knize } 2104b2414d0SChao Yu 21110a26878SChao Yu err = f2fs_dquot_initialize(einode); 2124b2414d0SChao Yu if (err) { 2134b2414d0SChao Yu iput(einode); 214bdbc90faSYunlong Song goto out_put; 2154b2414d0SChao Yu } 2164b2414d0SChao Yu 2174d57b86dSChao Yu err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode)); 2182e5558f4SRuss W. Knize if (err) { 2192e5558f4SRuss W. 
Knize iput(einode); 220bdbc90faSYunlong Song goto out_put; 2216b8213d9SJaegeuk Kim } 222dbeacf02SChao Yu f2fs_delete_entry(de, page, dir, einode); 2236b8213d9SJaegeuk Kim iput(einode); 2246b8213d9SJaegeuk Kim goto retry; 22591246c21SChao Yu } else if (IS_ERR(page)) { 22691246c21SChao Yu err = PTR_ERR(page); 22791246c21SChao Yu } else { 2284d57b86dSChao Yu err = f2fs_add_dentry(dir, &fname, inode, 22991246c21SChao Yu inode->i_ino, inode->i_mode); 2306b8213d9SJaegeuk Kim } 231e8ea9b3dSJaegeuk Kim if (err == -ENOMEM) 232e8ea9b3dSJaegeuk Kim goto retry; 2332e5558f4SRuss W. Knize goto out; 2342e5558f4SRuss W. Knize 235bdbc90faSYunlong Song out_put: 2362e5558f4SRuss W. Knize f2fs_put_page(page, 0); 237d624c96fSJaegeuk Kim out: 238e7ba108aSShuoran Liu if (file_enc_name(inode)) 239e7ba108aSShuoran Liu name = "<encrypted>"; 240e7ba108aSShuoran Liu else 241e7ba108aSShuoran Liu name = raw_inode->i_name; 242dcbb4c10SJoe Perches f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d", 243e7ba108aSShuoran Liu __func__, ino_of_node(ipage), name, 244f28c06faSDan Carpenter IS_ERR(dir) ? 
0 : dir->i_ino, err); 245d624c96fSJaegeuk Kim return err; 246d624c96fSJaegeuk Kim } 247d624c96fSJaegeuk Kim 248af033b2aSChao Yu static int recover_quota_data(struct inode *inode, struct page *page) 249af033b2aSChao Yu { 250af033b2aSChao Yu struct f2fs_inode *raw = F2FS_INODE(page); 251af033b2aSChao Yu struct iattr attr; 252af033b2aSChao Yu uid_t i_uid = le32_to_cpu(raw->i_uid); 253af033b2aSChao Yu gid_t i_gid = le32_to_cpu(raw->i_gid); 254af033b2aSChao Yu int err; 255af033b2aSChao Yu 256af033b2aSChao Yu memset(&attr, 0, sizeof(attr)); 257af033b2aSChao Yu 258b27c82e1SChristian Brauner attr.ia_vfsuid = VFSUIDT_INIT(make_kuid(inode->i_sb->s_user_ns, i_uid)); 259b27c82e1SChristian Brauner attr.ia_vfsgid = VFSGIDT_INIT(make_kgid(inode->i_sb->s_user_ns, i_gid)); 260af033b2aSChao Yu 261b27c82e1SChristian Brauner if (!vfsuid_eq(attr.ia_vfsuid, i_uid_into_vfsuid(&init_user_ns, inode))) 262af033b2aSChao Yu attr.ia_valid |= ATTR_UID; 263b27c82e1SChristian Brauner if (!vfsgid_eq(attr.ia_vfsgid, i_gid_into_vfsgid(&init_user_ns, inode))) 264af033b2aSChao Yu attr.ia_valid |= ATTR_GID; 265af033b2aSChao Yu 266af033b2aSChao Yu if (!attr.ia_valid) 267af033b2aSChao Yu return 0; 268af033b2aSChao Yu 26971e7b535SChristian Brauner err = dquot_transfer(&init_user_ns, inode, &attr); 270af033b2aSChao Yu if (err) 271af033b2aSChao Yu set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR); 272af033b2aSChao Yu return err; 273af033b2aSChao Yu } 274af033b2aSChao Yu 27537a086f0SJaegeuk Kim static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri) 27637a086f0SJaegeuk Kim { 27737a086f0SJaegeuk Kim if (ri->i_inline & F2FS_PIN_FILE) 27837a086f0SJaegeuk Kim set_inode_flag(inode, FI_PIN_FILE); 27937a086f0SJaegeuk Kim else 28037a086f0SJaegeuk Kim clear_inode_flag(inode, FI_PIN_FILE); 28137a086f0SJaegeuk Kim if (ri->i_inline & F2FS_DATA_EXIST) 28237a086f0SJaegeuk Kim set_inode_flag(inode, FI_DATA_EXIST); 28337a086f0SJaegeuk Kim else 28437a086f0SJaegeuk Kim clear_inode_flag(inode, 
FI_DATA_EXIST); 28537a086f0SJaegeuk Kim } 28637a086f0SJaegeuk Kim 287af033b2aSChao Yu static int recover_inode(struct inode *inode, struct page *page) 288441ac5cbSJaegeuk Kim { 289441ac5cbSJaegeuk Kim struct f2fs_inode *raw = F2FS_INODE(page); 290e7d55452SJaegeuk Kim char *name; 291af033b2aSChao Yu int err; 292441ac5cbSJaegeuk Kim 293441ac5cbSJaegeuk Kim inode->i_mode = le16_to_cpu(raw->i_mode); 294af033b2aSChao Yu 295af033b2aSChao Yu err = recover_quota_data(inode, page); 296af033b2aSChao Yu if (err) 297af033b2aSChao Yu return err; 298af033b2aSChao Yu 299dc4cd125SChao Yu i_uid_write(inode, le32_to_cpu(raw->i_uid)); 300dc4cd125SChao Yu i_gid_write(inode, le32_to_cpu(raw->i_gid)); 301f4474aa6SChao Yu 302f4474aa6SChao Yu if (raw->i_inline & F2FS_EXTRA_ATTR) { 3037beb01f7SChao Yu if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) && 304f4474aa6SChao Yu F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize), 305f4474aa6SChao Yu i_projid)) { 306f4474aa6SChao Yu projid_t i_projid; 30778130819SChao Yu kprojid_t kprojid; 308f4474aa6SChao Yu 309f4474aa6SChao Yu i_projid = (projid_t)le32_to_cpu(raw->i_projid); 31078130819SChao Yu kprojid = make_kprojid(&init_user_ns, i_projid); 31178130819SChao Yu 31278130819SChao Yu if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) { 31378130819SChao Yu err = f2fs_transfer_project_quota(inode, 31478130819SChao Yu kprojid); 31578130819SChao Yu if (err) 31678130819SChao Yu return err; 31778130819SChao Yu F2FS_I(inode)->i_projid = kprojid; 31878130819SChao Yu } 319f4474aa6SChao Yu } 320f4474aa6SChao Yu } 321f4474aa6SChao Yu 322fc9581c8SJaegeuk Kim f2fs_i_size_write(inode, le64_to_cpu(raw->i_size)); 3239f0552e0SChao Yu inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime); 324441ac5cbSJaegeuk Kim inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime); 325441ac5cbSJaegeuk Kim inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime); 3269f0552e0SChao Yu inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec); 327441ac5cbSJaegeuk Kim inode->i_ctime.tv_nsec = 
le32_to_cpu(raw->i_ctime_nsec); 328441ac5cbSJaegeuk Kim inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec); 329f356fe0cSJaegeuk Kim 33026787236SJaegeuk Kim F2FS_I(inode)->i_advise = raw->i_advise; 33119c73a69SChao Yu F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags); 3320c093b59SChao Yu f2fs_set_inode_flags(inode); 3337de36cf3SChao Yu F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 3347de36cf3SChao Yu le16_to_cpu(raw->i_gc_failures); 33526787236SJaegeuk Kim 33637a086f0SJaegeuk Kim recover_inline_flags(inode, raw); 33737a086f0SJaegeuk Kim 3384a1728caSChao Yu f2fs_mark_inode_dirty_sync(inode, true); 3394a1728caSChao Yu 340e7d55452SJaegeuk Kim if (file_enc_name(inode)) 341e7d55452SJaegeuk Kim name = "<encrypted>"; 342e7d55452SJaegeuk Kim else 343e7d55452SJaegeuk Kim name = F2FS_INODE(page)->i_name; 344e7d55452SJaegeuk Kim 345dcbb4c10SJoe Perches f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x", 34637a086f0SJaegeuk Kim ino_of_node(page), name, raw->i_inline); 347af033b2aSChao Yu return 0; 348d624c96fSJaegeuk Kim } 349d624c96fSJaegeuk Kim 350430f163bSChao Yu static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi, 351430f163bSChao Yu unsigned int ra_blocks, unsigned int blkaddr, 352430f163bSChao Yu unsigned int next_blkaddr) 353430f163bSChao Yu { 354430f163bSChao Yu if (blkaddr + 1 == next_blkaddr) 355430f163bSChao Yu ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS, 356430f163bSChao Yu ra_blocks * 2); 357430f163bSChao Yu else if (next_blkaddr % sbi->blocks_per_seg) 358430f163bSChao Yu ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS, 359430f163bSChao Yu ra_blocks / 2); 360430f163bSChao Yu return ra_blocks; 361430f163bSChao Yu } 362430f163bSChao Yu 363d40d30c5SJaegeuk Kim static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, 364d40d30c5SJaegeuk Kim bool check_only) 365d624c96fSJaegeuk Kim { 366d624c96fSJaegeuk Kim struct curseg_info *curseg; 3674c521f49SJaegeuk Kim struct page *page = 
NULL; 368d624c96fSJaegeuk Kim block_t blkaddr; 369fb0e72c8SChao Yu unsigned int loop_cnt = 0; 370430f163bSChao Yu unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS; 37182902c06SChao Yu unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg - 372fb0e72c8SChao Yu valid_user_blocks(sbi); 373d624c96fSJaegeuk Kim int err = 0; 374d624c96fSJaegeuk Kim 375d624c96fSJaegeuk Kim /* get node pages in the current segment */ 376d624c96fSJaegeuk Kim curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); 377695fd1edSChao Yu blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); 378d624c96fSJaegeuk Kim 379d624c96fSJaegeuk Kim while (1) { 380d624c96fSJaegeuk Kim struct fsync_inode_entry *entry; 381d624c96fSJaegeuk Kim 382e1da7872SChao Yu if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) 3834c521f49SJaegeuk Kim return 0; 384d624c96fSJaegeuk Kim 3854d57b86dSChao Yu page = f2fs_get_tmp_page(sbi, blkaddr); 3867735730dSChao Yu if (IS_ERR(page)) { 3877735730dSChao Yu err = PTR_ERR(page); 3887735730dSChao Yu break; 3897735730dSChao Yu } 390393ff91fSJaegeuk Kim 39198838579SChao Yu if (!is_recoverable_dnode(page)) { 39298838579SChao Yu f2fs_put_page(page, 1); 393f356fe0cSJaegeuk Kim break; 39498838579SChao Yu } 395d624c96fSJaegeuk Kim 396d624c96fSJaegeuk Kim if (!is_fsync_dnode(page)) 397d624c96fSJaegeuk Kim goto next; 398d624c96fSJaegeuk Kim 399d624c96fSJaegeuk Kim entry = get_fsync_inode(head, ino_of_node(page)); 400d47b8715SChao Yu if (!entry) { 4014b2414d0SChao Yu bool quota_inode = false; 4024b2414d0SChao Yu 403d40d30c5SJaegeuk Kim if (!check_only && 404d40d30c5SJaegeuk Kim IS_INODE(page) && is_dent_dnode(page)) { 4054d57b86dSChao Yu err = f2fs_recover_inode_page(sbi, page); 40698838579SChao Yu if (err) { 40798838579SChao Yu f2fs_put_page(page, 1); 408f356fe0cSJaegeuk Kim break; 40998838579SChao Yu } 4104b2414d0SChao Yu quota_inode = true; 411d624c96fSJaegeuk Kim } 412d624c96fSJaegeuk Kim 413441ac5cbSJaegeuk Kim /* 414441ac5cbSJaegeuk Kim * CP | dnode(F) | inode(DF) 415441ac5cbSJaegeuk Kim * For this 
case, we should not give up now. 416441ac5cbSJaegeuk Kim */ 4174b2414d0SChao Yu entry = add_fsync_inode(sbi, head, ino_of_node(page), 4184b2414d0SChao Yu quota_inode); 419f4702d61SJaegeuk Kim if (IS_ERR(entry)) { 420f4702d61SJaegeuk Kim err = PTR_ERR(entry); 4218fbc418fSJaegeuk Kim if (err == -ENOENT) { 4228fbc418fSJaegeuk Kim err = 0; 423441ac5cbSJaegeuk Kim goto next; 4248fbc418fSJaegeuk Kim } 42598838579SChao Yu f2fs_put_page(page, 1); 426f356fe0cSJaegeuk Kim break; 427d624c96fSJaegeuk Kim } 428d624c96fSJaegeuk Kim } 429addbe45bSJaegeuk Kim entry->blkaddr = blkaddr; 430addbe45bSJaegeuk Kim 431608514deSJaegeuk Kim if (IS_INODE(page) && is_dent_dnode(page)) 432c52e1b10SJaegeuk Kim entry->last_dentry = blkaddr; 433d624c96fSJaegeuk Kim next: 434fb0e72c8SChao Yu /* sanity check in order to detect looped node chain */ 435fb0e72c8SChao Yu if (++loop_cnt >= free_blocks || 436fb0e72c8SChao Yu blkaddr == next_blkaddr_of_node(page)) { 437dcbb4c10SJoe Perches f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u", 438dcbb4c10SJoe Perches __func__, blkaddr, 439dcbb4c10SJoe Perches next_blkaddr_of_node(page)); 44098838579SChao Yu f2fs_put_page(page, 1); 441fb0e72c8SChao Yu err = -EINVAL; 442fb0e72c8SChao Yu break; 443fb0e72c8SChao Yu } 444fb0e72c8SChao Yu 445430f163bSChao Yu ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr, 446430f163bSChao Yu next_blkaddr_of_node(page)); 447430f163bSChao Yu 448d624c96fSJaegeuk Kim /* check next segment */ 449d624c96fSJaegeuk Kim blkaddr = next_blkaddr_of_node(page); 4504c521f49SJaegeuk Kim f2fs_put_page(page, 1); 451635aee1fSChao Yu 452430f163bSChao Yu f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks); 453d624c96fSJaegeuk Kim } 454d624c96fSJaegeuk Kim return err; 455d624c96fSJaegeuk Kim } 456d624c96fSJaegeuk Kim 45726b5a079SSheng Yong static void destroy_fsync_dnodes(struct list_head *head, int drop) 458d624c96fSJaegeuk Kim { 459d8b79b2fSDan Carpenter struct fsync_inode_entry *entry, *tmp; 460d8b79b2fSDan Carpenter 
4613f8ab270SChao Yu list_for_each_entry_safe(entry, tmp, head, list) 46226b5a079SSheng Yong del_fsync_inode(entry, drop); 463d624c96fSJaegeuk Kim } 464d624c96fSJaegeuk Kim 46539cf72cfSJaegeuk Kim static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, 466b292dcabSJaegeuk Kim block_t blkaddr, struct dnode_of_data *dn) 467d624c96fSJaegeuk Kim { 468d624c96fSJaegeuk Kim struct seg_entry *sentry; 469d624c96fSJaegeuk Kim unsigned int segno = GET_SEGNO(sbi, blkaddr); 470491c0854SJaegeuk Kim unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 471f6517cfcSJaegeuk Kim struct f2fs_summary_block *sum_node; 472d624c96fSJaegeuk Kim struct f2fs_summary sum; 473f6517cfcSJaegeuk Kim struct page *sum_page, *node_page; 474c9ef4810SJaegeuk Kim struct dnode_of_data tdn = *dn; 475b292dcabSJaegeuk Kim nid_t ino, nid; 476d624c96fSJaegeuk Kim struct inode *inode; 477c6ad7fd1SChao Yu unsigned int offset, ofs_in_node, max_addrs; 478d624c96fSJaegeuk Kim block_t bidx; 479d624c96fSJaegeuk Kim int i; 480d624c96fSJaegeuk Kim 481d624c96fSJaegeuk Kim sentry = get_seg_entry(sbi, segno); 482d624c96fSJaegeuk Kim if (!f2fs_test_bit(blkoff, sentry->cur_valid_map)) 48339cf72cfSJaegeuk Kim return 0; 484d624c96fSJaegeuk Kim 485d624c96fSJaegeuk Kim /* Get the previous summary */ 486125c9fb1SJaegeuk Kim for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 487d624c96fSJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, i); 4885f029c04SYi Zhuang 489d624c96fSJaegeuk Kim if (curseg->segno == segno) { 490d624c96fSJaegeuk Kim sum = curseg->sum_blk->entries[blkoff]; 491f6517cfcSJaegeuk Kim goto got_it; 492d624c96fSJaegeuk Kim } 493d624c96fSJaegeuk Kim } 494d624c96fSJaegeuk Kim 4954d57b86dSChao Yu sum_page = f2fs_get_sum_page(sbi, segno); 496edc55aafSJaegeuk Kim if (IS_ERR(sum_page)) 497edc55aafSJaegeuk Kim return PTR_ERR(sum_page); 498f6517cfcSJaegeuk Kim sum_node = (struct f2fs_summary_block *)page_address(sum_page); 499f6517cfcSJaegeuk Kim sum = sum_node->entries[blkoff]; 500f6517cfcSJaegeuk 
Kim f2fs_put_page(sum_page, 1); 501f6517cfcSJaegeuk Kim got_it: 502b292dcabSJaegeuk Kim /* Use the locked dnode page and inode */ 503b292dcabSJaegeuk Kim nid = le32_to_cpu(sum.nid); 504c6ad7fd1SChao Yu ofs_in_node = le16_to_cpu(sum.ofs_in_node); 505c6ad7fd1SChao Yu 506c6ad7fd1SChao Yu max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode); 507c6ad7fd1SChao Yu if (ofs_in_node >= max_addrs) { 508c6ad7fd1SChao Yu f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u", 509c6ad7fd1SChao Yu ofs_in_node, dn->inode->i_ino, nid, max_addrs); 51095fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUMMARY); 511c6ad7fd1SChao Yu return -EFSCORRUPTED; 512c6ad7fd1SChao Yu } 513c6ad7fd1SChao Yu 514b292dcabSJaegeuk Kim if (dn->inode->i_ino == nid) { 515b292dcabSJaegeuk Kim tdn.nid = nid; 516c9ef4810SJaegeuk Kim if (!dn->inode_page_locked) 517c9ef4810SJaegeuk Kim lock_page(dn->inode_page); 518b292dcabSJaegeuk Kim tdn.node_page = dn->inode_page; 519c6ad7fd1SChao Yu tdn.ofs_in_node = ofs_in_node; 520c9ef4810SJaegeuk Kim goto truncate_out; 521b292dcabSJaegeuk Kim } else if (dn->nid == nid) { 522c6ad7fd1SChao Yu tdn.ofs_in_node = ofs_in_node; 523c9ef4810SJaegeuk Kim goto truncate_out; 524b292dcabSJaegeuk Kim } 525b292dcabSJaegeuk Kim 526d624c96fSJaegeuk Kim /* Get the node page */ 5274d57b86dSChao Yu node_page = f2fs_get_node_page(sbi, nid); 52839cf72cfSJaegeuk Kim if (IS_ERR(node_page)) 52939cf72cfSJaegeuk Kim return PTR_ERR(node_page); 530de93653fSJaegeuk Kim 531de93653fSJaegeuk Kim offset = ofs_of_node(node_page); 532d624c96fSJaegeuk Kim ino = ino_of_node(node_page); 533d624c96fSJaegeuk Kim f2fs_put_page(node_page, 1); 534d624c96fSJaegeuk Kim 53560979115SJaegeuk Kim if (ino != dn->inode->i_ino) { 5364b2414d0SChao Yu int ret; 5374b2414d0SChao Yu 538d624c96fSJaegeuk Kim /* Deallocate previous index in the node page */ 539e8ea9b3dSJaegeuk Kim inode = f2fs_iget_retry(sbi->sb, ino); 54006025f4dSNamjae Jeon if (IS_ERR(inode)) 54139cf72cfSJaegeuk Kim return 
PTR_ERR(inode); 5424b2414d0SChao Yu 54310a26878SChao Yu ret = f2fs_dquot_initialize(inode); 5444b2414d0SChao Yu if (ret) { 5454b2414d0SChao Yu iput(inode); 5464b2414d0SChao Yu return ret; 5474b2414d0SChao Yu } 54860979115SJaegeuk Kim } else { 54960979115SJaegeuk Kim inode = dn->inode; 55060979115SJaegeuk Kim } 55106025f4dSNamjae Jeon 5524d57b86dSChao Yu bidx = f2fs_start_bidx_of_node(offset, inode) + 5534d57b86dSChao Yu le16_to_cpu(sum.ofs_in_node); 554de93653fSJaegeuk Kim 555c9ef4810SJaegeuk Kim /* 556c9ef4810SJaegeuk Kim * if inode page is locked, unlock temporarily, but its reference 557c9ef4810SJaegeuk Kim * count keeps alive. 558c9ef4810SJaegeuk Kim */ 559c9ef4810SJaegeuk Kim if (ino == dn->inode->i_ino && dn->inode_page_locked) 560c9ef4810SJaegeuk Kim unlock_page(dn->inode_page); 561c9ef4810SJaegeuk Kim 562c9ef4810SJaegeuk Kim set_new_dnode(&tdn, inode, NULL, NULL, 0); 5634d57b86dSChao Yu if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE)) 564c9ef4810SJaegeuk Kim goto out; 565c9ef4810SJaegeuk Kim 566c9ef4810SJaegeuk Kim if (tdn.data_blkaddr == blkaddr) 5674d57b86dSChao Yu f2fs_truncate_data_blocks_range(&tdn, 1); 568c9ef4810SJaegeuk Kim 569c9ef4810SJaegeuk Kim f2fs_put_dnode(&tdn); 570c9ef4810SJaegeuk Kim out: 571c9ef4810SJaegeuk Kim if (ino != dn->inode->i_ino) 572c9ef4810SJaegeuk Kim iput(inode); 573c9ef4810SJaegeuk Kim else if (dn->inode_page_locked) 574c9ef4810SJaegeuk Kim lock_page(dn->inode_page); 575c9ef4810SJaegeuk Kim return 0; 576c9ef4810SJaegeuk Kim 577c9ef4810SJaegeuk Kim truncate_out: 578a2ced1ceSChao Yu if (f2fs_data_blkaddr(&tdn) == blkaddr) 5794d57b86dSChao Yu f2fs_truncate_data_blocks_range(&tdn, 1); 580c9ef4810SJaegeuk Kim if (dn->inode->i_ino == nid && !dn->inode_page_locked) 581c9ef4810SJaegeuk Kim unlock_page(dn->inode_page); 58239cf72cfSJaegeuk Kim return 0; 583d624c96fSJaegeuk Kim } 584d624c96fSJaegeuk Kim 5856ead1142SJaegeuk Kim static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, 586e17d488bSSheng Yong struct 
page *page) 587d624c96fSJaegeuk Kim { 588d624c96fSJaegeuk Kim struct dnode_of_data dn; 589d624c96fSJaegeuk Kim struct node_info ni; 59081ca7350SChao Yu unsigned int start, end; 591f356fe0cSJaegeuk Kim int err = 0, recovered = 0; 592d624c96fSJaegeuk Kim 5931c35a90eSJaegeuk Kim /* step 1: recover xattr */ 5941c35a90eSJaegeuk Kim if (IS_INODE(page)) { 5959627a7b3SChao Yu err = f2fs_recover_inline_xattr(inode, page); 5969627a7b3SChao Yu if (err) 5979627a7b3SChao Yu goto out; 5981c35a90eSJaegeuk Kim } else if (f2fs_has_xattr_block(ofs_of_node(page))) { 5994d57b86dSChao Yu err = f2fs_recover_xattr_data(inode, page); 600d260081cSChao Yu if (!err) 601d260081cSChao Yu recovered++; 6021c35a90eSJaegeuk Kim goto out; 6031c35a90eSJaegeuk Kim } 60470cfed88SChao Yu 6051c35a90eSJaegeuk Kim /* step 2: recover inline data */ 6069627a7b3SChao Yu err = f2fs_recover_inline_data(inode, page); 6079627a7b3SChao Yu if (err) { 6089627a7b3SChao Yu if (err == 1) 6099627a7b3SChao Yu err = 0; 6101e1bb4baSJaegeuk Kim goto out; 6119627a7b3SChao Yu } 6121e1bb4baSJaegeuk Kim 6131c35a90eSJaegeuk Kim /* step 3: recover data indices */ 6144d57b86dSChao Yu start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); 61581ca7350SChao Yu end = start + ADDRS_PER_PAGE(page, inode); 616d624c96fSJaegeuk Kim 617d624c96fSJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0); 618e8ea9b3dSJaegeuk Kim retry_dn: 6194d57b86dSChao Yu err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE); 620e8ea9b3dSJaegeuk Kim if (err) { 621e8ea9b3dSJaegeuk Kim if (err == -ENOMEM) { 6224034247aSNeilBrown memalloc_retry_wait(GFP_NOFS); 623e8ea9b3dSJaegeuk Kim goto retry_dn; 624e8ea9b3dSJaegeuk Kim } 6251e1bb4baSJaegeuk Kim goto out; 626e8ea9b3dSJaegeuk Kim } 627d624c96fSJaegeuk Kim 628bae0ee7aSChao Yu f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true); 629d624c96fSJaegeuk Kim 630a9419b63SJaegeuk Kim err = f2fs_get_node_info(sbi, dn.nid, &ni, false); 6317735730dSChao Yu if (err) 6327735730dSChao Yu goto err; 6337735730dSChao Yu 
6349850cf4aSJaegeuk Kim f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); 63522d61e28SChao Yu 63622d61e28SChao Yu if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { 637dcbb4c10SJoe Perches f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", 63822d61e28SChao Yu inode->i_ino, ofs_of_node(dn.node_page), 63922d61e28SChao Yu ofs_of_node(page)); 64010f966bbSChao Yu err = -EFSCORRUPTED; 64195fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER); 64222d61e28SChao Yu goto err; 64322d61e28SChao Yu } 644d624c96fSJaegeuk Kim 64512a8343eSChao Yu for (; start < end; start++, dn.ofs_in_node++) { 646d624c96fSJaegeuk Kim block_t src, dest; 647d624c96fSJaegeuk Kim 648a2ced1ceSChao Yu src = f2fs_data_blkaddr(&dn); 649a2ced1ceSChao Yu dest = data_blkaddr(dn.inode, page, dn.ofs_in_node); 650d624c96fSJaegeuk Kim 65193770ab7SChao Yu if (__is_valid_data_blkaddr(src) && 65293770ab7SChao Yu !f2fs_is_valid_blkaddr(sbi, src, META_POR)) { 65310f966bbSChao Yu err = -EFSCORRUPTED; 65495fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 65593770ab7SChao Yu goto err; 65693770ab7SChao Yu } 65793770ab7SChao Yu 65893770ab7SChao Yu if (__is_valid_data_blkaddr(dest) && 65993770ab7SChao Yu !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) { 66010f966bbSChao Yu err = -EFSCORRUPTED; 66195fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); 66293770ab7SChao Yu goto err; 66393770ab7SChao Yu } 66493770ab7SChao Yu 66512a8343eSChao Yu /* skip recovering if dest is the same as src */ 66612a8343eSChao Yu if (src == dest) 66712a8343eSChao Yu continue; 66812a8343eSChao Yu 66912a8343eSChao Yu /* dest is invalid, just invalidate src block */ 67012a8343eSChao Yu if (dest == NULL_ADDR) { 6714d57b86dSChao Yu f2fs_truncate_data_blocks_range(&dn, 1); 67212a8343eSChao Yu continue; 67312a8343eSChao Yu } 67412a8343eSChao Yu 67526787236SJaegeuk Kim if (!file_keep_isize(inode) && 676dba79f38SChao Yu (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT))) 677dba79f38SChao Yu 
f2fs_i_size_write(inode, 678dba79f38SChao Yu (loff_t)(start + 1) << PAGE_SHIFT); 67926de9b11SJaegeuk Kim 68012a8343eSChao Yu /* 68112a8343eSChao Yu * dest is reserved block, invalidate src block 68212a8343eSChao Yu * and then reserve one new block in dnode page. 68312a8343eSChao Yu */ 68412a8343eSChao Yu if (dest == NEW_ADDR) { 6854d57b86dSChao Yu f2fs_truncate_data_blocks_range(&dn, 1); 6864d57b86dSChao Yu f2fs_reserve_new_block(&dn); 68712a8343eSChao Yu continue; 68812a8343eSChao Yu } 68912a8343eSChao Yu 69012a8343eSChao Yu /* dest is valid block, try to recover from src to dest */ 691e1da7872SChao Yu if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) { 692e03b07d9SJaegeuk Kim 693d624c96fSJaegeuk Kim if (src == NULL_ADDR) { 6944d57b86dSChao Yu err = f2fs_reserve_new_block(&dn); 6957fa750a1SArnd Bergmann while (err && 6967fa750a1SArnd Bergmann IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) 6974d57b86dSChao Yu err = f2fs_reserve_new_block(&dn); 698d624c96fSJaegeuk Kim /* We should not get -ENOSPC */ 6999850cf4aSJaegeuk Kim f2fs_bug_on(sbi, err); 7006f3ec995SJaegeuk Kim if (err) 7016f3ec995SJaegeuk Kim goto err; 702d624c96fSJaegeuk Kim } 703e8ea9b3dSJaegeuk Kim retry_prev: 704d624c96fSJaegeuk Kim /* Check the previous node page having this index */ 70539cf72cfSJaegeuk Kim err = check_index_in_prev_nodes(sbi, dest, &dn); 706e8ea9b3dSJaegeuk Kim if (err) { 707e8ea9b3dSJaegeuk Kim if (err == -ENOMEM) { 7084034247aSNeilBrown memalloc_retry_wait(GFP_NOFS); 709e8ea9b3dSJaegeuk Kim goto retry_prev; 710e8ea9b3dSJaegeuk Kim } 71139cf72cfSJaegeuk Kim goto err; 712e8ea9b3dSJaegeuk Kim } 713d624c96fSJaegeuk Kim 7140ef4ca04SChao Yu if (f2fs_is_valid_blkaddr(sbi, dest, 7150ef4ca04SChao Yu DATA_GENERIC_ENHANCE_UPDATE)) { 7160ef4ca04SChao Yu f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u", 7170ef4ca04SChao Yu dest, inode->i_ino, dn.ofs_in_node); 7180ef4ca04SChao Yu err = -EFSCORRUPTED; 71995fa90c9SChao Yu f2fs_handle_error(sbi, 72095fa90c9SChao Yu ERROR_INVALID_BLKADDR); 
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	/* Stamp the in-place dnode with the fsynced page's footer info and
	 * mark it dirty for writeback. */
	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}

/*
 * Walk the node chain starting at the current WARM_NODE segment and replay
 * every recoverable dnode belonging to an inode in @inode_list: latest
 * inode metadata (recover_inode), directory entries (recover_dentry) and
 * data block addresses (do_recover_data).  An entry whose last fsynced
 * dnode has been processed is moved to @tmp_inode_list.
 *
 * Returns 0 on success, or the first negative errno encountered.
 */
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;
	unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		/* Stop at the first address outside the POR-valid range. */
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		/* The chain ends once a dnode is no longer recoverable. */
		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		/* Last fsynced block of this inode handled: retire it. */
		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
						next_blkaddr_of_node(page));

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		/* Read ahead along the node chain to speed up the scan. */
		f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

/*
 * Entry point of roll-forward recovery (see the scenario table at the top
 * of this file).  When @check_only is set, only probes whether there is
 * fsync data to recover, without replaying anything.
 *
 * Returns 1 when @check_only found recoverable data, 0 on success, or a
 * negative errno on failure.
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	/* Temporarily allow writes so recovery can modify the image;
	 * s_flags is restored before returning. */
	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	f2fs_down_write(&sbi->cp_global_sem);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;	/* recoverable fsync data exists */
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else
		f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	/* On failure, drop everything cached during the attempt. */
	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	f2fs_up_write(&sbi->cp_global_sem);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	/* A checkpoint is needed whenever replay actually ran. */
	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */

	/* ret carries the check_only result (1) or the write-pointer fix
	 * status; err is the recovery/checkpoint status. */
	return ret ? ret : err;
}

/*
 * Create the slab cache backing struct fsync_inode_entry allocations.
 * Returns 0 on success, -ENOMEM if the cache could not be created.
 */
int __init f2fs_create_recovery_cache(void)
{
	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
					sizeof(struct fsync_inode_entry));
	return fsync_entry_slab ? 0 : -ENOMEM;
}

/* Tear down the fsync_inode_entry slab cache. */
void f2fs_destroy_recovery_cache(void)
{
	kmem_cache_destroy(fsync_entry_slab);
}