17c1a000dSChao Yu // SPDX-License-Identifier: GPL-2.0
20a8165d7SJaegeuk Kim /*
3e05df3b1SJaegeuk Kim * fs/f2fs/node.c
4e05df3b1SJaegeuk Kim *
5e05df3b1SJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6e05df3b1SJaegeuk Kim * http://www.samsung.com/
7e05df3b1SJaegeuk Kim */
8e05df3b1SJaegeuk Kim #include <linux/fs.h>
9e05df3b1SJaegeuk Kim #include <linux/f2fs_fs.h>
10e05df3b1SJaegeuk Kim #include <linux/mpage.h>
114034247aSNeilBrown #include <linux/sched/mm.h>
12e05df3b1SJaegeuk Kim #include <linux/blkdev.h>
13e05df3b1SJaegeuk Kim #include <linux/pagevec.h>
14e05df3b1SJaegeuk Kim #include <linux/swap.h>
15e05df3b1SJaegeuk Kim
16e05df3b1SJaegeuk Kim #include "f2fs.h"
17e05df3b1SJaegeuk Kim #include "node.h"
18e05df3b1SJaegeuk Kim #include "segment.h"
1987905682SYunlei He #include "xattr.h"
2052118743SDaeho Jeong #include "iostat.h"
2151dd6249SNamjae Jeon #include <trace/events/f2fs.h>
22e05df3b1SJaegeuk Kim
/*
 * True while someone is building free nids, i.e. @nmi->build_lock is held.
 * Fix: use the macro parameter (nmi) instead of silently capturing a
 * caller-scope variable named 'nm_i' — the old form compiled only when
 * the caller happened to use that exact name.
 */
#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
24f978f5a0SGu Zheng
25e05df3b1SJaegeuk Kim static struct kmem_cache *nat_entry_slab;
26e05df3b1SJaegeuk Kim static struct kmem_cache *free_nid_slab;
27aec71382SChao Yu static struct kmem_cache *nat_entry_set_slab;
2850fa53ecSChao Yu static struct kmem_cache *fsync_node_entry_slab;
29e05df3b1SJaegeuk Kim
30a4f843bdSJaegeuk Kim /*
31a4f843bdSJaegeuk Kim * Check whether the given nid is within node id range.
32a4f843bdSJaegeuk Kim */
f2fs_check_nid_range(struct f2fs_sb_info * sbi,nid_t nid)334d57b86dSChao Yu int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34a4f843bdSJaegeuk Kim {
35a4f843bdSJaegeuk Kim if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36a4f843bdSJaegeuk Kim set_sbi_flag(sbi, SBI_NEED_FSCK);
37dcbb4c10SJoe Perches f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
38a4f843bdSJaegeuk Kim __func__, nid);
3995fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
4010f966bbSChao Yu return -EFSCORRUPTED;
41a4f843bdSJaegeuk Kim }
42a4f843bdSJaegeuk Kim return 0;
43a4f843bdSJaegeuk Kim }
44a4f843bdSJaegeuk Kim
f2fs_available_free_memory(struct f2fs_sb_info * sbi,int type)454d57b86dSChao Yu bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
46cdfc41c1SJaegeuk Kim {
476fb03f3aSJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
48d6d2b491SSahitya Tummala struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
49cdfc41c1SJaegeuk Kim struct sysinfo val;
50e5e7ea3cSJaegeuk Kim unsigned long avail_ram;
51cdfc41c1SJaegeuk Kim unsigned long mem_size = 0;
526fb03f3aSJaegeuk Kim bool res = false;
53cdfc41c1SJaegeuk Kim
54d6d2b491SSahitya Tummala if (!nm_i)
55d6d2b491SSahitya Tummala return true;
56d6d2b491SSahitya Tummala
57cdfc41c1SJaegeuk Kim si_meminfo(&val);
58e5e7ea3cSJaegeuk Kim
59e5e7ea3cSJaegeuk Kim /* only uses low memory */
60e5e7ea3cSJaegeuk Kim avail_ram = val.totalram - val.totalhigh;
61e5e7ea3cSJaegeuk Kim
62429511cdSChao Yu /*
6371644dffSJaegeuk Kim * give 25%, 25%, 50%, 50%, 25%, 25% memory for each components respectively
64429511cdSChao Yu */
656fb03f3aSJaegeuk Kim if (type == FREE_NIDS) {
669a4ffdf5SChao Yu mem_size = (nm_i->nid_cnt[FREE_NID] *
67b8559dc2SChao Yu sizeof(struct free_nid)) >> PAGE_SHIFT;
68e5e7ea3cSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
696fb03f3aSJaegeuk Kim } else if (type == NAT_ENTRIES) {
70a95ba66aSJaegeuk Kim mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
71a95ba66aSJaegeuk Kim sizeof(struct nat_entry)) >> PAGE_SHIFT;
72e5e7ea3cSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
73e589c2c4SJaegeuk Kim if (excess_cached_nats(sbi))
74e589c2c4SJaegeuk Kim res = false;
75a1257023SJaegeuk Kim } else if (type == DIRTY_DENTS) {
76a1257023SJaegeuk Kim if (sbi->sb->s_bdi->wb.dirty_exceeded)
77a1257023SJaegeuk Kim return false;
78a1257023SJaegeuk Kim mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
79a1257023SJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
80e5e7ea3cSJaegeuk Kim } else if (type == INO_ENTRIES) {
81e5e7ea3cSJaegeuk Kim int i;
82e5e7ea3cSJaegeuk Kim
8339d787beSChao Yu for (i = 0; i < MAX_INO_ENTRY; i++)
848f73cbb7SKinglong Mee mem_size += sbi->im[i].ino_num *
858f73cbb7SKinglong Mee sizeof(struct ino_entry);
868f73cbb7SKinglong Mee mem_size >>= PAGE_SHIFT;
87e5e7ea3cSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
8871644dffSJaegeuk Kim } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
8971644dffSJaegeuk Kim enum extent_type etype = type == READ_EXTENT_CACHE ?
9071644dffSJaegeuk Kim EX_READ : EX_BLOCK_AGE;
9171644dffSJaegeuk Kim struct extent_tree_info *eti = &sbi->extent_tree[etype];
92e7547dacSJaegeuk Kim
93e7547dacSJaegeuk Kim mem_size = (atomic_read(&eti->total_ext_tree) *
947441ccefSJaegeuk Kim sizeof(struct extent_tree) +
95e7547dacSJaegeuk Kim atomic_read(&eti->total_ext_node) *
9609cbfeafSKirill A. Shutemov sizeof(struct extent_node)) >> PAGE_SHIFT;
9771644dffSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
98d6d2b491SSahitya Tummala } else if (type == DISCARD_CACHE) {
99d6d2b491SSahitya Tummala mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
100d6d2b491SSahitya Tummala sizeof(struct discard_cmd)) >> PAGE_SHIFT;
101d6d2b491SSahitya Tummala res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
1026ce19affSChao Yu } else if (type == COMPRESS_PAGE) {
1036ce19affSChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
1046ce19affSChao Yu unsigned long free_ram = val.freeram;
1056ce19affSChao Yu
1066ce19affSChao Yu /*
1076ce19affSChao Yu * free memory is lower than watermark or cached page count
1086ce19affSChao Yu * exceed threshold, deny caching compress page.
1096ce19affSChao Yu */
1106ce19affSChao Yu res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
1116ce19affSChao Yu (COMPRESS_MAPPING(sbi)->nrpages <
1126ce19affSChao Yu free_ram * sbi->compress_percent / 100);
1136ce19affSChao Yu #else
1146ce19affSChao Yu res = false;
1156ce19affSChao Yu #endif
1161e84371fSJaegeuk Kim } else {
1171663cae4SJaegeuk Kim if (!sbi->sb->s_bdi->wb.dirty_exceeded)
1181663cae4SJaegeuk Kim return true;
1196fb03f3aSJaegeuk Kim }
1206fb03f3aSJaegeuk Kim return res;
121cdfc41c1SJaegeuk Kim }
122cdfc41c1SJaegeuk Kim
clear_node_page_dirty(struct page * page)123e05df3b1SJaegeuk Kim static void clear_node_page_dirty(struct page *page)
124e05df3b1SJaegeuk Kim {
125e05df3b1SJaegeuk Kim if (PageDirty(page)) {
1265ec2d99dSMatthew Wilcox f2fs_clear_page_cache_dirty_tag(page);
127e05df3b1SJaegeuk Kim clear_page_dirty_for_io(page);
128aec2f729SChao Yu dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
129e05df3b1SJaegeuk Kim }
130e05df3b1SJaegeuk Kim ClearPageUptodate(page);
131e05df3b1SJaegeuk Kim }
132e05df3b1SJaegeuk Kim
get_current_nat_page(struct f2fs_sb_info * sbi,nid_t nid)133e05df3b1SJaegeuk Kim static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
134e05df3b1SJaegeuk Kim {
1353acc4522SJaegeuk Kim return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
136e05df3b1SJaegeuk Kim }
137e05df3b1SJaegeuk Kim
get_next_nat_page(struct f2fs_sb_info * sbi,nid_t nid)138e05df3b1SJaegeuk Kim static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
139e05df3b1SJaegeuk Kim {
140e05df3b1SJaegeuk Kim struct page *src_page;
141e05df3b1SJaegeuk Kim struct page *dst_page;
142e05df3b1SJaegeuk Kim pgoff_t dst_off;
143e05df3b1SJaegeuk Kim void *src_addr;
144e05df3b1SJaegeuk Kim void *dst_addr;
145e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
146e05df3b1SJaegeuk Kim
14780551d17SChao Yu dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
148e05df3b1SJaegeuk Kim
149e05df3b1SJaegeuk Kim /* get current nat block page with lock */
15080551d17SChao Yu src_page = get_current_nat_page(sbi, nid);
151edc55aafSJaegeuk Kim if (IS_ERR(src_page))
152edc55aafSJaegeuk Kim return src_page;
1534d57b86dSChao Yu dst_page = f2fs_grab_meta_page(sbi, dst_off);
1549850cf4aSJaegeuk Kim f2fs_bug_on(sbi, PageDirty(src_page));
155e05df3b1SJaegeuk Kim
156e05df3b1SJaegeuk Kim src_addr = page_address(src_page);
157e05df3b1SJaegeuk Kim dst_addr = page_address(dst_page);
15809cbfeafSKirill A. Shutemov memcpy(dst_addr, src_addr, PAGE_SIZE);
159e05df3b1SJaegeuk Kim set_page_dirty(dst_page);
160e05df3b1SJaegeuk Kim f2fs_put_page(src_page, 1);
161e05df3b1SJaegeuk Kim
162e05df3b1SJaegeuk Kim set_to_next_nat(nm_i, nid);
163e05df3b1SJaegeuk Kim
164e05df3b1SJaegeuk Kim return dst_page;
165e05df3b1SJaegeuk Kim }
166e05df3b1SJaegeuk Kim
__alloc_nat_entry(struct f2fs_sb_info * sbi,nid_t nid,bool no_fail)16732410577SChao Yu static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
16832410577SChao Yu nid_t nid, bool no_fail)
16912f9ef37SYunlei He {
17012f9ef37SYunlei He struct nat_entry *new;
17112f9ef37SYunlei He
17232410577SChao Yu new = f2fs_kmem_cache_alloc(nat_entry_slab,
17332410577SChao Yu GFP_F2FS_ZERO, no_fail, sbi);
17412f9ef37SYunlei He if (new) {
17512f9ef37SYunlei He nat_set_nid(new, nid);
17612f9ef37SYunlei He nat_reset_flag(new);
17712f9ef37SYunlei He }
17812f9ef37SYunlei He return new;
17912f9ef37SYunlei He }
18012f9ef37SYunlei He
__free_nat_entry(struct nat_entry * e)18112f9ef37SYunlei He static void __free_nat_entry(struct nat_entry *e)
18212f9ef37SYunlei He {
18312f9ef37SYunlei He kmem_cache_free(nat_entry_slab, e);
18412f9ef37SYunlei He }
18512f9ef37SYunlei He
18612f9ef37SYunlei He /* must be locked by nat_tree_lock */
__init_nat_entry(struct f2fs_nm_info * nm_i,struct nat_entry * ne,struct f2fs_nat_entry * raw_ne,bool no_fail)18712f9ef37SYunlei He static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
18812f9ef37SYunlei He struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
18912f9ef37SYunlei He {
19012f9ef37SYunlei He if (no_fail)
19112f9ef37SYunlei He f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
19212f9ef37SYunlei He else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
19312f9ef37SYunlei He return NULL;
19412f9ef37SYunlei He
19512f9ef37SYunlei He if (raw_ne)
19612f9ef37SYunlei He node_info_from_raw_nat(&ne->ni, raw_ne);
19722969158SChao Yu
19822969158SChao Yu spin_lock(&nm_i->nat_list_lock);
19912f9ef37SYunlei He list_add_tail(&ne->list, &nm_i->nat_entries);
20022969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
20122969158SChao Yu
202a95ba66aSJaegeuk Kim nm_i->nat_cnt[TOTAL_NAT]++;
203a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]++;
20412f9ef37SYunlei He return ne;
20512f9ef37SYunlei He }
20612f9ef37SYunlei He
__lookup_nat_cache(struct f2fs_nm_info * nm_i,nid_t n)207e05df3b1SJaegeuk Kim static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
208e05df3b1SJaegeuk Kim {
20922969158SChao Yu struct nat_entry *ne;
21022969158SChao Yu
21122969158SChao Yu ne = radix_tree_lookup(&nm_i->nat_root, n);
21222969158SChao Yu
21322969158SChao Yu /* for recent accessed nat entry, move it to tail of lru list */
21422969158SChao Yu if (ne && !get_nat_flag(ne, IS_DIRTY)) {
21522969158SChao Yu spin_lock(&nm_i->nat_list_lock);
21622969158SChao Yu if (!list_empty(&ne->list))
21722969158SChao Yu list_move_tail(&ne->list, &nm_i->nat_entries);
21822969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
21922969158SChao Yu }
22022969158SChao Yu
22122969158SChao Yu return ne;
222e05df3b1SJaegeuk Kim }
223e05df3b1SJaegeuk Kim
__gang_lookup_nat_cache(struct f2fs_nm_info * nm_i,nid_t start,unsigned int nr,struct nat_entry ** ep)224e05df3b1SJaegeuk Kim static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
225e05df3b1SJaegeuk Kim nid_t start, unsigned int nr, struct nat_entry **ep)
226e05df3b1SJaegeuk Kim {
227e05df3b1SJaegeuk Kim return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
228e05df3b1SJaegeuk Kim }
229e05df3b1SJaegeuk Kim
__del_from_nat_cache(struct f2fs_nm_info * nm_i,struct nat_entry * e)230e05df3b1SJaegeuk Kim static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
231e05df3b1SJaegeuk Kim {
232e05df3b1SJaegeuk Kim radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
233a95ba66aSJaegeuk Kim nm_i->nat_cnt[TOTAL_NAT]--;
234a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]--;
23512f9ef37SYunlei He __free_nat_entry(e);
236e05df3b1SJaegeuk Kim }
237e05df3b1SJaegeuk Kim
__grab_nat_entry_set(struct f2fs_nm_info * nm_i,struct nat_entry * ne)238780de47cSChao Yu static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
239309cc2b6SJaegeuk Kim struct nat_entry *ne)
240309cc2b6SJaegeuk Kim {
241309cc2b6SJaegeuk Kim nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
242309cc2b6SJaegeuk Kim struct nat_entry_set *head;
243309cc2b6SJaegeuk Kim
244309cc2b6SJaegeuk Kim head = radix_tree_lookup(&nm_i->nat_set_root, set);
245309cc2b6SJaegeuk Kim if (!head) {
24632410577SChao Yu head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
24732410577SChao Yu GFP_NOFS, true, NULL);
248309cc2b6SJaegeuk Kim
249309cc2b6SJaegeuk Kim INIT_LIST_HEAD(&head->entry_list);
250309cc2b6SJaegeuk Kim INIT_LIST_HEAD(&head->set_list);
251309cc2b6SJaegeuk Kim head->set = set;
252309cc2b6SJaegeuk Kim head->entry_cnt = 0;
2539be32d72SJaegeuk Kim f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
254309cc2b6SJaegeuk Kim }
255780de47cSChao Yu return head;
256780de47cSChao Yu }
257780de47cSChao Yu
__set_nat_cache_dirty(struct f2fs_nm_info * nm_i,struct nat_entry * ne)258780de47cSChao Yu static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
259780de47cSChao Yu struct nat_entry *ne)
260780de47cSChao Yu {
261780de47cSChao Yu struct nat_entry_set *head;
262780de47cSChao Yu bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
263780de47cSChao Yu
264780de47cSChao Yu if (!new_ne)
265780de47cSChao Yu head = __grab_nat_entry_set(nm_i, ne);
266780de47cSChao Yu
267780de47cSChao Yu /*
268780de47cSChao Yu * update entry_cnt in below condition:
269780de47cSChao Yu * 1. update NEW_ADDR to valid block address;
270780de47cSChao Yu * 2. update old block address to new one;
271780de47cSChao Yu */
272780de47cSChao Yu if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
273780de47cSChao Yu !get_nat_flag(ne, IS_DIRTY)))
274780de47cSChao Yu head->entry_cnt++;
275780de47cSChao Yu
276780de47cSChao Yu set_nat_flag(ne, IS_PREALLOC, new_ne);
277febeca6dSChao Yu
278febeca6dSChao Yu if (get_nat_flag(ne, IS_DIRTY))
279febeca6dSChao Yu goto refresh_list;
280febeca6dSChao Yu
281a95ba66aSJaegeuk Kim nm_i->nat_cnt[DIRTY_NAT]++;
282a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]--;
283309cc2b6SJaegeuk Kim set_nat_flag(ne, IS_DIRTY, true);
284febeca6dSChao Yu refresh_list:
28522969158SChao Yu spin_lock(&nm_i->nat_list_lock);
286780de47cSChao Yu if (new_ne)
287febeca6dSChao Yu list_del_init(&ne->list);
288febeca6dSChao Yu else
289febeca6dSChao Yu list_move_tail(&ne->list, &head->entry_list);
29022969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
291309cc2b6SJaegeuk Kim }
292309cc2b6SJaegeuk Kim
__clear_nat_cache_dirty(struct f2fs_nm_info * nm_i,struct nat_entry_set * set,struct nat_entry * ne)293309cc2b6SJaegeuk Kim static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
2940b28b71eSKinglong Mee struct nat_entry_set *set, struct nat_entry *ne)
295309cc2b6SJaegeuk Kim {
29622969158SChao Yu spin_lock(&nm_i->nat_list_lock);
297309cc2b6SJaegeuk Kim list_move_tail(&ne->list, &nm_i->nat_entries);
29822969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
29922969158SChao Yu
300309cc2b6SJaegeuk Kim set_nat_flag(ne, IS_DIRTY, false);
3010b28b71eSKinglong Mee set->entry_cnt--;
302a95ba66aSJaegeuk Kim nm_i->nat_cnt[DIRTY_NAT]--;
303a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]++;
304309cc2b6SJaegeuk Kim }
305309cc2b6SJaegeuk Kim
__gang_lookup_nat_set(struct f2fs_nm_info * nm_i,nid_t start,unsigned int nr,struct nat_entry_set ** ep)306309cc2b6SJaegeuk Kim static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
307309cc2b6SJaegeuk Kim nid_t start, unsigned int nr, struct nat_entry_set **ep)
308309cc2b6SJaegeuk Kim {
309309cc2b6SJaegeuk Kim return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
310309cc2b6SJaegeuk Kim start, nr);
311309cc2b6SJaegeuk Kim }
312309cc2b6SJaegeuk Kim
f2fs_in_warm_node_list(struct f2fs_sb_info * sbi,struct page * page)31350fa53ecSChao Yu bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
31450fa53ecSChao Yu {
31550fa53ecSChao Yu return NODE_MAPPING(sbi) == page->mapping &&
31650fa53ecSChao Yu IS_DNODE(page) && is_cold_node(page);
31750fa53ecSChao Yu }
31850fa53ecSChao Yu
f2fs_init_fsync_node_info(struct f2fs_sb_info * sbi)31950fa53ecSChao Yu void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
32050fa53ecSChao Yu {
32150fa53ecSChao Yu spin_lock_init(&sbi->fsync_node_lock);
32250fa53ecSChao Yu INIT_LIST_HEAD(&sbi->fsync_node_list);
32350fa53ecSChao Yu sbi->fsync_seg_id = 0;
32450fa53ecSChao Yu sbi->fsync_node_num = 0;
32550fa53ecSChao Yu }
32650fa53ecSChao Yu
f2fs_add_fsync_node_entry(struct f2fs_sb_info * sbi,struct page * page)32750fa53ecSChao Yu static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
32850fa53ecSChao Yu struct page *page)
32950fa53ecSChao Yu {
33050fa53ecSChao Yu struct fsync_node_entry *fn;
33150fa53ecSChao Yu unsigned long flags;
33250fa53ecSChao Yu unsigned int seq_id;
33350fa53ecSChao Yu
33432410577SChao Yu fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
33532410577SChao Yu GFP_NOFS, true, NULL);
33650fa53ecSChao Yu
33750fa53ecSChao Yu get_page(page);
33850fa53ecSChao Yu fn->page = page;
33950fa53ecSChao Yu INIT_LIST_HEAD(&fn->list);
34050fa53ecSChao Yu
34150fa53ecSChao Yu spin_lock_irqsave(&sbi->fsync_node_lock, flags);
34250fa53ecSChao Yu list_add_tail(&fn->list, &sbi->fsync_node_list);
34350fa53ecSChao Yu fn->seq_id = sbi->fsync_seg_id++;
34450fa53ecSChao Yu seq_id = fn->seq_id;
34550fa53ecSChao Yu sbi->fsync_node_num++;
34650fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
34750fa53ecSChao Yu
34850fa53ecSChao Yu return seq_id;
34950fa53ecSChao Yu }
35050fa53ecSChao Yu
f2fs_del_fsync_node_entry(struct f2fs_sb_info * sbi,struct page * page)35150fa53ecSChao Yu void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
35250fa53ecSChao Yu {
35350fa53ecSChao Yu struct fsync_node_entry *fn;
35450fa53ecSChao Yu unsigned long flags;
35550fa53ecSChao Yu
35650fa53ecSChao Yu spin_lock_irqsave(&sbi->fsync_node_lock, flags);
35750fa53ecSChao Yu list_for_each_entry(fn, &sbi->fsync_node_list, list) {
35850fa53ecSChao Yu if (fn->page == page) {
35950fa53ecSChao Yu list_del(&fn->list);
36050fa53ecSChao Yu sbi->fsync_node_num--;
36150fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
36250fa53ecSChao Yu kmem_cache_free(fsync_node_entry_slab, fn);
36350fa53ecSChao Yu put_page(page);
36450fa53ecSChao Yu return;
36550fa53ecSChao Yu }
36650fa53ecSChao Yu }
36750fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
36850fa53ecSChao Yu f2fs_bug_on(sbi, 1);
36950fa53ecSChao Yu }
37050fa53ecSChao Yu
f2fs_reset_fsync_node_info(struct f2fs_sb_info * sbi)37150fa53ecSChao Yu void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
37250fa53ecSChao Yu {
37350fa53ecSChao Yu unsigned long flags;
37450fa53ecSChao Yu
37550fa53ecSChao Yu spin_lock_irqsave(&sbi->fsync_node_lock, flags);
37650fa53ecSChao Yu sbi->fsync_seg_id = 0;
37750fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
37850fa53ecSChao Yu }
37950fa53ecSChao Yu
f2fs_need_dentry_mark(struct f2fs_sb_info * sbi,nid_t nid)3804d57b86dSChao Yu int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
3812dcf51abSJaegeuk Kim {
3822dcf51abSJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
3832dcf51abSJaegeuk Kim struct nat_entry *e;
3842dcf51abSJaegeuk Kim bool need = false;
3852dcf51abSJaegeuk Kim
386e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
3872dcf51abSJaegeuk Kim e = __lookup_nat_cache(nm_i, nid);
3882dcf51abSJaegeuk Kim if (e) {
3892dcf51abSJaegeuk Kim if (!get_nat_flag(e, IS_CHECKPOINTED) &&
3902dcf51abSJaegeuk Kim !get_nat_flag(e, HAS_FSYNCED_INODE))
3912dcf51abSJaegeuk Kim need = true;
3922dcf51abSJaegeuk Kim }
393e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
3942dcf51abSJaegeuk Kim return need;
3952dcf51abSJaegeuk Kim }
3962dcf51abSJaegeuk Kim
f2fs_is_checkpointed_node(struct f2fs_sb_info * sbi,nid_t nid)3974d57b86dSChao Yu bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
398e05df3b1SJaegeuk Kim {
399e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
400e05df3b1SJaegeuk Kim struct nat_entry *e;
40188bd02c9SJaegeuk Kim bool is_cp = true;
402e05df3b1SJaegeuk Kim
403e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
404e05df3b1SJaegeuk Kim e = __lookup_nat_cache(nm_i, nid);
4057ef35e3bSJaegeuk Kim if (e && !get_nat_flag(e, IS_CHECKPOINTED))
40688bd02c9SJaegeuk Kim is_cp = false;
407e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
408e05df3b1SJaegeuk Kim return is_cp;
409e05df3b1SJaegeuk Kim }
410e05df3b1SJaegeuk Kim
f2fs_need_inode_block_update(struct f2fs_sb_info * sbi,nid_t ino)4114d57b86dSChao Yu bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
412b6fe5873SJaegeuk Kim {
413b6fe5873SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
414b6fe5873SJaegeuk Kim struct nat_entry *e;
41588bd02c9SJaegeuk Kim bool need_update = true;
416b6fe5873SJaegeuk Kim
417e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
41888bd02c9SJaegeuk Kim e = __lookup_nat_cache(nm_i, ino);
41988bd02c9SJaegeuk Kim if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
42088bd02c9SJaegeuk Kim (get_nat_flag(e, IS_CHECKPOINTED) ||
42188bd02c9SJaegeuk Kim get_nat_flag(e, HAS_FSYNCED_INODE)))
42288bd02c9SJaegeuk Kim need_update = false;
423e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
42488bd02c9SJaegeuk Kim return need_update;
425b6fe5873SJaegeuk Kim }
426b6fe5873SJaegeuk Kim
/*
 * Cache the on-disk NAT entry @ne for @nid in memory.  Best-effort:
 * allocation failure is tolerated, and the insert is skipped entirely
 * while a checkpoint holds cp_global_sem, to reduce nat_tree_lock
 * contention.  Takes nat_tree_lock itself (write).
 */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	/* allocate outside the lock; fallible on purpose */
	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		/* an already-cached entry must agree with the on-disk one */
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}
455e05df3b1SJaegeuk Kim
set_node_addr(struct f2fs_sb_info * sbi,struct node_info * ni,block_t new_blkaddr,bool fsync_done)456e05df3b1SJaegeuk Kim static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
457479f40c4SJaegeuk Kim block_t new_blkaddr, bool fsync_done)
458e05df3b1SJaegeuk Kim {
459e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
460e05df3b1SJaegeuk Kim struct nat_entry *e;
46132410577SChao Yu struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
4629be32d72SJaegeuk Kim
463e4544b63STim Murray f2fs_down_write(&nm_i->nat_tree_lock);
464e05df3b1SJaegeuk Kim e = __lookup_nat_cache(nm_i, ni->nid);
465e05df3b1SJaegeuk Kim if (!e) {
46612f9ef37SYunlei He e = __init_nat_entry(nm_i, new, NULL, true);
4675c27f4eeSChao Yu copy_node_info(&e->ni, ni);
4689850cf4aSJaegeuk Kim f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
469e05df3b1SJaegeuk Kim } else if (new_blkaddr == NEW_ADDR) {
470e05df3b1SJaegeuk Kim /*
471e05df3b1SJaegeuk Kim * when nid is reallocated,
472e05df3b1SJaegeuk Kim * previous nat entry can be remained in nat cache.
473e05df3b1SJaegeuk Kim * So, reinitialize it with new information.
474e05df3b1SJaegeuk Kim */
4755c27f4eeSChao Yu copy_node_info(&e->ni, ni);
4769850cf4aSJaegeuk Kim f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
477e05df3b1SJaegeuk Kim }
47812f9ef37SYunlei He /* let's free early to reduce memory consumption */
47912f9ef37SYunlei He if (e != new)
48012f9ef37SYunlei He __free_nat_entry(new);
481e05df3b1SJaegeuk Kim
482e05df3b1SJaegeuk Kim /* sanity check */
4839850cf4aSJaegeuk Kim f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
4849850cf4aSJaegeuk Kim f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
485e05df3b1SJaegeuk Kim new_blkaddr == NULL_ADDR);
4869850cf4aSJaegeuk Kim f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
487e05df3b1SJaegeuk Kim new_blkaddr == NEW_ADDR);
48893770ab7SChao Yu f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
489e05df3b1SJaegeuk Kim new_blkaddr == NEW_ADDR);
490e05df3b1SJaegeuk Kim
491e1c42045Sarter97 /* increment version no as node is removed */
492e05df3b1SJaegeuk Kim if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
493e05df3b1SJaegeuk Kim unsigned char version = nat_get_version(e);
4945f029c04SYi Zhuang
495e05df3b1SJaegeuk Kim nat_set_version(e, inc_node_version(version));
496e05df3b1SJaegeuk Kim }
497e05df3b1SJaegeuk Kim
498e05df3b1SJaegeuk Kim /* change address */
499e05df3b1SJaegeuk Kim nat_set_blkaddr(e, new_blkaddr);
50093770ab7SChao Yu if (!__is_valid_data_blkaddr(new_blkaddr))
50188bd02c9SJaegeuk Kim set_nat_flag(e, IS_CHECKPOINTED, false);
502e05df3b1SJaegeuk Kim __set_nat_cache_dirty(nm_i, e);
503479f40c4SJaegeuk Kim
504479f40c4SJaegeuk Kim /* update fsync_mark if its inode nat entry is still alive */
505d5b692b7SChao Yu if (ni->nid != ni->ino)
506479f40c4SJaegeuk Kim e = __lookup_nat_cache(nm_i, ni->ino);
50788bd02c9SJaegeuk Kim if (e) {
50888bd02c9SJaegeuk Kim if (fsync_done && ni->nid == ni->ino)
50988bd02c9SJaegeuk Kim set_nat_flag(e, HAS_FSYNCED_INODE, true);
51088bd02c9SJaegeuk Kim set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
51188bd02c9SJaegeuk Kim }
512e4544b63STim Murray f2fs_up_write(&nm_i->nat_tree_lock);
513e05df3b1SJaegeuk Kim }
514e05df3b1SJaegeuk Kim
f2fs_try_to_free_nats(struct f2fs_sb_info * sbi,int nr_shrink)5154d57b86dSChao Yu int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
516e05df3b1SJaegeuk Kim {
517e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
5181b38dc8eSJaegeuk Kim int nr = nr_shrink;
519e05df3b1SJaegeuk Kim
520e4544b63STim Murray if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
521b873b798SJaegeuk Kim return 0;
522e05df3b1SJaegeuk Kim
52322969158SChao Yu spin_lock(&nm_i->nat_list_lock);
52422969158SChao Yu while (nr_shrink) {
525e05df3b1SJaegeuk Kim struct nat_entry *ne;
52622969158SChao Yu
52722969158SChao Yu if (list_empty(&nm_i->nat_entries))
52822969158SChao Yu break;
52922969158SChao Yu
530e05df3b1SJaegeuk Kim ne = list_first_entry(&nm_i->nat_entries,
531e05df3b1SJaegeuk Kim struct nat_entry, list);
53222969158SChao Yu list_del(&ne->list);
53322969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
53422969158SChao Yu
535e05df3b1SJaegeuk Kim __del_from_nat_cache(nm_i, ne);
536e05df3b1SJaegeuk Kim nr_shrink--;
53722969158SChao Yu
53822969158SChao Yu spin_lock(&nm_i->nat_list_lock);
539e05df3b1SJaegeuk Kim }
54022969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
54122969158SChao Yu
542e4544b63STim Murray f2fs_up_write(&nm_i->nat_tree_lock);
5431b38dc8eSJaegeuk Kim return nr - nr_shrink;
544e05df3b1SJaegeuk Kim }
545e05df3b1SJaegeuk Kim
f2fs_get_node_info(struct f2fs_sb_info * sbi,nid_t nid,struct node_info * ni,bool checkpoint_context)5467735730dSChao Yu int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
547a9419b63SJaegeuk Kim struct node_info *ni, bool checkpoint_context)
548e05df3b1SJaegeuk Kim {
549e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
550e05df3b1SJaegeuk Kim struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
551b7ad7512SChao Yu struct f2fs_journal *journal = curseg->journal;
552e05df3b1SJaegeuk Kim nid_t start_nid = START_NID(nid);
553e05df3b1SJaegeuk Kim struct f2fs_nat_block *nat_blk;
554e05df3b1SJaegeuk Kim struct page *page = NULL;
555e05df3b1SJaegeuk Kim struct f2fs_nat_entry ne;
556e05df3b1SJaegeuk Kim struct nat_entry *e;
55766a82d1fSYunlei He pgoff_t index;
55893770ab7SChao Yu block_t blkaddr;
559e05df3b1SJaegeuk Kim int i;
560e05df3b1SJaegeuk Kim
561e05df3b1SJaegeuk Kim ni->nid = nid;
5622eeb0dceSJaegeuk Kim retry:
563e05df3b1SJaegeuk Kim /* Check nat cache */
564e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
565e05df3b1SJaegeuk Kim e = __lookup_nat_cache(nm_i, nid);
566e05df3b1SJaegeuk Kim if (e) {
567e05df3b1SJaegeuk Kim ni->ino = nat_get_ino(e);
568e05df3b1SJaegeuk Kim ni->blk_addr = nat_get_blkaddr(e);
569e05df3b1SJaegeuk Kim ni->version = nat_get_version(e);
570e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
5717735730dSChao Yu return 0;
5721515aef0SChao Yu }
573e05df3b1SJaegeuk Kim
5742eeb0dceSJaegeuk Kim /*
5752eeb0dceSJaegeuk Kim * Check current segment summary by trying to grab journal_rwsem first.
5762eeb0dceSJaegeuk Kim * This sem is on the critical path on the checkpoint requiring the above
5772eeb0dceSJaegeuk Kim * nat_tree_lock. Therefore, we should retry, if we failed to grab here
5782eeb0dceSJaegeuk Kim * while not bothering checkpoint.
5792eeb0dceSJaegeuk Kim */
580e4544b63STim Murray if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
581b7ad7512SChao Yu down_read(&curseg->journal_rwsem);
582e4544b63STim Murray } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
583a9419b63SJaegeuk Kim !down_read_trylock(&curseg->journal_rwsem)) {
584e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
5852eeb0dceSJaegeuk Kim goto retry;
5862eeb0dceSJaegeuk Kim }
5872eeb0dceSJaegeuk Kim
5884d57b86dSChao Yu i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
589e05df3b1SJaegeuk Kim if (i >= 0) {
590dfc08a12SChao Yu ne = nat_in_journal(journal, i);
591e05df3b1SJaegeuk Kim node_info_from_raw_nat(ni, &ne);
592e05df3b1SJaegeuk Kim }
593b7ad7512SChao Yu up_read(&curseg->journal_rwsem);
59466a82d1fSYunlei He if (i >= 0) {
595e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
596e05df3b1SJaegeuk Kim goto cache;
59766a82d1fSYunlei He }
598e05df3b1SJaegeuk Kim
599e05df3b1SJaegeuk Kim /* Fill node_info from nat page */
60066a82d1fSYunlei He index = current_nat_addr(sbi, nid);
601e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
60266a82d1fSYunlei He
6034d57b86dSChao Yu page = f2fs_get_meta_page(sbi, index);
6047735730dSChao Yu if (IS_ERR(page))
6057735730dSChao Yu return PTR_ERR(page);
6067735730dSChao Yu
607e05df3b1SJaegeuk Kim nat_blk = (struct f2fs_nat_block *)page_address(page);
608e05df3b1SJaegeuk Kim ne = nat_blk->entries[nid - start_nid];
609e05df3b1SJaegeuk Kim node_info_from_raw_nat(ni, &ne);
610e05df3b1SJaegeuk Kim f2fs_put_page(page, 1);
611e05df3b1SJaegeuk Kim cache:
61293770ab7SChao Yu blkaddr = le32_to_cpu(ne.block_addr);
61393770ab7SChao Yu if (__is_valid_data_blkaddr(blkaddr) &&
61493770ab7SChao Yu !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
61593770ab7SChao Yu return -EFAULT;
61693770ab7SChao Yu
617e05df3b1SJaegeuk Kim /* cache nat entry */
6181515aef0SChao Yu cache_nat_entry(sbi, nid, &ne);
6197735730dSChao Yu return 0;
620e05df3b1SJaegeuk Kim }
621e05df3b1SJaegeuk Kim
62279344efbSJaegeuk Kim /*
62379344efbSJaegeuk Kim * readahead MAX_RA_NODE number of node pages.
62479344efbSJaegeuk Kim */
f2fs_ra_node_pages(struct page * parent,int start,int n)6254d57b86dSChao Yu static void f2fs_ra_node_pages(struct page *parent, int start, int n)
62679344efbSJaegeuk Kim {
62779344efbSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
62879344efbSJaegeuk Kim struct blk_plug plug;
62979344efbSJaegeuk Kim int i, end;
63079344efbSJaegeuk Kim nid_t nid;
63179344efbSJaegeuk Kim
63279344efbSJaegeuk Kim blk_start_plug(&plug);
63379344efbSJaegeuk Kim
63479344efbSJaegeuk Kim /* Then, try readahead for siblings of the desired node */
63579344efbSJaegeuk Kim end = start + n;
63679344efbSJaegeuk Kim end = min(end, NIDS_PER_BLOCK);
63779344efbSJaegeuk Kim for (i = start; i < end; i++) {
63879344efbSJaegeuk Kim nid = get_nid(parent, i, false);
6394d57b86dSChao Yu f2fs_ra_node_page(sbi, nid);
64079344efbSJaegeuk Kim }
64179344efbSJaegeuk Kim
64279344efbSJaegeuk Kim blk_finish_plug(&plug);
64379344efbSJaegeuk Kim }
64479344efbSJaegeuk Kim
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int unit = ADDRS_PER_BLOCK(dn->inode);
	pgoff_t base = 0;
	int lvl;

	/* no missing-node information recorded: just advance by one page */
	if (!dn->max_level)
		return pgofs + 1;

	/* widen the skipped span by one fanout per level above cur_level */
	for (lvl = dn->max_level; lvl > dn->cur_level; lvl--)
		unit *= NIDS_PER_BLOCK;

	/* base = first file offset addressed at depth max_level */
	switch (dn->max_level) {
	case 1:
		base = direct_index;
		break;
	case 2:
		base = direct_index + 2 * direct_blks;
		break;
	case 3:
		base = direct_index + 2 * direct_blks + 2 * indirect_blks;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	/* round (pgofs - base) up to the next multiple of the skipped unit */
	return ((pgofs - base) / unit + 1) * unit + base;
}
6773cf45747SChao Yu
6780a8165d7SJaegeuk Kim /*
679e05df3b1SJaegeuk Kim * The maximum depth is four.
680e05df3b1SJaegeuk Kim * Offset[0] will have raw inode offset.
681e05df3b1SJaegeuk Kim */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	/* data block addresses stored directly in the inode block */
	const long direct_index = ADDRS_PER_INODE(inode);
	/* data block addresses held by one direct node */
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	/* node ids held by one (double-)indirect node */
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	/* data blocks reachable through one indirect node */
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	/* data blocks reachable through the double-indirect node */
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	/*
	 * On return, offset[i] is the slot index inside the i-th node on
	 * the path and noffset[i] is that node's logical node number.
	 * Return value is the path depth (0..3) or -E2BIG when @block is
	 * beyond the maximum addressable range.
	 *
	 * Note the deliberate offset[n++] / noffset[n] interleaving below:
	 * each noffset[] store targets the element *after* the offset[]
	 * slot that was just written.
	 */
	noffset[0] = 0;

	if (block < direct_index) {
		/* depth 0: the address lives in the inode block itself */
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		/* depth 1: first direct node (logical node 1) */
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		/* depth 1: second direct node (logical node 2) */
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		/* depth 2: first indirect node -> direct node */
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		/* depth 2: second indirect node -> direct node */
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		/* depth 3: double-indirect -> indirect -> direct node */
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		/* each indirect subtree occupies dptrs_per_blk + 1 nodes */
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			offset[n - 2] * (dptrs_per_blk + 1) +
			offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		/* beyond the maximum file size representable by the tree */
		return -E2BIG;
	}
got:
	return level;
}
755e05df3b1SJaegeuk Kim
756e05df3b1SJaegeuk Kim /*
757e05df3b1SJaegeuk Kim * Caller should call f2fs_put_dnode(dn).
7584f4124d0SChao Yu * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
7597a88ddb5SChao Yu * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
760e05df3b1SJaegeuk Kim */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];	/* node pages on the path, [0] = inode page */
	struct page *parent = NULL;
	int offset[4];		/* slot index within each node on the path */
	unsigned int noffset[4];	/* logical node number of each node */
	nid_t nids[4];		/* nid of each node on the path */
	int level, i = 0;
	int err = 0;

	/* resolve @index into a path of up to four nodes */
	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	/* the caller may pass a pre-loaded, locked inode page */
	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			/* link the new child nid into its parent */
			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			/* last hop: read target and read-ahead its siblings */
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		/*
		 * Release the parent before descending: the inode page is
		 * only unlocked (caller still owns its reference), deeper
		 * parents are fully put.
		 */
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				/* parent already released above */
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	/* success: hand the locked target node page back via @dn */
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		/* COMPRESS_ADDR marks the cluster head; use the next slot */
		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	/*
	 * Record where the lookup stopped so callers can skip the whole
	 * missing subtree via f2fs_get_next_page_offset().
	 */
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
896e05df3b1SJaegeuk Kim
/*
 * Free the node addressed by dn->nid/dn->node_page: invalidate its block,
 * drop the node (and, for an inode node, the inode) counts, clear its NAT
 * mapping, then release and invalidate the page. On success the page
 * reference held via dn->node_page is consumed; on failure it is not.
 */
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	/* truncating the inode node itself: tear down inode accounting too */
	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	/* remember the index before putting the page away */
	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	/* drop the stale page from the node mapping's page cache */
	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}
933e05df3b1SJaegeuk Kim
/*
 * Truncate one direct node (all of its data blocks, then the node itself).
 * Returns 1 on success (one node freed, including the nid==0 hole case and
 * the already-missing -ENOENT case), or a negative errno.
 */
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *page;
	int err;

	/* a hole: nothing on disk to free */
	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(sbi, dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* sanity check: the node must be a non-inode node of this inode */
	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
				dn->inode->i_ino, dn->nid, ino_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_put_page(page, 1);
		return -EFSCORRUPTED;
	}

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	/* free every data block addressed by this direct node */
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		/* truncate_node failed before consuming the page reference */
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}
971e05df3b1SJaegeuk Kim
/*
 * Recursively truncate the subtree rooted at dn->nid, starting at child
 * slot @ofs. @nofs is the logical node number of this node; @depth is the
 * remaining tree depth (2 = children are direct nodes). Returns the number
 * of nodes freed (a whole indirect subtree counts as NIDS_PER_BLOCK + 1)
 * or a negative errno. When @ofs is 0 this node itself is freed as well.
 */
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	/* a hole: account the whole absent subtree as freed */
	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	/* read ahead the children we are about to truncate */
	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		/* children are direct nodes */
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			/* clear the child slot; track whether we dirtied it */
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		/* children are indirect nodes: recurse one level down */
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				/* child subtree fully freed: clear its slot */
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
1048e05df3b1SJaegeuk Kim
/*
 * Truncate the partially-covered tail of an indirect path: free the direct
 * nodes from slot offset[idx + 1] onward under the lowest indirect node on
 * the path, and free that node too once it becomes empty. On success the
 * offset[] cursor is advanced for the caller's next iteration.
 */
static int truncate_partial_nodes(struct dnode_of_data *dn,
					struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];	/* up to two indirect nodes on the path */
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;	/* index of the lowest indirect node */

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			/* rewind idx so the fail loop only puts what we got */
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		/* the lowest indirect node is now empty: free it as well */
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	/* advance the cursor to the next slot at the level above */
	offset[idx]++;
	offset[idx + 1] = 0;
	/* truncate_node consumed pages[idx]; don't put it again below */
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
1110e05df3b1SJaegeuk Kim
11110a8165d7SJaegeuk Kim /*
1112e05df3b1SJaegeuk Kim * All the block addresses of data and nodes should be nullified.
1113e05df3b1SJaegeuk Kim */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	/* resolve the first block to keep into a node path */
	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	/*
	 * Phase 1: if @from lands in the middle of an indirect subtree,
	 * truncate the partially-covered tail of that subtree first, and
	 * set nofs to the logical node number where phase 2 resumes.
	 */
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	/*
	 * Phase 2: walk the remaining inode slots (direct, indirect,
	 * double-indirect) and truncate each subtree wholesale; cont is
	 * cleared after the double-indirect slot, the last one.
	 */
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		/* a fully-freed subtree: clear its nid in the inode block */
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;	/* err >= 0 here: count of nodes freed */
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
1210e05df3b1SJaegeuk Kim
12119c77f754SJaegeuk Kim /* caller must lock inode page */
f2fs_truncate_xattr_node(struct inode * inode)12124d57b86dSChao Yu int f2fs_truncate_xattr_node(struct inode *inode)
12134f16fb0fSJaegeuk Kim {
12144081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
12154f16fb0fSJaegeuk Kim nid_t nid = F2FS_I(inode)->i_xattr_nid;
12164f16fb0fSJaegeuk Kim struct dnode_of_data dn;
12174f16fb0fSJaegeuk Kim struct page *npage;
12187735730dSChao Yu int err;
12194f16fb0fSJaegeuk Kim
12204f16fb0fSJaegeuk Kim if (!nid)
12214f16fb0fSJaegeuk Kim return 0;
12224f16fb0fSJaegeuk Kim
12234d57b86dSChao Yu npage = f2fs_get_node_page(sbi, nid);
12244f16fb0fSJaegeuk Kim if (IS_ERR(npage))
12254f16fb0fSJaegeuk Kim return PTR_ERR(npage);
12264f16fb0fSJaegeuk Kim
12277735730dSChao Yu set_new_dnode(&dn, inode, NULL, npage, nid);
12287735730dSChao Yu err = truncate_node(&dn);
12297735730dSChao Yu if (err) {
12307735730dSChao Yu f2fs_put_page(npage, 1);
12317735730dSChao Yu return err;
12327735730dSChao Yu }
12337735730dSChao Yu
1234205b9822SJaegeuk Kim f2fs_i_xnid_write(inode, 0);
123565985d93SJaegeuk Kim
12364f16fb0fSJaegeuk Kim return 0;
12374f16fb0fSJaegeuk Kim }
12384f16fb0fSJaegeuk Kim
123939936837SJaegeuk Kim /*
12404f4124d0SChao Yu * Caller should grab and release a rwsem by calling f2fs_lock_op() and
12414f4124d0SChao Yu * f2fs_unlock_op().
124239936837SJaegeuk Kim */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	/* look up the inode node page itself (block index 0) */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	/*
	 * At this point i_blocks should be 0 or 8 (presumably one 4KB
	 * block, the inode block itself, in 512-byte units — NOTE(review):
	 * confirm the unit); anything else indicates on-disk inconsistency.
	 */
	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}
1285e05df3b1SJaegeuk Kim
f2fs_new_inode_page(struct inode * inode)12864d57b86dSChao Yu struct page *f2fs_new_inode_page(struct inode *inode)
1287e05df3b1SJaegeuk Kim {
1288e05df3b1SJaegeuk Kim struct dnode_of_data dn;
1289e05df3b1SJaegeuk Kim
1290e05df3b1SJaegeuk Kim /* allocate inode page for new inode */
1291e05df3b1SJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
129244a83ff6SJaegeuk Kim
129344a83ff6SJaegeuk Kim /* caller should f2fs_put_page(page, 1); */
12944d57b86dSChao Yu return f2fs_new_node_page(&dn, 0);
1295e05df3b1SJaegeuk Kim }
1296e05df3b1SJaegeuk Kim
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* ofs == 0 means this is the inode node itself */
	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	/* debug build: verify the nid is really unallocated in the NAT */
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		/*
		 * NOTE(review): unlike the branch above, this path does not
		 * undo inc_valid_node_count — confirm whether a
		 * dec_valid_node_count(sbi, dn->inode, !ofs) is needed here.
		 */
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto fail;
	}
#endif
	/* register the nid as newly allocated (NEW_ADDR) in the NAT cache */
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	/* an xattr node: record its nid in the in-memory inode */
	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
1354e05df3b1SJaegeuk Kim
135556ae674cSJaegeuk Kim /*
135656ae674cSJaegeuk Kim * Caller should do after getting the following values.
135756ae674cSJaegeuk Kim * 0: f2fs_put_page(page, 0)
135886531d6bSJaegeuk Kim * LOCKED_PAGE or error: f2fs_put_page(page, 1)
135956ae674cSJaegeuk Kim */
read_node_page(struct page * page,blk_opf_t op_flags)13607649c873SBart Van Assche static int read_node_page(struct page *page, blk_opf_t op_flags)
1361e05df3b1SJaegeuk Kim {
13624081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1363e05df3b1SJaegeuk Kim struct node_info ni;
1364cf04e8ebSJaegeuk Kim struct f2fs_io_info fio = {
136505ca3632SJaegeuk Kim .sbi = sbi,
1366cf04e8ebSJaegeuk Kim .type = NODE,
136704d328deSMike Christie .op = REQ_OP_READ,
136804d328deSMike Christie .op_flags = op_flags,
136905ca3632SJaegeuk Kim .page = page,
13704375a336SJaegeuk Kim .encrypted_page = NULL,
1371cf04e8ebSJaegeuk Kim };
13727735730dSChao Yu int err;
1373e05df3b1SJaegeuk Kim
137454c55c4eSWeichao Guo if (PageUptodate(page)) {
1375b42b179bSChao Yu if (!f2fs_inode_chksum_verify(sbi, page)) {
1376b42b179bSChao Yu ClearPageUptodate(page);
137710f966bbSChao Yu return -EFSBADCRC;
1378b42b179bSChao Yu }
13793bdad3c7SJaegeuk Kim return LOCKED_PAGE;
138054c55c4eSWeichao Guo }
13813bdad3c7SJaegeuk Kim
1382a9419b63SJaegeuk Kim err = f2fs_get_node_info(sbi, page->index, &ni, false);
13837735730dSChao Yu if (err)
13847735730dSChao Yu return err;
1385e05df3b1SJaegeuk Kim
1386b7ec2061SJaegeuk Kim /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
1387e6ecb142SJaegeuk Kim if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
13882bca1e23SJaegeuk Kim ClearPageUptodate(page);
1389e05df3b1SJaegeuk Kim return -ENOENT;
1390393ff91fSJaegeuk Kim }
1391393ff91fSJaegeuk Kim
13927a9d7548SChao Yu fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
13938b83ac81SChao Yu
13948b83ac81SChao Yu err = f2fs_submit_page_bio(&fio);
13958b83ac81SChao Yu
13968b83ac81SChao Yu if (!err)
139734a23525SChao Yu f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);
13988b83ac81SChao Yu
13998b83ac81SChao Yu return err;
1400e05df3b1SJaegeuk Kim }
1401e05df3b1SJaegeuk Kim
14020a8165d7SJaegeuk Kim /*
1403e05df3b1SJaegeuk Kim * Readahead a node page
1404e05df3b1SJaegeuk Kim */
f2fs_ra_node_page(struct f2fs_sb_info * sbi,nid_t nid)14054d57b86dSChao Yu void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1406e05df3b1SJaegeuk Kim {
1407e05df3b1SJaegeuk Kim struct page *apage;
140856ae674cSJaegeuk Kim int err;
1409e05df3b1SJaegeuk Kim
1410e8458725SChao Yu if (!nid)
1411e8458725SChao Yu return;
14124d57b86dSChao Yu if (f2fs_check_nid_range(sbi, nid))
1413a4f843bdSJaegeuk Kim return;
1414e8458725SChao Yu
14155ec2d99dSMatthew Wilcox apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1416999270deSFan Li if (apage)
1417393ff91fSJaegeuk Kim return;
1418e05df3b1SJaegeuk Kim
1419300e129cSJaegeuk Kim apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1420e05df3b1SJaegeuk Kim if (!apage)
1421e05df3b1SJaegeuk Kim return;
1422e05df3b1SJaegeuk Kim
142370246286SChristoph Hellwig err = read_node_page(apage, REQ_RAHEAD);
142486531d6bSJaegeuk Kim f2fs_put_page(apage, err ? 1 : 0);
1425e05df3b1SJaegeuk Kim }
1426e05df3b1SJaegeuk Kim
/*
 * Return the locked, verified node page for @nid, reading it from disk if
 * necessary.  When @parent is given, also readahead the sibling node pages
 * following slot @start.  Returns an ERR_PTR on failure.
 */
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
			struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		/* page was already uptodate; it is still locked */
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	/* page may have been truncated/reclaimed while we slept; retry */
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (likely(nid == nid_of_node(page)))
		return page;

	/* footer nid mismatch: on-disk node metadata is inconsistent */
	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			nid, nid_of_node(page), ino_of_node(page),
			ofs_of_node(page), cpver_of_node(page),
			next_blkaddr_of_node(page));
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
	err = -EFSCORRUPTED;
out_err:
	ClearPageUptodate(page);
out_put_err:
	/* ENOENT comes from read_node_page which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, page->index, NODE);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
1489e05df3b1SJaegeuk Kim
/* Get the locked node page for @nid, with no sibling readahead. */
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}
14940e022ea8SChao Yu
f2fs_get_node_page_ra(struct page * parent,int start)14954d57b86dSChao Yu struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1496e05df3b1SJaegeuk Kim {
14974081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
14980e022ea8SChao Yu nid_t nid = get_nid(parent, start, false);
1499e05df3b1SJaegeuk Kim
15000e022ea8SChao Yu return __get_node_page(sbi, nid, parent, start);
1501e05df3b1SJaegeuk Kim }
1502e05df3b1SJaegeuk Kim
/*
 * Write back the inline data kept in the first page of @ino's data mapping,
 * if that inode is still in the inode cache and the page is dirty.
 */
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (page) {
		if (PageUptodate(page) && PageDirty(page) &&
				clear_page_dirty_for_io(page)) {
			int ret = f2fs_write_inline_data(inode, page);

			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
			/* write failed: keep the page dirty for a retry */
			if (ret)
				set_page_dirty(page);
		}
		f2fs_put_page(page, 1);
	}
	iput(inode);
}
15382049d4fcSJaegeuk Kim
/*
 * Scan the dirty node pages of @ino and return a reference to the last
 * dirty direct-node page found (the one that will carry the fsync mark).
 * Returns NULL if none is dirty, or ERR_PTR(-EIO) on checkpoint error.
 */
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct page *last_page = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			/* only warm (non-cold? no: cold) dnodes of this inode
			 * are fsync candidates — NOTE(review): dnodes written
			 * by fsync are cold per f2fs convention; confirm */
			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			/* page may have been reclaimed while unlocked */
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			/* re-check under the lock */
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* a later dirty dnode supersedes the previous one */
			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_page;
}
1595608514deSJaegeuk Kim
/*
 * Write back one locked, dirty node page.
 *
 * @atomic:     this is the final dnode of an atomic fsync; adds flush/FUA
 *              flags unless barriers are disabled or the device is zoned.
 * @submitted:  if non-NULL, set to whether a bio was actually submitted.
 * @do_balance: rebalance dirty resources after the write.
 * @seq_id:     if non-NULL, receives the fsync-node sequence number when the
 *              page is tracked in the fsync node list.
 *
 * Returns 0 on success (including "page was already truncated"), or
 * AOP_WRITEPAGE_ACTIVATE after redirtying the page when it cannot be
 * written right now.  The page is unlocked on the 0-return paths.
 */
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* keep node pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		/* otherwise drop the dirty page: no more writes after cp error */
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	/* no writeback while power-on recovery is in progress */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	/* defer async writeback of fsync-marked (cold) dnodes */
	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	/* reclaim context must not block on node_write */
	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		/* bio was flushed above, so don't report fio.submitted */
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
1707faa24895SJaegeuk Kim
/*
 * Migrate a node page for garbage collection.  Foreground GC writes the
 * page out synchronously right away; background GC merely redirties it so
 * regular writeback will move it.  Consumes the caller's page reference and
 * lock.  Returns 0 or -EAGAIN if the page could not be written now.
 */
int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		/* force the page dirty so the write below relocates it */
		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		/* __write_node_page unlocks the page on success */
		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}
1745f15194fcSYunlei He
f2fs_write_node_page(struct page * page,struct writeback_control * wbc)1746faa24895SJaegeuk Kim static int f2fs_write_node_page(struct page *page,
1747faa24895SJaegeuk Kim struct writeback_control *wbc)
1748faa24895SJaegeuk Kim {
174950fa53ecSChao Yu return __write_node_page(page, false, NULL, wbc, false,
175050fa53ecSChao Yu FS_NODE_IO, NULL);
1751faa24895SJaegeuk Kim }
1752faa24895SJaegeuk Kim
/*
 * Write back all dirty dnode pages of @inode for fsync.  In @atomic mode
 * the last dnode carries the fsync mark and flush/FUA semantics; if that
 * mark could not be written (page raced clean), the whole scan is retried.
 * @seq_id, if non-NULL, receives the fsync-node sequence number to wait on.
 * Returns 0 on success or -EIO on any failure.
 */
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_folios;
	int nwritten = 0;

	if (atomic) {
		/* pin the dnode that must carry the fsync mark */
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			/* page may have been reclaimed while unlocked */
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			/* re-check under the lock */
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			/* reset marks; only the chosen dnode gets them */
			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				percpu_counter_inc(&sbi->rf_node_block_count);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (ret || marked)
			break;
	}
	/* the fsync mark never made it to disk: redirty and rescan */
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}
187252681375SJaegeuk Kim
f2fs_match_ino(struct inode * inode,unsigned long ino,void * data)1873052a82d8SChao Yu static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1874052a82d8SChao Yu {
1875052a82d8SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1876052a82d8SChao Yu bool clean;
1877052a82d8SChao Yu
1878052a82d8SChao Yu if (inode->i_ino != ino)
1879052a82d8SChao Yu return 0;
1880052a82d8SChao Yu
1881052a82d8SChao Yu if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1882052a82d8SChao Yu return 0;
1883052a82d8SChao Yu
1884052a82d8SChao Yu spin_lock(&sbi->inode_lock[DIRTY_META]);
1885052a82d8SChao Yu clean = list_empty(&F2FS_I(inode)->gdirty_list);
1886052a82d8SChao Yu spin_unlock(&sbi->inode_lock[DIRTY_META]);
1887052a82d8SChao Yu
1888052a82d8SChao Yu if (clean)
1889052a82d8SChao Yu return 0;
1890052a82d8SChao Yu
1891052a82d8SChao Yu inode = igrab(inode);
1892052a82d8SChao Yu if (!inode)
1893052a82d8SChao Yu return 0;
1894052a82d8SChao Yu return 1;
1895052a82d8SChao Yu }
1896052a82d8SChao Yu
flush_dirty_inode(struct page * page)1897052a82d8SChao Yu static bool flush_dirty_inode(struct page *page)
1898052a82d8SChao Yu {
1899052a82d8SChao Yu struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1900052a82d8SChao Yu struct inode *inode;
1901052a82d8SChao Yu nid_t ino = ino_of_node(page);
1902052a82d8SChao Yu
1903052a82d8SChao Yu inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1904052a82d8SChao Yu if (!inode)
1905052a82d8SChao Yu return false;
1906052a82d8SChao Yu
1907052a82d8SChao Yu f2fs_update_inode(inode, page);
1908052a82d8SChao Yu unlock_page(page);
1909052a82d8SChao Yu
1910052a82d8SChao Yu iput(inode);
1911052a82d8SChao Yu return true;
1912052a82d8SChao Yu }
1913052a82d8SChao Yu
/*
 * Walk every dirty dnode page in the node mapping and flush the inline
 * data of any inode whose node page carries the inline-data private flag.
 */
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct folio_batch fbatch;
	int nr_folios;

	folio_batch_init(&fbatch);

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (!IS_DNODE(page))
				continue;

			lock_page(page);

			/* page may have been reclaimed while unlocked */
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
195934c061adSSayali Lokhande
/*
 * f2fs_sync_node_pages - write back dirty node pages in dependency order.
 *
 * Walks the NODE mapping's dirty tag three times (step 0/1/2: indirect
 * nodes, then dentry dnodes, then cold file dnodes) so that parent nodes
 * reach disk before the dnodes that reference them.  @do_balance selects
 * the background-writeback behaviour: flush inline data and dirty inodes
 * before writing the node page itself.  Returns the last error from
 * __write_node_page(), or -EIO if a checkpoint error is pending.
 */
f2fs_sync_node_pages(struct f2fs_sb_info * sbi,struct writeback_control * wbc,bool do_balance,enum iostat_type io_type)19604d57b86dSChao Yu int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
19614d57b86dSChao Yu struct writeback_control *wbc,
1962b0af6d49SChao Yu bool do_balance, enum iostat_type io_type)
196352681375SJaegeuk Kim {
1964028a63a6SJan Kara pgoff_t index;
19657525486aSVishal Moola (Oracle) struct folio_batch fbatch;
196652681375SJaegeuk Kim int step = 0;
196712bb0a8fSJaegeuk Kim int nwritten = 0;
19683f5f4959SChao Yu int ret = 0;
19697525486aSVishal Moola (Oracle) int nr_folios, done = 0;
1970e05df3b1SJaegeuk Kim
19717525486aSVishal Moola (Oracle) folio_batch_init(&fbatch);
1972e05df3b1SJaegeuk Kim
1973e05df3b1SJaegeuk Kim next_step:
1974e05df3b1SJaegeuk Kim index = 0;
1975e05df3b1SJaegeuk Kim
/* batch-fetch dirty folios; each step restarts the scan from index 0 */
19767525486aSVishal Moola (Oracle) while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
19777525486aSVishal Moola (Oracle) &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
19787525486aSVishal Moola (Oracle) &fbatch))) {
1979028a63a6SJan Kara int i;
1980e05df3b1SJaegeuk Kim
19817525486aSVishal Moola (Oracle) for (i = 0; i < nr_folios; i++) {
19827525486aSVishal Moola (Oracle) struct page *page = &fbatch.folios[i]->page;
1983d68f735bSJaegeuk Kim bool submitted = false;
1984e05df3b1SJaegeuk Kim
1985c29fd0c0SChao Yu /* give a priority to WB_SYNC threads */
1986c29fd0c0SChao Yu if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1987c29fd0c0SChao Yu wbc->sync_mode == WB_SYNC_NONE) {
1988c29fd0c0SChao Yu done = 1;
1989c29fd0c0SChao Yu break;
1990c29fd0c0SChao Yu }
1991c29fd0c0SChao Yu
1992e05df3b1SJaegeuk Kim /*
1993e05df3b1SJaegeuk Kim * flushing sequence with step:
1994e05df3b1SJaegeuk Kim * 0. indirect nodes
1995e05df3b1SJaegeuk Kim * 1. dentry dnodes
1996e05df3b1SJaegeuk Kim * 2. file dnodes
1997e05df3b1SJaegeuk Kim */
1998e05df3b1SJaegeuk Kim if (step == 0 && IS_DNODE(page))
1999e05df3b1SJaegeuk Kim continue;
2000e05df3b1SJaegeuk Kim if (step == 1 && (!IS_DNODE(page) ||
2001e05df3b1SJaegeuk Kim is_cold_node(page)))
2002e05df3b1SJaegeuk Kim continue;
2003e05df3b1SJaegeuk Kim if (step == 2 && (!IS_DNODE(page) ||
2004e05df3b1SJaegeuk Kim !is_cold_node(page)))
2005e05df3b1SJaegeuk Kim continue;
20069a4cbc9eSChao Yu lock_node:
/* WB_SYNC_ALL must not skip pages, so it blocks on the lock */
20074b270a8cSChao Yu if (wbc->sync_mode == WB_SYNC_ALL)
20084b270a8cSChao Yu lock_page(page);
20094b270a8cSChao Yu else if (!trylock_page(page))
2010e05df3b1SJaegeuk Kim continue;
2011e05df3b1SJaegeuk Kim
/* page may have been truncated/migrated while unlocked */
20124ef51a8fSJaegeuk Kim if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
2013e05df3b1SJaegeuk Kim continue_unlock:
2014e05df3b1SJaegeuk Kim unlock_page(page);
2015e05df3b1SJaegeuk Kim continue;
2016e05df3b1SJaegeuk Kim }
2017e05df3b1SJaegeuk Kim
2018e05df3b1SJaegeuk Kim if (!PageDirty(page)) {
2019e05df3b1SJaegeuk Kim /* someone wrote it for us */
2020e05df3b1SJaegeuk Kim goto continue_unlock;
2021e05df3b1SJaegeuk Kim }
2022e05df3b1SJaegeuk Kim
2023b0f3b87fSJaegeuk Kim /* flush inline_data/inode, if it's async context. */
2024b0f3b87fSJaegeuk Kim if (!do_balance)
2025b0f3b87fSJaegeuk Kim goto write_node;
2026b0f3b87fSJaegeuk Kim
2027b0f3b87fSJaegeuk Kim /* flush inline_data */
2028b763f3beSChao Yu if (page_private_inline(page)) {
2029b763f3beSChao Yu clear_page_private_inline(page);
20302049d4fcSJaegeuk Kim unlock_page(page);
20312049d4fcSJaegeuk Kim flush_inline_data(sbi, ino_of_node(page));
/* page was unlocked above; retake the lock and re-validate */
20329a4cbc9eSChao Yu goto lock_node;
20332049d4fcSJaegeuk Kim }
20342049d4fcSJaegeuk Kim
2035052a82d8SChao Yu /* flush dirty inode */
20367859e97fSJaegeuk Kim if (IS_INODE(page) && flush_dirty_inode(page))
2037052a82d8SChao Yu goto lock_node;
2038b0f3b87fSJaegeuk Kim write_node:
2039bae0ee7aSChao Yu f2fs_wait_on_page_writeback(page, NODE, true, true);
2040fa3d2bdfSJaegeuk Kim
2041e05df3b1SJaegeuk Kim if (!clear_page_dirty_for_io(page))
2042e05df3b1SJaegeuk Kim goto continue_unlock;
2043e05df3b1SJaegeuk Kim
/* fsync/dentry marks only matter for fsync-driven writeback */
2044e05df3b1SJaegeuk Kim set_fsync_mark(page, 0);
2045e05df3b1SJaegeuk Kim set_dentry_mark(page, 0);
204652746519SJaegeuk Kim
2047401db79fSYunlong Song ret = __write_node_page(page, false, &submitted,
204850fa53ecSChao Yu wbc, do_balance, io_type, NULL);
2049d68f735bSJaegeuk Kim if (ret)
205052746519SJaegeuk Kim unlock_page(page);
2051d68f735bSJaegeuk Kim else if (submitted)
20523f5f4959SChao Yu nwritten++;
2053e05df3b1SJaegeuk Kim
2054e05df3b1SJaegeuk Kim if (--wbc->nr_to_write == 0)
2055e05df3b1SJaegeuk Kim break;
2056e05df3b1SJaegeuk Kim }
20577525486aSVishal Moola (Oracle) folio_batch_release(&fbatch);
2058e05df3b1SJaegeuk Kim cond_resched();
2059e05df3b1SJaegeuk Kim
/* quota exhausted: jump straight to the final step to exit */
2060e05df3b1SJaegeuk Kim if (wbc->nr_to_write == 0) {
2061e05df3b1SJaegeuk Kim step = 2;
2062e05df3b1SJaegeuk Kim break;
2063e05df3b1SJaegeuk Kim }
2064e05df3b1SJaegeuk Kim }
2065e05df3b1SJaegeuk Kim
2066e05df3b1SJaegeuk Kim if (step < 2) {
2067100c0655SJaegeuk Kim if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2068100c0655SJaegeuk Kim wbc->sync_mode == WB_SYNC_NONE && step == 1)
2069fd8c8cafSChao Yu goto out;
2070e05df3b1SJaegeuk Kim step++;
2071e05df3b1SJaegeuk Kim goto next_step;
2072e05df3b1SJaegeuk Kim }
2073fd8c8cafSChao Yu out:
/* flush any bios merged while writing the individual pages */
20743f5f4959SChao Yu if (nwritten)
2075b9109b0eSJaegeuk Kim f2fs_submit_merged_write(sbi, NODE);
2076db198ae0SChao Yu
2077db198ae0SChao Yu if (unlikely(f2fs_cp_error(sbi)))
2078db198ae0SChao Yu return -EIO;
20793f5f4959SChao Yu return ret;
2080e05df3b1SJaegeuk Kim }
2081e05df3b1SJaegeuk Kim
/*
 * f2fs_wait_on_node_pages_writeback - wait for fsync'ed node pages to hit disk.
 *
 * Walks sbi->fsync_node_list (ordered by seq_id) and waits for writeback
 * completion on every entry whose seq_id is <= @seq_id.  A reference is
 * taken on each page before dropping the irq-safe list lock so the page
 * cannot disappear while we sleep in f2fs_wait_on_page_writeback().
 * Returns any error latched in the NODE mapping via filemap_check_errors().
 */
f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info * sbi,unsigned int seq_id)208350fa53ecSChao Yu int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
208350fa53ecSChao Yu unsigned int seq_id)
2084cfe58f9dSJaegeuk Kim {
208550fa53ecSChao Yu struct fsync_node_entry *fn;
208650fa53ecSChao Yu struct page *page;
208750fa53ecSChao Yu struct list_head *head = &sbi->fsync_node_list;
208850fa53ecSChao Yu unsigned long flags;
208950fa53ecSChao Yu unsigned int cur_seq_id = 0;
2090cfe58f9dSJaegeuk Kim
209150fa53ecSChao Yu while (seq_id && cur_seq_id < seq_id) {
209250fa53ecSChao Yu spin_lock_irqsave(&sbi->fsync_node_lock, flags);
209350fa53ecSChao Yu if (list_empty(head)) {
209450fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
209550fa53ecSChao Yu break;
209650fa53ecSChao Yu }
209750fa53ecSChao Yu fn = list_first_entry(head, struct fsync_node_entry, list);
/* list is seq-ordered; a later entry means our range is done */
209850fa53ecSChao Yu if (fn->seq_id > seq_id) {
209950fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
210050fa53ecSChao Yu break;
210150fa53ecSChao Yu }
210250fa53ecSChao Yu cur_seq_id = fn->seq_id;
210350fa53ecSChao Yu page = fn->page;
/* pin the page before releasing the lock; fn may be freed after */
210450fa53ecSChao Yu get_page(page);
210550fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
21064ef51a8fSJaegeuk Kim
2107bae0ee7aSChao Yu f2fs_wait_on_page_writeback(page, NODE, true, false);
210850fa53ecSChao Yu
210950fa53ecSChao Yu put_page(page);
2110cfe58f9dSJaegeuk Kim }
2111cfe58f9dSJaegeuk Kim
211208c3eab5SChristophe JAILLET return filemap_check_errors(NODE_MAPPING(sbi));
2113cfe58f9dSJaegeuk Kim }
2114cfe58f9dSJaegeuk Kim
/*
 * f2fs_write_node_pages - address_space_operations ->writepages for nodes.
 *
 * Skips writeback during power-on recovery (SBI_POR_DOING) and for
 * background (WB_SYNC_NONE) flushes that either have too few dirty node
 * pages or would race with an in-flight WB_SYNC_ALL writer.  Otherwise
 * delegates to f2fs_sync_node_pages() under a block plug and adjusts
 * wbc->nr_to_write to reflect the pages actually consumed.
 */
f2fs_write_node_pages(struct address_space * mapping,struct writeback_control * wbc)2115e05df3b1SJaegeuk Kim static int f2fs_write_node_pages(struct address_space *mapping,
2116e05df3b1SJaegeuk Kim struct writeback_control *wbc)
2117e05df3b1SJaegeuk Kim {
21184081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
21199dfa1bafSJaegeuk Kim struct blk_plug plug;
212050c8cdb3SJaegeuk Kim long diff;
2121e05df3b1SJaegeuk Kim
21220771fcc7SChao Yu if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
21230771fcc7SChao Yu goto skip_write;
21240771fcc7SChao Yu
21254660f9c0SJaegeuk Kim /* balancing f2fs's metadata in background */
21267bcd0cfaSChao Yu f2fs_balance_fs_bg(sbi, true);
2127e05df3b1SJaegeuk Kim
2128a7fdffbdSJaegeuk Kim /* collect a number of dirty node pages and write together */
2129812a9597SJaegeuk Kim if (wbc->sync_mode != WB_SYNC_ALL &&
2130812a9597SJaegeuk Kim get_pages(sbi, F2FS_DIRTY_NODES) <
2131812a9597SJaegeuk Kim nr_pages_to_skip(sbi, NODE))
2132d3baf95dSJaegeuk Kim goto skip_write;
2133a7fdffbdSJaegeuk Kim
/* announce/observe a sync writer via wb_sync_req[NODE] refcount */
2134c29fd0c0SChao Yu if (wbc->sync_mode == WB_SYNC_ALL)
2135c29fd0c0SChao Yu atomic_inc(&sbi->wb_sync_req[NODE]);
213634415099SChao Yu else if (atomic_read(&sbi->wb_sync_req[NODE])) {
213734415099SChao Yu /* to avoid potential deadlock */
213834415099SChao Yu if (current->plug)
213934415099SChao Yu blk_finish_plug(current->plug);
2140c29fd0c0SChao Yu goto skip_write;
214134415099SChao Yu }
2142c29fd0c0SChao Yu
2143d31c7c3fSYunlei He trace_f2fs_writepages(mapping->host, wbc, NODE);
2144d31c7c3fSYunlei He
214550c8cdb3SJaegeuk Kim diff = nr_pages_to_write(sbi, NODE, wbc);
21469dfa1bafSJaegeuk Kim blk_start_plug(&plug);
21474d57b86dSChao Yu f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
21489dfa1bafSJaegeuk Kim blk_finish_plug(&plug);
214950c8cdb3SJaegeuk Kim wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2150c29fd0c0SChao Yu
2151c29fd0c0SChao Yu if (wbc->sync_mode == WB_SYNC_ALL)
2152c29fd0c0SChao Yu atomic_dec(&sbi->wb_sync_req[NODE]);
2153e05df3b1SJaegeuk Kim return 0;
2154d3baf95dSJaegeuk Kim
2155d3baf95dSJaegeuk Kim skip_write:
2156d3baf95dSJaegeuk Kim wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2157d31c7c3fSYunlei He trace_f2fs_writepages(mapping->host, wbc, NODE);
2158d3baf95dSJaegeuk Kim return 0;
2159e05df3b1SJaegeuk Kim }
2160e05df3b1SJaegeuk Kim
/*
 * f2fs_dirty_node_folio - ->dirty_folio callback for the node mapping.
 *
 * Marks the folio uptodate if needed, refreshes the inode checksum for
 * inode node pages (CONFIG_F2FS_CHECK_FS only), and on a 0->1 dirty
 * transition bumps the F2FS_DIRTY_NODES counter and sets the private
 * reference flag.  Returns true only when the folio was newly dirtied.
 */
f2fs_dirty_node_folio(struct address_space * mapping,struct folio * folio)2161cbc975b1SMatthew Wilcox (Oracle) static bool f2fs_dirty_node_folio(struct address_space *mapping,
2162cbc975b1SMatthew Wilcox (Oracle) struct folio *folio)
2163e05df3b1SJaegeuk Kim {
2164cbc975b1SMatthew Wilcox (Oracle) trace_f2fs_set_page_dirty(&folio->page, NODE);
216526c6b887SJaegeuk Kim
2166cbc975b1SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
2167cbc975b1SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
216854c55c4eSWeichao Guo #ifdef CONFIG_F2FS_CHECK_FS
2169cbc975b1SMatthew Wilcox (Oracle) if (IS_INODE(&folio->page))
217029c87793SMatthew Wilcox (Oracle) f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
217154c55c4eSWeichao Guo #endif
/* filemap_dirty_folio() returns true only if folio was clean before */
21729b7eadd9SShuqi Zhang if (filemap_dirty_folio(mapping, folio)) {
217329c87793SMatthew Wilcox (Oracle) inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2174cbc975b1SMatthew Wilcox (Oracle) set_page_private_reference(&folio->page);
2175cbc975b1SMatthew Wilcox (Oracle) return true;
2176e05df3b1SJaegeuk Kim }
2177cbc975b1SMatthew Wilcox (Oracle) return false;
2178e05df3b1SJaegeuk Kim }
2179e05df3b1SJaegeuk Kim
21800a8165d7SJaegeuk Kim /*
2181e05df3b1SJaegeuk Kim * Structure of the f2fs node operations
2182e05df3b1SJaegeuk Kim */
2183e05df3b1SJaegeuk Kim const struct address_space_operations f2fs_node_aops = {
2184e05df3b1SJaegeuk Kim .writepage = f2fs_write_node_page,
2185e05df3b1SJaegeuk Kim .writepages = f2fs_write_node_pages,
2186cbc975b1SMatthew Wilcox (Oracle) .dirty_folio = f2fs_dirty_node_folio,
218791503996SMatthew Wilcox (Oracle) .invalidate_folio = f2fs_invalidate_folio,
2188c26cd045SMatthew Wilcox (Oracle) .release_folio = f2fs_release_folio,
21891d5b9bd6SMatthew Wilcox (Oracle) .migrate_folio = filemap_migrate_folio,
2190e05df3b1SJaegeuk Kim };
2191e05df3b1SJaegeuk Kim
/*
 * __lookup_free_nid_list - find the free_nid entry for nid @n, or NULL.
 * Caller must hold nm_i->nid_list_lock.
 */
__lookup_free_nid_list(struct f2fs_nm_info * nm_i,nid_t n)21928a7ed66aSJaegeuk Kim static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
21938a7ed66aSJaegeuk Kim nid_t n)
2194e05df3b1SJaegeuk Kim {
21958a7ed66aSJaegeuk Kim return radix_tree_lookup(&nm_i->free_nid_root, n);
21963aa770a9SNamjae Jeon }
2197e05df3b1SJaegeuk Kim
/*
 * __insert_free_nid - add @i to the radix tree and the FREE_NID list.
 * Caller must hold nm_i->nid_list_lock.  Returns the radix tree insert
 * error (e.g. -EEXIST) without touching counters on failure.
 */
__insert_free_nid(struct f2fs_sb_info * sbi,struct free_nid * i)21989a4ffdf5SChao Yu static int __insert_free_nid(struct f2fs_sb_info *sbi,
2199b815bdc7SLiu Song struct free_nid *i)
2200e05df3b1SJaegeuk Kim {
2201b8559dc2SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
2202eb0aa4b8SJaegeuk Kim int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
22035f029c04SYi Zhuang
2204eb0aa4b8SJaegeuk Kim if (err)
2205eb0aa4b8SJaegeuk Kim return err;
2206eb0aa4b8SJaegeuk Kim
2207b815bdc7SLiu Song nm_i->nid_cnt[FREE_NID]++;
22089a4ffdf5SChao Yu list_add_tail(&i->list, &nm_i->free_nid_list);
2209eb0aa4b8SJaegeuk Kim return 0;
2210b8559dc2SChao Yu }
2211b8559dc2SChao Yu
/*
 * __remove_free_nid - drop @i from the nid tracking structures.
 * @state must match i->state.  Only FREE_NID entries sit on the list;
 * PREALLOC_NID entries are removed from the radix tree alone.
 * Caller must hold nm_i->nid_list_lock and frees @i afterwards.
 */
__remove_free_nid(struct f2fs_sb_info * sbi,struct free_nid * i,enum nid_state state)22129a4ffdf5SChao Yu static void __remove_free_nid(struct f2fs_sb_info *sbi,
2213a0761f63SFan Li struct free_nid *i, enum nid_state state)
2214b8559dc2SChao Yu {
2215b8559dc2SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
2216b8559dc2SChao Yu
22179a4ffdf5SChao Yu f2fs_bug_on(sbi, state != i->state);
22189a4ffdf5SChao Yu nm_i->nid_cnt[state]--;
22199a4ffdf5SChao Yu if (state == FREE_NID)
2220e05df3b1SJaegeuk Kim list_del(&i->list);
22218a7ed66aSJaegeuk Kim radix_tree_delete(&nm_i->free_nid_root, i->nid);
2222e05df3b1SJaegeuk Kim }
2223e05df3b1SJaegeuk Kim
/*
 * __move_free_nid - transition @i between FREE_NID and PREALLOC_NID,
 * keeping per-state counters and list membership consistent (only
 * FREE_NID entries live on free_nid_list).  Caller must hold
 * nm_i->nid_list_lock.
 */
__move_free_nid(struct f2fs_sb_info * sbi,struct free_nid * i,enum nid_state org_state,enum nid_state dst_state)2224a0761f63SFan Li static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2225a0761f63SFan Li enum nid_state org_state, enum nid_state dst_state)
2226a0761f63SFan Li {
2227a0761f63SFan Li struct f2fs_nm_info *nm_i = NM_I(sbi);
2228a0761f63SFan Li
2229a0761f63SFan Li f2fs_bug_on(sbi, org_state != i->state);
2230a0761f63SFan Li i->state = dst_state;
2231a0761f63SFan Li nm_i->nid_cnt[org_state]--;
2232a0761f63SFan Li nm_i->nid_cnt[dst_state]++;
2233a0761f63SFan Li
2234a0761f63SFan Li switch (dst_state) {
2235a0761f63SFan Li case PREALLOC_NID:
2236a0761f63SFan Li list_del(&i->list);
2237a0761f63SFan Li break;
2238a0761f63SFan Li case FREE_NID:
2239a0761f63SFan Li list_add_tail(&i->list, &nm_i->free_nid_list);
2240a0761f63SFan Li break;
2241a0761f63SFan Li default:
2242a0761f63SFan Li BUG_ON(1);
2243a0761f63SFan Li }
2244a0761f63SFan Li }
2245a0761f63SFan Li
/*
 * f2fs_nat_bitmap_enabled - report whether every NAT block has been
 * scanned into nat_block_bitmap.  Returns false as soon as one
 * unscanned block is found.  Takes nat_tree_lock for reading.
 */
f2fs_nat_bitmap_enabled(struct f2fs_sb_info * sbi)224694c821fbSChao Yu bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
224794c821fbSChao Yu {
224894c821fbSChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
224994c821fbSChao Yu unsigned int i;
225094c821fbSChao Yu bool ret = true;
225194c821fbSChao Yu
2252e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
225394c821fbSChao Yu for (i = 0; i < nm_i->nat_blocks; i++) {
225494c821fbSChao Yu if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
225594c821fbSChao Yu ret = false;
225694c821fbSChao Yu break;
225794c821fbSChao Yu }
225894c821fbSChao Yu }
2259e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
226094c821fbSChao Yu
226194c821fbSChao Yu return ret;
226294c821fbSChao Yu }
226394c821fbSChao Yu
/*
 * update_free_nid_bitmap - set (@set=true) or clear the free-nid bit for
 * @nid in its NAT block's free_nid_bitmap, and adjust free_nid_count.
 * No-op if that NAT block has not been scanned yet.  During a build scan
 * (@build=true) the count is maintained by the scanner itself, so the
 * clear path skips the decrement.  Caller holds nid_list_lock.
 */
update_free_nid_bitmap(struct f2fs_sb_info * sbi,nid_t nid,bool set,bool build)22645921aaa1SLiFan static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
22655921aaa1SLiFan bool set, bool build)
22665921aaa1SLiFan {
22675921aaa1SLiFan struct f2fs_nm_info *nm_i = NM_I(sbi);
22685921aaa1SLiFan unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
22695921aaa1SLiFan unsigned int nid_ofs = nid - START_NID(nid);
22705921aaa1SLiFan
22715921aaa1SLiFan if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
22725921aaa1SLiFan return;
22735921aaa1SLiFan
22745921aaa1SLiFan if (set) {
22755921aaa1SLiFan if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
22765921aaa1SLiFan return;
22775921aaa1SLiFan __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
22785921aaa1SLiFan nm_i->free_nid_count[nat_ofs]++;
22795921aaa1SLiFan } else {
22805921aaa1SLiFan if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
22815921aaa1SLiFan return;
22825921aaa1SLiFan __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
22835921aaa1SLiFan if (!build)
22845921aaa1SLiFan nm_i->free_nid_count[nat_ofs]--;
22855921aaa1SLiFan }
22865921aaa1SLiFan }
22875921aaa1SLiFan
22884ac91242SChao Yu /* return if the nid is recognized as free */
/*
 * add_free_nid - record @nid as a FREE_NID candidate.
 *
 * Allocates the free_nid entry and radix-tree node outside the lock
 * (GFP_NOFS | __GFP_NOFAIL), then validates under nid_list_lock.  When
 * @build is set, a nid whose NAT cache entry shows it checkpointed or
 * already allocated is rejected (see the Thread A/B race diagram below).
 * @update additionally refreshes the free-nid bitmap and, outside a build
 * scan, bumps available_nids.  Returns true iff the nid is free.
 */
add_free_nid(struct f2fs_sb_info * sbi,nid_t nid,bool build,bool update)22895921aaa1SLiFan static bool add_free_nid(struct f2fs_sb_info *sbi,
22905921aaa1SLiFan nid_t nid, bool build, bool update)
2291e05df3b1SJaegeuk Kim {
22926fb03f3aSJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
229330a61ddfSChao Yu struct free_nid *i, *e;
229459bbd474SJaegeuk Kim struct nat_entry *ne;
229530a61ddfSChao Yu int err = -EINVAL;
229630a61ddfSChao Yu bool ret = false;
22979198acebSJaegeuk Kim
22989198acebSJaegeuk Kim /* 0 nid should not be used */
2299cfb271d4SChao Yu if (unlikely(nid == 0))
23004ac91242SChao Yu return false;
230159bbd474SJaegeuk Kim
2302626bcf2bSChao Yu if (unlikely(f2fs_check_nid_range(sbi, nid)))
2303626bcf2bSChao Yu return false;
2304626bcf2bSChao Yu
230532410577SChao Yu i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2306e05df3b1SJaegeuk Kim i->nid = nid;
23079a4ffdf5SChao Yu i->state = FREE_NID;
2308e05df3b1SJaegeuk Kim
23095921aaa1SLiFan radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2310769ec6e5SJaegeuk Kim
2311b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
231230a61ddfSChao Yu
231330a61ddfSChao Yu if (build) {
231430a61ddfSChao Yu /*
231530a61ddfSChao Yu * Thread A Thread B
231630a61ddfSChao Yu * - f2fs_create
231730a61ddfSChao Yu * - f2fs_new_inode
23184d57b86dSChao Yu * - f2fs_alloc_nid
23199a4ffdf5SChao Yu * - __insert_nid_to_list(PREALLOC_NID)
232030a61ddfSChao Yu * - f2fs_balance_fs_bg
23214d57b86dSChao Yu * - f2fs_build_free_nids
23224d57b86dSChao Yu * - __f2fs_build_free_nids
232330a61ddfSChao Yu * - scan_nat_page
232430a61ddfSChao Yu * - add_free_nid
232530a61ddfSChao Yu * - __lookup_nat_cache
232630a61ddfSChao Yu * - f2fs_add_link
23274d57b86dSChao Yu * - f2fs_init_inode_metadata
23284d57b86dSChao Yu * - f2fs_new_inode_page
23294d57b86dSChao Yu * - f2fs_new_node_page
233030a61ddfSChao Yu * - set_node_addr
23314d57b86dSChao Yu * - f2fs_alloc_nid_done
23329a4ffdf5SChao Yu * - __remove_nid_from_list(PREALLOC_NID)
23339a4ffdf5SChao Yu * - __insert_nid_to_list(FREE_NID)
233430a61ddfSChao Yu */
233530a61ddfSChao Yu ne = __lookup_nat_cache(nm_i, nid);
233630a61ddfSChao Yu if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
233730a61ddfSChao Yu nat_get_blkaddr(ne) != NULL_ADDR))
233830a61ddfSChao Yu goto err_out;
233930a61ddfSChao Yu
/* already tracked: still report free if it is in FREE_NID state */
234030a61ddfSChao Yu e = __lookup_free_nid_list(nm_i, nid);
234130a61ddfSChao Yu if (e) {
23429a4ffdf5SChao Yu if (e->state == FREE_NID)
234330a61ddfSChao Yu ret = true;
234430a61ddfSChao Yu goto err_out;
234530a61ddfSChao Yu }
234630a61ddfSChao Yu }
234730a61ddfSChao Yu ret = true;
2348b815bdc7SLiu Song err = __insert_free_nid(sbi, i);
234930a61ddfSChao Yu err_out:
23505921aaa1SLiFan if (update) {
23515921aaa1SLiFan update_free_nid_bitmap(sbi, nid, ret, build);
23525921aaa1SLiFan if (!build)
23535921aaa1SLiFan nm_i->available_nids++;
23545921aaa1SLiFan }
2355b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2356769ec6e5SJaegeuk Kim radix_tree_preload_end();
23575921aaa1SLiFan
/* entry was not inserted; release the speculative allocation */
235830a61ddfSChao Yu if (err)
2359e05df3b1SJaegeuk Kim kmem_cache_free(free_nid_slab, i);
236030a61ddfSChao Yu return ret;
2361e05df3b1SJaegeuk Kim }
2362e05df3b1SJaegeuk Kim
/*
 * remove_free_nid - drop @nid from the FREE_NID tracking if present.
 * Entries in PREALLOC_NID state are left alone.  The slab object is
 * freed outside nid_list_lock.
 */
remove_free_nid(struct f2fs_sb_info * sbi,nid_t nid)2363b8559dc2SChao Yu static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2364e05df3b1SJaegeuk Kim {
2365b8559dc2SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
2366e05df3b1SJaegeuk Kim struct free_nid *i;
2367cf0ee0f0SChao Yu bool need_free = false;
2368cf0ee0f0SChao Yu
2369b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
23708a7ed66aSJaegeuk Kim i = __lookup_free_nid_list(nm_i, nid);
23719a4ffdf5SChao Yu if (i && i->state == FREE_NID) {
2372a0761f63SFan Li __remove_free_nid(sbi, i, FREE_NID);
2373cf0ee0f0SChao Yu need_free = true;
2374e05df3b1SJaegeuk Kim }
2375b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2376cf0ee0f0SChao Yu
2377cf0ee0f0SChao Yu if (need_free)
2378cf0ee0f0SChao Yu kmem_cache_free(free_nid_slab, i);
2379e05df3b1SJaegeuk Kim }
2380e05df3b1SJaegeuk Kim
/*
 * scan_nat_page - harvest free nids from one on-disk NAT block.
 *
 * Marks the block scanned in nat_block_bitmap, then walks its entries
 * starting at @start_nid: NULL_ADDR entries become FREE_NID candidates,
 * in-use entries clear the free-nid bitmap bit.  A NEW_ADDR on disk is
 * invalid (NEW_ADDR is an in-memory-only state) and yields -EFSCORRUPTED.
 */
scan_nat_page(struct f2fs_sb_info * sbi,struct page * nat_page,nid_t start_nid)2381e2374015SChao Yu static int scan_nat_page(struct f2fs_sb_info *sbi,
2382e05df3b1SJaegeuk Kim struct page *nat_page, nid_t start_nid)
2383e05df3b1SJaegeuk Kim {
23846fb03f3aSJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
2385e05df3b1SJaegeuk Kim struct f2fs_nat_block *nat_blk = page_address(nat_page);
2386e05df3b1SJaegeuk Kim block_t blk_addr;
23874ac91242SChao Yu unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2388e05df3b1SJaegeuk Kim int i;
2389e05df3b1SJaegeuk Kim
239023380b85SJaegeuk Kim __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
23914ac91242SChao Yu
2392e05df3b1SJaegeuk Kim i = start_nid % NAT_ENTRY_PER_BLOCK;
2393e05df3b1SJaegeuk Kim
2394e05df3b1SJaegeuk Kim for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2395cfb271d4SChao Yu if (unlikely(start_nid >= nm_i->max_nid))
239604431c44SJaegeuk Kim break;
239723d38844SHaicheng Li
2398e05df3b1SJaegeuk Kim blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2399e2374015SChao Yu
2400e2374015SChao Yu if (blk_addr == NEW_ADDR)
24010237975dSZhiguo Niu return -EFSCORRUPTED;
2402e2374015SChao Yu
24035921aaa1SLiFan if (blk_addr == NULL_ADDR) {
24045921aaa1SLiFan add_free_nid(sbi, start_nid, true, true);
24055921aaa1SLiFan } else {
2406346fe752SChao Yu spin_lock(&NM_I(sbi)->nid_list_lock);
24075921aaa1SLiFan update_free_nid_bitmap(sbi, start_nid, false, true);
2408346fe752SChao Yu spin_unlock(&NM_I(sbi)->nid_list_lock);
2409e05df3b1SJaegeuk Kim }
2410e05df3b1SJaegeuk Kim }
2411e2374015SChao Yu
2412e2374015SChao Yu return 0;
24135921aaa1SLiFan }
2414e05df3b1SJaegeuk Kim
/*
 * scan_curseg_cache - reconcile the free-nid list with the NAT journal.
 * Journal entries with NULL_ADDR are added as free; any other address
 * means the nid is in use and must be removed from the free list.
 * Takes the curseg journal rwsem for reading.
 */
scan_curseg_cache(struct f2fs_sb_info * sbi)24152fbaa25fSChao Yu static void scan_curseg_cache(struct f2fs_sb_info *sbi)
24164ac91242SChao Yu {
24174ac91242SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
24184ac91242SChao Yu struct f2fs_journal *journal = curseg->journal;
24192fbaa25fSChao Yu int i;
24204ac91242SChao Yu
24214ac91242SChao Yu down_read(&curseg->journal_rwsem);
24224ac91242SChao Yu for (i = 0; i < nats_in_cursum(journal); i++) {
24234ac91242SChao Yu block_t addr;
24244ac91242SChao Yu nid_t nid;
24254ac91242SChao Yu
24264ac91242SChao Yu addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
24274ac91242SChao Yu nid = le32_to_cpu(nid_in_journal(journal, i));
24284ac91242SChao Yu if (addr == NULL_ADDR)
24295921aaa1SLiFan add_free_nid(sbi, nid, true, false);
24304ac91242SChao Yu else
24314ac91242SChao Yu remove_free_nid(sbi, nid);
24324ac91242SChao Yu }
24334ac91242SChao Yu up_read(&curseg->journal_rwsem);
24342fbaa25fSChao Yu }
24352fbaa25fSChao Yu
/*
 * scan_free_nid_bits - refill the free-nid list from the in-memory
 * per-NAT-block free_nid_bitmap, avoiding disk I/O.  Only scanned NAT
 * blocks with a non-zero free count are walked; the scan stops once
 * MAX_FREE_NIDS entries are cached.  Finishes by reconciling against
 * the NAT journal.  Takes nat_tree_lock for reading.
 */
scan_free_nid_bits(struct f2fs_sb_info * sbi)2436e05df3b1SJaegeuk Kim static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
24374ac91242SChao Yu {
24384ac91242SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
24394ac91242SChao Yu unsigned int i, idx;
244097456574SFan Li nid_t nid;
24414ac91242SChao Yu
2442e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
24434ac91242SChao Yu
24444ac91242SChao Yu for (i = 0; i < nm_i->nat_blocks; i++) {
24454ac91242SChao Yu if (!test_bit_le(i, nm_i->nat_block_bitmap))
24464ac91242SChao Yu continue;
24474ac91242SChao Yu if (!nm_i->free_nid_count[i])
24484ac91242SChao Yu continue;
24494ac91242SChao Yu for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
245097456574SFan Li idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
245197456574SFan Li NAT_ENTRY_PER_BLOCK, idx);
245297456574SFan Li if (idx >= NAT_ENTRY_PER_BLOCK)
245397456574SFan Li break;
24544ac91242SChao Yu
24554ac91242SChao Yu nid = i * NAT_ENTRY_PER_BLOCK + idx;
24565921aaa1SLiFan add_free_nid(sbi, nid, true, false);
24574ac91242SChao Yu
24589a4ffdf5SChao Yu if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
24594ac91242SChao Yu goto out;
24604ac91242SChao Yu }
24614ac91242SChao Yu }
24624ac91242SChao Yu out:
24632fbaa25fSChao Yu scan_curseg_cache(sbi);
24644ac91242SChao Yu
2465e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
24664ac91242SChao Yu }
24674ac91242SChao Yu
/*
 * __f2fs_build_free_nids - scan NAT pages to (re)populate the free-nid
 * list, starting from nm_i->next_scan_nid.
 *
 * Fast exits: enough FREE_NIDs cached already, or (for non-@sync calls)
 * free-nid memory pressure.  Outside @mount, the cheap bitmap scan
 * (scan_free_nid_bits) is tried first.  Otherwise up to FREE_NID_PAGES
 * NAT pages are read (with readahead) and scanned under nat_tree_lock,
 * followed by the NAT journal.  Returns 0 on success or the scan error
 * (-EFSCORRUPTED triggers an fsck flag).  Caller holds build_lock.
 */
__f2fs_build_free_nids(struct f2fs_sb_info * sbi,bool sync,bool mount)2468e2374015SChao Yu static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
24694d57b86dSChao Yu bool sync, bool mount)
2470e05df3b1SJaegeuk Kim {
2471e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
2472e2374015SChao Yu int i = 0, ret;
247355008d84SJaegeuk Kim nid_t nid = nm_i->next_scan_nid;
2474e05df3b1SJaegeuk Kim
2475e9cdd307SYunlei He if (unlikely(nid >= nm_i->max_nid))
2476e9cdd307SYunlei He nid = 0;
2477e9cdd307SYunlei He
/* align the scan cursor to a NAT block boundary */
2478e2cab031SSahitya Tummala if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2479e2cab031SSahitya Tummala nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2480e2cab031SSahitya Tummala
248155008d84SJaegeuk Kim /* Enough entries */
24829a4ffdf5SChao Yu if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2483e2374015SChao Yu return 0;
2484e05df3b1SJaegeuk Kim
24854d57b86dSChao Yu if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2486e2374015SChao Yu return 0;
2487e05df3b1SJaegeuk Kim
24884ac91242SChao Yu if (!mount) {
24894ac91242SChao Yu /* try to find free nids in free_nid_bitmap */
24904ac91242SChao Yu scan_free_nid_bits(sbi);
24914ac91242SChao Yu
249274986213SFan Li if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2493e2374015SChao Yu return 0;
249422ad0b6aSJaegeuk Kim }
249522ad0b6aSJaegeuk Kim
249655008d84SJaegeuk Kim /* readahead nat pages to be scanned */
24974d57b86dSChao Yu f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
249826879fb1SChao Yu META_NAT, true);
2499e05df3b1SJaegeuk Kim
2500e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
2501a5131193SJaegeuk Kim
2502e05df3b1SJaegeuk Kim while (1) {
/* skip NAT blocks already absorbed into the in-memory bitmaps */
250366e83361SYunlei He if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
250466e83361SYunlei He nm_i->nat_block_bitmap)) {
2505e05df3b1SJaegeuk Kim struct page *page = get_current_nat_page(sbi, nid);
2506e05df3b1SJaegeuk Kim
2507edc55aafSJaegeuk Kim if (IS_ERR(page)) {
2508edc55aafSJaegeuk Kim ret = PTR_ERR(page);
2509edc55aafSJaegeuk Kim } else {
2510e2374015SChao Yu ret = scan_nat_page(sbi, page, nid);
2511e05df3b1SJaegeuk Kim f2fs_put_page(page, 1);
2512edc55aafSJaegeuk Kim }
2513e2374015SChao Yu
2514e2374015SChao Yu if (ret) {
2515e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
25160237975dSZhiguo Niu
25170237975dSZhiguo Niu if (ret == -EFSCORRUPTED) {
2518dcbb4c10SJoe Perches f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
25190237975dSZhiguo Niu set_sbi_flag(sbi, SBI_NEED_FSCK);
25200237975dSZhiguo Niu f2fs_handle_error(sbi,
25210237975dSZhiguo Niu ERROR_INCONSISTENT_NAT);
25220237975dSZhiguo Niu }
25230237975dSZhiguo Niu
2524edc55aafSJaegeuk Kim return ret;
2525e2374015SChao Yu }
252666e83361SYunlei He }
2527e05df3b1SJaegeuk Kim
/* advance to the first nid of the next NAT block, wrapping at max */
2528e05df3b1SJaegeuk Kim nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2529cfb271d4SChao Yu if (unlikely(nid >= nm_i->max_nid))
2530e05df3b1SJaegeuk Kim nid = 0;
253155008d84SJaegeuk Kim
2532a6d494b6SChao Yu if (++i >= FREE_NID_PAGES)
2533e05df3b1SJaegeuk Kim break;
2534e05df3b1SJaegeuk Kim }
2535e05df3b1SJaegeuk Kim
253655008d84SJaegeuk Kim /* go to the next free nat pages to find free nids abundantly */
253755008d84SJaegeuk Kim nm_i->next_scan_nid = nid;
2538e05df3b1SJaegeuk Kim
2539e05df3b1SJaegeuk Kim /* find free nids from current sum_pages */
25402fbaa25fSChao Yu scan_curseg_cache(sbi);
2541dfc08a12SChao Yu
2542e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
25432db2388fSChao Yu
/* prefetch the next window for the following build pass */
25444d57b86dSChao Yu f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2545ea1a29a0SChao Yu nm_i->ra_nid_pages, META_NAT, false);
2546e2374015SChao Yu
2547e2374015SChao Yu return 0;
2548e05df3b1SJaegeuk Kim }
2549e05df3b1SJaegeuk Kim
/*
 * f2fs_build_free_nids - serialized wrapper around __f2fs_build_free_nids;
 * build_lock ensures only one free-nid scan runs at a time.
 */
f2fs_build_free_nids(struct f2fs_sb_info * sbi,bool sync,bool mount)2550e2374015SChao Yu int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
25512411cf5bSChao Yu {
2552e2374015SChao Yu int ret;
2553e2374015SChao Yu
25542411cf5bSChao Yu mutex_lock(&NM_I(sbi)->build_lock);
2555e2374015SChao Yu ret = __f2fs_build_free_nids(sbi, sync, mount);
25562411cf5bSChao Yu mutex_unlock(&NM_I(sbi)->build_lock);
2557e2374015SChao Yu
2558e2374015SChao Yu return ret;
25592411cf5bSChao Yu }
25602411cf5bSChao Yu
2561e05df3b1SJaegeuk Kim /*
2562e05df3b1SJaegeuk Kim * If this function returns success, caller can obtain a new nid
2563e05df3b1SJaegeuk Kim * from second parameter of this function.
2564e05df3b1SJaegeuk Kim * The returned nid could be used ino as well as nid when inode is created.
2565e05df3b1SJaegeuk Kim */
f2fs_alloc_nid(struct f2fs_sb_info * sbi,nid_t * nid)25664d57b86dSChao Yu bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2567e05df3b1SJaegeuk Kim {
2568e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
2569e05df3b1SJaegeuk Kim struct free_nid *i = NULL;
2570e05df3b1SJaegeuk Kim retry:
/* fault injection hook: simulate nid exhaustion */
2571c40e15a9SYangtao Li if (time_to_inject(sbi, FAULT_ALLOC_NID))
2572cb78942bSJaegeuk Kim return false;
25737fa750a1SArnd Bergmann
2574b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
2575e05df3b1SJaegeuk Kim
257604d47e67SChao Yu if (unlikely(nm_i->available_nids == 0)) {
257704d47e67SChao Yu spin_unlock(&nm_i->nid_list_lock);
257804d47e67SChao Yu return false;
257904d47e67SChao Yu }
2580e05df3b1SJaegeuk Kim
25814d57b86dSChao Yu /* We should not use stale free nids created by f2fs_build_free_nids */
25824d57b86dSChao Yu if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
25839a4ffdf5SChao Yu f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
25849a4ffdf5SChao Yu i = list_first_entry(&nm_i->free_nid_list,
2585b8559dc2SChao Yu struct free_nid, list);
2586e05df3b1SJaegeuk Kim *nid = i->nid;
2587b8559dc2SChao Yu
/* reserve the nid: FREE_NID -> PREALLOC_NID until done/failed */
2588a0761f63SFan Li __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
258904d47e67SChao Yu nm_i->available_nids--;
25904ac91242SChao Yu
2591346fe752SChao Yu update_free_nid_bitmap(sbi, *nid, false, false);
25924ac91242SChao Yu
2593b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2594e05df3b1SJaegeuk Kim return true;
2595e05df3b1SJaegeuk Kim }
2596b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
259755008d84SJaegeuk Kim
259855008d84SJaegeuk Kim /* Let's scan nat pages and its caches to get free nids */
2599f84262b0SJaegeuk Kim if (!f2fs_build_free_nids(sbi, true, false))
260055008d84SJaegeuk Kim goto retry;
2601f84262b0SJaegeuk Kim return false;
260255008d84SJaegeuk Kim }
2603e05df3b1SJaegeuk Kim
26040a8165d7SJaegeuk Kim /*
26054d57b86dSChao Yu * f2fs_alloc_nid() should be called prior to this function.
2606e05df3b1SJaegeuk Kim */
/*
 * Commit a successfully used nid: the PREALLOC_NID entry is removed
 * from tracking and its slab object freed outside the lock.
 */
f2fs_alloc_nid_done(struct f2fs_sb_info * sbi,nid_t nid)26074d57b86dSChao Yu void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2608e05df3b1SJaegeuk Kim {
2609e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
2610e05df3b1SJaegeuk Kim struct free_nid *i;
2611e05df3b1SJaegeuk Kim
2612b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
26138a7ed66aSJaegeuk Kim i = __lookup_free_nid_list(nm_i, nid);
2614b8559dc2SChao Yu f2fs_bug_on(sbi, !i);
2615a0761f63SFan Li __remove_free_nid(sbi, i, PREALLOC_NID);
2616b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2617cf0ee0f0SChao Yu
2618cf0ee0f0SChao Yu kmem_cache_free(free_nid_slab, i);
2619e05df3b1SJaegeuk Kim }
2620e05df3b1SJaegeuk Kim
26210a8165d7SJaegeuk Kim /*
26224d57b86dSChao Yu * f2fs_alloc_nid() should be called prior to this function.
2623e05df3b1SJaegeuk Kim */
/*
 * Return an unused preallocated nid: either recycle it back to FREE_NID
 * or, under free-nid memory pressure, drop the entry entirely.  Either
 * way the nid becomes available again (available_nids++, bitmap set).
 */
f2fs_alloc_nid_failed(struct f2fs_sb_info * sbi,nid_t nid)26244d57b86dSChao Yu void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2625e05df3b1SJaegeuk Kim {
262649952fa1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
262749952fa1SJaegeuk Kim struct free_nid *i;
2628cf0ee0f0SChao Yu bool need_free = false;
262949952fa1SJaegeuk Kim
263065985d93SJaegeuk Kim if (!nid)
263165985d93SJaegeuk Kim return;
263265985d93SJaegeuk Kim
2633b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
26348a7ed66aSJaegeuk Kim i = __lookup_free_nid_list(nm_i, nid);
2635b8559dc2SChao Yu f2fs_bug_on(sbi, !i);
2636b8559dc2SChao Yu
26374d57b86dSChao Yu if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2638a0761f63SFan Li __remove_free_nid(sbi, i, PREALLOC_NID);
2639cf0ee0f0SChao Yu need_free = true;
264095630cbaSHaicheng Li } else {
2641a0761f63SFan Li __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
264295630cbaSHaicheng Li }
264304d47e67SChao Yu
264404d47e67SChao Yu nm_i->available_nids++;
264504d47e67SChao Yu
2646346fe752SChao Yu update_free_nid_bitmap(sbi, nid, true, false);
26474ac91242SChao Yu
2648b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2649cf0ee0f0SChao Yu
2650cf0ee0f0SChao Yu if (need_free)
2651cf0ee0f0SChao Yu kmem_cache_free(free_nid_slab, i);
2652e05df3b1SJaegeuk Kim }
2653e05df3b1SJaegeuk Kim
/*
 * f2fs_try_to_free_nids - shrinker hook: drop up to @nr_shrink cached
 * FREE_NID entries, never going below MAX_FREE_NIDS.  Work is chunked
 * by SHRINK_NID_BATCH_SIZE per lock hold to bound nid_list_lock latency.
 * build_lock is only trylock'ed so the shrinker never blocks a scan.
 * Returns the number of entries actually freed.
 */
f2fs_try_to_free_nids(struct f2fs_sb_info * sbi,int nr_shrink)26544d57b86dSChao Yu int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
265531696580SChao Yu {
265631696580SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
265731696580SChao Yu int nr = nr_shrink;
265831696580SChao Yu
26599a4ffdf5SChao Yu if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2660ad4edb83SJaegeuk Kim return 0;
2661ad4edb83SJaegeuk Kim
266231696580SChao Yu if (!mutex_trylock(&nm_i->build_lock))
266331696580SChao Yu return 0;
266431696580SChao Yu
2665042be373SChao Yu while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2666042be373SChao Yu struct free_nid *i, *next;
2667042be373SChao Yu unsigned int batch = SHRINK_NID_BATCH_SIZE;
2668042be373SChao Yu
2669b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
26709a4ffdf5SChao Yu list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2671042be373SChao Yu if (!nr_shrink || !batch ||
26729a4ffdf5SChao Yu nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
267331696580SChao Yu break;
2674a0761f63SFan Li __remove_free_nid(sbi, i, FREE_NID);
267531696580SChao Yu kmem_cache_free(free_nid_slab, i);
267631696580SChao Yu nr_shrink--;
2677042be373SChao Yu batch--;
267831696580SChao Yu }
2679b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2680042be373SChao Yu }
2681042be373SChao Yu
268231696580SChao Yu mutex_unlock(&nm_i->build_lock);
268331696580SChao Yu
268431696580SChao Yu return nr - nr_shrink;
268531696580SChao Yu }
268631696580SChao Yu
/*
 * Recover the inline xattr area of @inode from the node @page written
 * at fsync time: synchronize the FI_INLINE_XATTR flag (and its stat
 * counter) with the on-disk inode image, and if the image carries an
 * inline xattr, copy its payload into the cached inode page.
 *
 * Returns 0 on success or a negative errno from fetching the inode page.
 */
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		/* image has an inline xattr: make in-memory state agree */
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		/* image has none: clear the flag and skip the payload copy */
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	/* wait for in-flight writeback before modifying the inode page */
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
272328cdce04SChao Yu
/*
 * Rebuild the xattr node of @inode during roll-forward recovery:
 *   1) invalidate the previous xattr node block, if one existed;
 *   2) allocate a fresh nid and a new xattr node page for the inode;
 *   3) if a recovery @page was supplied, copy its xattr payload into
 *      the new node page and mark it dirty.
 *
 * Returns 0 on success, -ENOSPC if no nid is available, or another
 * negative errno.
 */
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		/* give the nid back so it isn't leaked */
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	if (page) {
		/* only the valid xattr region is copied, not the footer */
		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
				VALID_XATTR_BLOCK_SIZE);
		set_page_dirty(xpage);
	}
	f2fs_put_page(xpage, 1);

	return 0;
}
2771abb2366cSJaegeuk Kim
/*
 * Recreate an inode's node page from the image in @page during
 * roll-forward recovery.  The inode must not currently own a block
 * (its NAT entry is NULL_ADDR); a fresh cache page is grabbed, the
 * metadata head of the on-disk inode is copied over with counters
 * reset, and NAT/valid-count bookkeeping is updated.
 *
 * Returns 0 on success, -EINVAL if the nid already maps to a block,
 * or a negative errno from the NAT lookup.
 */
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
	if (err)
		return err;

	/* the nid must be unused on disk to be recoverable this way */
	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		/* allocation pressure: wait briefly and retry forever */
		memalloc_retry_wait(GFP_NOFS);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	/* copy only the fixed head of the inode, up to the extent field */
	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	/* reset size/link/block counters; recovery will rebuild them */
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	/* keep only the layout-describing inline flags */
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		/*
		 * Each extra-attr field is copied only when the feature is
		 * enabled AND the field actually fits inside the source's
		 * extra area.
		 */
		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
2842e05df3b1SJaegeuk Kim
/*
 * Rebuild the node summary block @sum for segment @segno by reading
 * every node block in the segment and recording its nid from the node
 * footer (version and ofs_in_node are reset to 0).  Blocks are read
 * through the meta mapping in readahead-sized batches and invalidated
 * afterwards.
 *
 * Returns 0 on success or a negative errno from a failed page read.
 */
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		/* batch size capped at what a single bio can carry */
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		/* drop the temporary meta pages once consumed */
		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}
2881e05df3b1SJaegeuk Kim
/*
 * Drain all NAT entries from the hot-data curseg journal into the
 * in-memory NAT cache, marking each drained entry dirty so it will be
 * written back through the regular NAT flush path.  Runs under the
 * journal rwsem; the journal's NAT count is reset to zero at the end.
 */
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		/* skip corrupted out-of-range nids rather than cache them */
		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			/* __alloc_nat_entry(..., true) cannot fail (no_fail) */
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in journal has not been used after last
		 * checkpoint, we should remove it from available nids,
		 * since later we will add it again.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	/* all i journal slots were consumed; empty the journal */
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}
2923e05df3b1SJaegeuk Kim
__adjust_nat_entry_set(struct nat_entry_set * nes,struct list_head * head,int max)2924309cc2b6SJaegeuk Kim static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2925309cc2b6SJaegeuk Kim struct list_head *head, int max)
2926e05df3b1SJaegeuk Kim {
2927309cc2b6SJaegeuk Kim struct nat_entry_set *cur;
2928e05df3b1SJaegeuk Kim
2929309cc2b6SJaegeuk Kim if (nes->entry_cnt >= max)
2930309cc2b6SJaegeuk Kim goto add_out;
2931e05df3b1SJaegeuk Kim
2932309cc2b6SJaegeuk Kim list_for_each_entry(cur, head, set_list) {
2933309cc2b6SJaegeuk Kim if (cur->entry_cnt >= nes->entry_cnt) {
2934309cc2b6SJaegeuk Kim list_add(&nes->set_list, cur->set_list.prev);
2935309cc2b6SJaegeuk Kim return;
2936309cc2b6SJaegeuk Kim }
2937309cc2b6SJaegeuk Kim }
2938309cc2b6SJaegeuk Kim add_out:
2939309cc2b6SJaegeuk Kim list_add_tail(&nes->set_list, head);
2940aec71382SChao Yu }
2941aec71382SChao Yu
__update_nat_bits(struct f2fs_nm_info * nm_i,unsigned int nat_ofs,unsigned int valid)294294c821fbSChao Yu static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
294394c821fbSChao Yu unsigned int valid)
294494c821fbSChao Yu {
294594c821fbSChao Yu if (valid == 0) {
294694c821fbSChao Yu __set_bit_le(nat_ofs, nm_i->empty_nat_bits);
294794c821fbSChao Yu __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
294894c821fbSChao Yu return;
294994c821fbSChao Yu }
295094c821fbSChao Yu
295194c821fbSChao Yu __clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
295294c821fbSChao Yu if (valid == NAT_ENTRY_PER_BLOCK)
295394c821fbSChao Yu __set_bit_le(nat_ofs, nm_i->full_nat_bits);
295494c821fbSChao Yu else
295594c821fbSChao Yu __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
295694c821fbSChao Yu }
295794c821fbSChao Yu
/*
 * Recount the valid entries of the on-disk NAT block in @page (the
 * block beginning at @start_nid) and refresh its nat_bits.  No-op
 * unless the checkpoint carries CP_NAT_BITS_FLAG.
 */
static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
					struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int blk_idx = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int nr_valid = 0;
	int ent = 0;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	/* nid 0 is never handed out, so count it as in-use for block 0 */
	if (blk_idx == 0) {
		nr_valid = 1;
		ent = 1;
	}

	for (; ent < NAT_ENTRY_PER_BLOCK; ent++) {
		if (le32_to_cpu(nat_blk->entries[ent].block_addr) != NULL_ADDR)
			nr_valid++;
	}

	__update_nat_bits(nm_i, blk_idx, nr_valid);
}
298122ad0b6aSJaegeuk Kim
f2fs_enable_nat_bits(struct f2fs_sb_info * sbi)298294c821fbSChao Yu void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
298394c821fbSChao Yu {
298494c821fbSChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
298594c821fbSChao Yu unsigned int nat_ofs;
298694c821fbSChao Yu
2987e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
298894c821fbSChao Yu
298994c821fbSChao Yu for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
299094c821fbSChao Yu unsigned int valid = 0, nid_ofs = 0;
299194c821fbSChao Yu
299294c821fbSChao Yu /* handle nid zero due to it should never be used */
299394c821fbSChao Yu if (unlikely(nat_ofs == 0)) {
299494c821fbSChao Yu valid = 1;
299594c821fbSChao Yu nid_ofs = 1;
299694c821fbSChao Yu }
299794c821fbSChao Yu
299894c821fbSChao Yu for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
299994c821fbSChao Yu if (!test_bit_le(nid_ofs,
300094c821fbSChao Yu nm_i->free_nid_bitmap[nat_ofs]))
300194c821fbSChao Yu valid++;
300294c821fbSChao Yu }
300394c821fbSChao Yu
300494c821fbSChao Yu __update_nat_bits(nm_i, nat_ofs, valid);
300594c821fbSChao Yu }
300694c821fbSChao Yu
3007e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
300822ad0b6aSJaegeuk Kim }
300922ad0b6aSJaegeuk Kim
/*
 * Write one dirty NAT entry set back, either into the hot-data curseg
 * journal (when space allows and this is not an umount checkpoint) or
 * into its on-disk NAT page.  Each flushed nid is returned to the free
 * nid machinery or cleared from the free bitmap as appropriate, and a
 * fully-drained set is removed from the radix tree and freed.
 *
 * Returns 0 on success or a negative errno from get_next_nat_page().
 */
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		/* NEW_ADDR entries must never reach the flush path */
		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			/* alloc=1: find or create the journal slot for nid */
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			/* nid became unused: make it allocatable again */
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		/* refresh empty/full nat_bits from the just-written block */
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}
3084aec71382SChao Yu
3085309cc2b6SJaegeuk Kim /*
3086309cc2b6SJaegeuk Kim * This function is called during the checkpointing process.
3087309cc2b6SJaegeuk Kim */
/*
 * This function is called during the checkpointing process.
 *
 * Flush all dirty NAT entries: optionally drain the journal first
 * (on umount, or when it lacks space for every dirty entry), gather
 * the dirty sets sorted by size, and flush them one set at a time via
 * __flush_nat_entry_set().  Returns 0 or the first flush error.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[NAT_VEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	f2fs_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there are no enough space in journal to store dirty nat
	 * entries, remove all entries from journal and merge them
	 * into nat entry set.
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	/* gang-lookup all dirty sets and sort them into "sets" by size */
	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	f2fs_up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}
3147e05df3b1SJaegeuk Kim
/*
 * Allocate the in-memory nat_bits buffer and, when the checkpoint has
 * CP_NAT_BITS_FLAG set, load its content from the tail of the current
 * checkpoint pack.  The buffer layout is: 8-byte checkpoint version,
 * then the full_nat_bits bitmap, then the empty_nat_bits bitmap.
 * A version mismatch disables nat_bits rather than failing the mount.
 *
 * Returns 0 on success, -ENOMEM, or an errno from reading meta pages.
 */
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	/* two bitmaps (full + empty) plus the 8-byte version header */
	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	/* nat_bits live in the last blocks of the checkpoint pack */
	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	/* stored version is cp_ver with the checkpoint CRC in the high bits */
	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}
319422ad0b6aSJaegeuk Kim
/*
 * Seed the free nid bitmaps from the nat_bits loaded at mount:
 * every NAT block flagged empty has all of its nids marked free, and
 * both empty and full blocks are marked "scanned" in nat_block_bitmap
 * so the builder need not read them from disk.  No-op without
 * CP_NAT_BITS_FLAG.
 */
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	/* note: i is advanced inside the loop by find_next_bit_le() */
	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	/* full blocks carry no free nids; just mark them scanned */
	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}
32287041d5d2SChao Yu
/*
 * Initialize the node manager's fields from the raw superblock and
 * checkpoint: geometry (NAT blocks, max nid), counters, tunables,
 * radix trees, lists, locks, and the NAT version bitmap(s).
 *
 * Returns 0 on success or -ENOMEM.  On failure, partially allocated
 * memory is left for the caller's teardown path to release —
 * NOTE(review): presumably f2fs_destroy_node_manager(); confirm.
 */
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_f2fs_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	/* private copy of the checkpoint's NAT version bitmap */
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	/* mirror copy used to cross-check bitmap consistency at runtime */
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}
3286e05df3b1SJaegeuk Kim
/*
 * Allocate the free-nid tracking structures: a per-NAT-block free nid
 * bitmap array, the scanned-block bitmap, and the per-block free nid
 * counters.
 *
 * Returns 0 on success or -ENOMEM.  On failure, earlier allocations
 * are not freed here — NOTE(review): presumably released by the
 * caller's teardown (f2fs_destroy_node_manager); confirm.
 */
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	/* array of per-NAT-block bitmap pointers */
	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	/* one bitmap per NAT block, one bit per nid in the block */
	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	/* one bit per NAT block: set once the block has been scanned */
	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
							GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	/* per-NAT-block count of free nids */
	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}
33194ac91242SChao Yu
f2fs_build_node_manager(struct f2fs_sb_info * sbi)33204d57b86dSChao Yu int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3321e05df3b1SJaegeuk Kim {
3322e05df3b1SJaegeuk Kim int err;
3323e05df3b1SJaegeuk Kim
3324acbf054dSChao Yu sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3325acbf054dSChao Yu GFP_KERNEL);
3326e05df3b1SJaegeuk Kim if (!sbi->nm_info)
3327e05df3b1SJaegeuk Kim return -ENOMEM;
3328e05df3b1SJaegeuk Kim
3329e05df3b1SJaegeuk Kim err = init_node_manager(sbi);
3330e05df3b1SJaegeuk Kim if (err)
3331e05df3b1SJaegeuk Kim return err;
3332e05df3b1SJaegeuk Kim
33334ac91242SChao Yu err = init_free_nid_cache(sbi);
33344ac91242SChao Yu if (err)
33354ac91242SChao Yu return err;
33364ac91242SChao Yu
33377041d5d2SChao Yu /* load free nid status from nat_bits table */
33387041d5d2SChao Yu load_free_nid_bitmap(sbi);
33397041d5d2SChao Yu
3340e2374015SChao Yu return f2fs_build_free_nids(sbi, true, true);
3341e05df3b1SJaegeuk Kim }
3342e05df3b1SJaegeuk Kim
/*
 * Tear down the node manager for @sbi: drain the free-nid list, the NAT
 * entry cache and the NAT set cache, then release every bitmap and the
 * nm_info structure itself.  Counterpart of f2fs_build_node_manager(),
 * and safe to call when build failed partway (nm_i may be NULL).
 */
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	/* one scratch vector shared by both gang lookups below */
	void *vec[NAT_VEC_SIZE];
	struct nat_entry **natvec = (struct nat_entry **)vec;
	struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
	nid_t nid = 0;
	unsigned int found;

	/* build may have failed before nm_info was allocated */
	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		/*
		 * Drop the spinlock around the slab free; the entry is
		 * already unlinked, and _safe iteration keeps next_i valid.
		 */
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	/* all free/preallocated nids must be gone by now */
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	f2fs_down_write(&nm_i->nat_tree_lock);
	/* gang-lookup NAT_VEC_SIZE entries at a time, advancing nid past each batch */
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NAT_VEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			/* unlink from the LRU-style nat list under its own lock */
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	/* reuse the scratch vector for the set lookup; clear stale pointers first */
	memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero, when cp_error was occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

	/* free the bitmaps allocated by init_free_nid_cache()/init_node_manager() */
	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		/* per-NAT-block bitmaps first, then the pointer array itself */
		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}
3421e05df3b1SJaegeuk Kim
f2fs_create_node_manager_caches(void)34224d57b86dSChao Yu int __init f2fs_create_node_manager_caches(void)
3423e05df3b1SJaegeuk Kim {
342498510003SChao Yu nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3425e8512d2eSGu Zheng sizeof(struct nat_entry));
3426e05df3b1SJaegeuk Kim if (!nat_entry_slab)
3427aec71382SChao Yu goto fail;
3428e05df3b1SJaegeuk Kim
342998510003SChao Yu free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3430e8512d2eSGu Zheng sizeof(struct free_nid));
3431aec71382SChao Yu if (!free_nid_slab)
3432ce3e6d25SMarkus Elfring goto destroy_nat_entry;
3433aec71382SChao Yu
343498510003SChao Yu nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3435aec71382SChao Yu sizeof(struct nat_entry_set));
3436aec71382SChao Yu if (!nat_entry_set_slab)
3437ce3e6d25SMarkus Elfring goto destroy_free_nid;
343850fa53ecSChao Yu
343998510003SChao Yu fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
344050fa53ecSChao Yu sizeof(struct fsync_node_entry));
344150fa53ecSChao Yu if (!fsync_node_entry_slab)
344250fa53ecSChao Yu goto destroy_nat_entry_set;
3443e05df3b1SJaegeuk Kim return 0;
3444aec71382SChao Yu
344550fa53ecSChao Yu destroy_nat_entry_set:
344650fa53ecSChao Yu kmem_cache_destroy(nat_entry_set_slab);
3447ce3e6d25SMarkus Elfring destroy_free_nid:
3448aec71382SChao Yu kmem_cache_destroy(free_nid_slab);
3449ce3e6d25SMarkus Elfring destroy_nat_entry:
3450aec71382SChao Yu kmem_cache_destroy(nat_entry_slab);
3451aec71382SChao Yu fail:
3452aec71382SChao Yu return -ENOMEM;
3453e05df3b1SJaegeuk Kim }
3454e05df3b1SJaegeuk Kim
/*
 * Destroy the slab caches created by f2fs_create_node_manager_caches(),
 * in reverse order of their creation.  Called on module unload.
 */
void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}
3462