xref: /openbmc/linux/fs/f2fs/node.c (revision 80551d17)
10a8165d7SJaegeuk Kim /*
2e05df3b1SJaegeuk Kim  * fs/f2fs/node.c
3e05df3b1SJaegeuk Kim  *
4e05df3b1SJaegeuk Kim  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5e05df3b1SJaegeuk Kim  *             http://www.samsung.com/
6e05df3b1SJaegeuk Kim  *
7e05df3b1SJaegeuk Kim  * This program is free software; you can redistribute it and/or modify
8e05df3b1SJaegeuk Kim  * it under the terms of the GNU General Public License version 2 as
9e05df3b1SJaegeuk Kim  * published by the Free Software Foundation.
10e05df3b1SJaegeuk Kim  */
11e05df3b1SJaegeuk Kim #include <linux/fs.h>
12e05df3b1SJaegeuk Kim #include <linux/f2fs_fs.h>
13e05df3b1SJaegeuk Kim #include <linux/mpage.h>
14e05df3b1SJaegeuk Kim #include <linux/backing-dev.h>
15e05df3b1SJaegeuk Kim #include <linux/blkdev.h>
16e05df3b1SJaegeuk Kim #include <linux/pagevec.h>
17e05df3b1SJaegeuk Kim #include <linux/swap.h>
18e05df3b1SJaegeuk Kim 
19e05df3b1SJaegeuk Kim #include "f2fs.h"
20e05df3b1SJaegeuk Kim #include "node.h"
21e05df3b1SJaegeuk Kim #include "segment.h"
2287905682SYunlei He #include "xattr.h"
239e4ded3fSJaegeuk Kim #include "trace.h"
2451dd6249SNamjae Jeon #include <trace/events/f2fs.h>
25e05df3b1SJaegeuk Kim 
264d57b86dSChao Yu #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
27f978f5a0SGu Zheng 
28e05df3b1SJaegeuk Kim static struct kmem_cache *nat_entry_slab;
29e05df3b1SJaegeuk Kim static struct kmem_cache *free_nid_slab;
30aec71382SChao Yu static struct kmem_cache *nat_entry_set_slab;
31e05df3b1SJaegeuk Kim 
32a4f843bdSJaegeuk Kim /*
33a4f843bdSJaegeuk Kim  * Check whether the given nid is within node id range.
34a4f843bdSJaegeuk Kim  */
354d57b86dSChao Yu int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
36a4f843bdSJaegeuk Kim {
37a4f843bdSJaegeuk Kim 	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
38a4f843bdSJaegeuk Kim 		set_sbi_flag(sbi, SBI_NEED_FSCK);
39a4f843bdSJaegeuk Kim 		f2fs_msg(sbi->sb, KERN_WARNING,
40a4f843bdSJaegeuk Kim 				"%s: out-of-range nid=%x, run fsck to fix.",
41a4f843bdSJaegeuk Kim 				__func__, nid);
42a4f843bdSJaegeuk Kim 		return -EINVAL;
43a4f843bdSJaegeuk Kim 	}
44a4f843bdSJaegeuk Kim 	return 0;
45a4f843bdSJaegeuk Kim }
46a4f843bdSJaegeuk Kim 
474d57b86dSChao Yu bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
48cdfc41c1SJaegeuk Kim {
496fb03f3aSJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
50cdfc41c1SJaegeuk Kim 	struct sysinfo val;
51e5e7ea3cSJaegeuk Kim 	unsigned long avail_ram;
52cdfc41c1SJaegeuk Kim 	unsigned long mem_size = 0;
536fb03f3aSJaegeuk Kim 	bool res = false;
54cdfc41c1SJaegeuk Kim 
55cdfc41c1SJaegeuk Kim 	si_meminfo(&val);
56e5e7ea3cSJaegeuk Kim 
57e5e7ea3cSJaegeuk Kim 	/* only uses low memory */
58e5e7ea3cSJaegeuk Kim 	avail_ram = val.totalram - val.totalhigh;
59e5e7ea3cSJaegeuk Kim 
60429511cdSChao Yu 	/*
61429511cdSChao Yu 	 * give 25%, 25%, 50%, 50%, 50% memory for each components respectively
62429511cdSChao Yu 	 */
636fb03f3aSJaegeuk Kim 	if (type == FREE_NIDS) {
649a4ffdf5SChao Yu 		mem_size = (nm_i->nid_cnt[FREE_NID] *
65b8559dc2SChao Yu 				sizeof(struct free_nid)) >> PAGE_SHIFT;
66e5e7ea3cSJaegeuk Kim 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
676fb03f3aSJaegeuk Kim 	} else if (type == NAT_ENTRIES) {
68e5e7ea3cSJaegeuk Kim 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
6909cbfeafSKirill A. Shutemov 							PAGE_SHIFT;
70e5e7ea3cSJaegeuk Kim 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
71e589c2c4SJaegeuk Kim 		if (excess_cached_nats(sbi))
72e589c2c4SJaegeuk Kim 			res = false;
73a1257023SJaegeuk Kim 	} else if (type == DIRTY_DENTS) {
74a1257023SJaegeuk Kim 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
75a1257023SJaegeuk Kim 			return false;
76a1257023SJaegeuk Kim 		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
77a1257023SJaegeuk Kim 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
78e5e7ea3cSJaegeuk Kim 	} else if (type == INO_ENTRIES) {
79e5e7ea3cSJaegeuk Kim 		int i;
80e5e7ea3cSJaegeuk Kim 
8139d787beSChao Yu 		for (i = 0; i < MAX_INO_ENTRY; i++)
828f73cbb7SKinglong Mee 			mem_size += sbi->im[i].ino_num *
838f73cbb7SKinglong Mee 						sizeof(struct ino_entry);
848f73cbb7SKinglong Mee 		mem_size >>= PAGE_SHIFT;
85e5e7ea3cSJaegeuk Kim 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
86429511cdSChao Yu 	} else if (type == EXTENT_CACHE) {
877441ccefSJaegeuk Kim 		mem_size = (atomic_read(&sbi->total_ext_tree) *
887441ccefSJaegeuk Kim 				sizeof(struct extent_tree) +
89429511cdSChao Yu 				atomic_read(&sbi->total_ext_node) *
9009cbfeafSKirill A. Shutemov 				sizeof(struct extent_node)) >> PAGE_SHIFT;
91429511cdSChao Yu 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
9257864ae5SJaegeuk Kim 	} else if (type == INMEM_PAGES) {
9357864ae5SJaegeuk Kim 		/* it allows 20% / total_ram for inmemory pages */
9457864ae5SJaegeuk Kim 		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
9557864ae5SJaegeuk Kim 		res = mem_size < (val.totalram / 5);
961e84371fSJaegeuk Kim 	} else {
971663cae4SJaegeuk Kim 		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
981663cae4SJaegeuk Kim 			return true;
996fb03f3aSJaegeuk Kim 	}
1006fb03f3aSJaegeuk Kim 	return res;
101cdfc41c1SJaegeuk Kim }
102cdfc41c1SJaegeuk Kim 
103e05df3b1SJaegeuk Kim static void clear_node_page_dirty(struct page *page)
104e05df3b1SJaegeuk Kim {
105e05df3b1SJaegeuk Kim 	if (PageDirty(page)) {
1064d57b86dSChao Yu 		f2fs_clear_radix_tree_dirty_tag(page);
107e05df3b1SJaegeuk Kim 		clear_page_dirty_for_io(page);
108aec2f729SChao Yu 		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
109e05df3b1SJaegeuk Kim 	}
110e05df3b1SJaegeuk Kim 	ClearPageUptodate(page);
111e05df3b1SJaegeuk Kim }
112e05df3b1SJaegeuk Kim 
113e05df3b1SJaegeuk Kim static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
114e05df3b1SJaegeuk Kim {
11580551d17SChao Yu 	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
116e05df3b1SJaegeuk Kim }
117e05df3b1SJaegeuk Kim 
118e05df3b1SJaegeuk Kim static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
119e05df3b1SJaegeuk Kim {
120e05df3b1SJaegeuk Kim 	struct page *src_page;
121e05df3b1SJaegeuk Kim 	struct page *dst_page;
122e05df3b1SJaegeuk Kim 	pgoff_t dst_off;
123e05df3b1SJaegeuk Kim 	void *src_addr;
124e05df3b1SJaegeuk Kim 	void *dst_addr;
125e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
126e05df3b1SJaegeuk Kim 
12780551d17SChao Yu 	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
128e05df3b1SJaegeuk Kim 
129e05df3b1SJaegeuk Kim 	/* get current nat block page with lock */
13080551d17SChao Yu 	src_page = get_current_nat_page(sbi, nid);
1314d57b86dSChao Yu 	dst_page = f2fs_grab_meta_page(sbi, dst_off);
1329850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, PageDirty(src_page));
133e05df3b1SJaegeuk Kim 
134e05df3b1SJaegeuk Kim 	src_addr = page_address(src_page);
135e05df3b1SJaegeuk Kim 	dst_addr = page_address(dst_page);
13609cbfeafSKirill A. Shutemov 	memcpy(dst_addr, src_addr, PAGE_SIZE);
137e05df3b1SJaegeuk Kim 	set_page_dirty(dst_page);
138e05df3b1SJaegeuk Kim 	f2fs_put_page(src_page, 1);
139e05df3b1SJaegeuk Kim 
140e05df3b1SJaegeuk Kim 	set_to_next_nat(nm_i, nid);
141e05df3b1SJaegeuk Kim 
142e05df3b1SJaegeuk Kim 	return dst_page;
143e05df3b1SJaegeuk Kim }
144e05df3b1SJaegeuk Kim 
14512f9ef37SYunlei He static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
14612f9ef37SYunlei He {
14712f9ef37SYunlei He 	struct nat_entry *new;
14812f9ef37SYunlei He 
14912f9ef37SYunlei He 	if (no_fail)
1502882d343SChao Yu 		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
15112f9ef37SYunlei He 	else
1522882d343SChao Yu 		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
15312f9ef37SYunlei He 	if (new) {
15412f9ef37SYunlei He 		nat_set_nid(new, nid);
15512f9ef37SYunlei He 		nat_reset_flag(new);
15612f9ef37SYunlei He 	}
15712f9ef37SYunlei He 	return new;
15812f9ef37SYunlei He }
15912f9ef37SYunlei He 
16012f9ef37SYunlei He static void __free_nat_entry(struct nat_entry *e)
16112f9ef37SYunlei He {
16212f9ef37SYunlei He 	kmem_cache_free(nat_entry_slab, e);
16312f9ef37SYunlei He }
16412f9ef37SYunlei He 
16512f9ef37SYunlei He /* must be locked by nat_tree_lock */
16612f9ef37SYunlei He static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
16712f9ef37SYunlei He 	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
16812f9ef37SYunlei He {
16912f9ef37SYunlei He 	if (no_fail)
17012f9ef37SYunlei He 		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
17112f9ef37SYunlei He 	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
17212f9ef37SYunlei He 		return NULL;
17312f9ef37SYunlei He 
17412f9ef37SYunlei He 	if (raw_ne)
17512f9ef37SYunlei He 		node_info_from_raw_nat(&ne->ni, raw_ne);
17612f9ef37SYunlei He 	list_add_tail(&ne->list, &nm_i->nat_entries);
17712f9ef37SYunlei He 	nm_i->nat_cnt++;
17812f9ef37SYunlei He 	return ne;
17912f9ef37SYunlei He }
18012f9ef37SYunlei He 
181e05df3b1SJaegeuk Kim static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
182e05df3b1SJaegeuk Kim {
183e05df3b1SJaegeuk Kim 	return radix_tree_lookup(&nm_i->nat_root, n);
184e05df3b1SJaegeuk Kim }
185e05df3b1SJaegeuk Kim 
186e05df3b1SJaegeuk Kim static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
187e05df3b1SJaegeuk Kim 		nid_t start, unsigned int nr, struct nat_entry **ep)
188e05df3b1SJaegeuk Kim {
189e05df3b1SJaegeuk Kim 	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
190e05df3b1SJaegeuk Kim }
191e05df3b1SJaegeuk Kim 
192e05df3b1SJaegeuk Kim static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
193e05df3b1SJaegeuk Kim {
194e05df3b1SJaegeuk Kim 	list_del(&e->list);
195e05df3b1SJaegeuk Kim 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
196e05df3b1SJaegeuk Kim 	nm_i->nat_cnt--;
19712f9ef37SYunlei He 	__free_nat_entry(e);
198e05df3b1SJaegeuk Kim }
199e05df3b1SJaegeuk Kim 
200780de47cSChao Yu static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
201309cc2b6SJaegeuk Kim 							struct nat_entry *ne)
202309cc2b6SJaegeuk Kim {
203309cc2b6SJaegeuk Kim 	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
204309cc2b6SJaegeuk Kim 	struct nat_entry_set *head;
205309cc2b6SJaegeuk Kim 
206309cc2b6SJaegeuk Kim 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
207309cc2b6SJaegeuk Kim 	if (!head) {
20880c54505SJaegeuk Kim 		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
209309cc2b6SJaegeuk Kim 
210309cc2b6SJaegeuk Kim 		INIT_LIST_HEAD(&head->entry_list);
211309cc2b6SJaegeuk Kim 		INIT_LIST_HEAD(&head->set_list);
212309cc2b6SJaegeuk Kim 		head->set = set;
213309cc2b6SJaegeuk Kim 		head->entry_cnt = 0;
2149be32d72SJaegeuk Kim 		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
215309cc2b6SJaegeuk Kim 	}
216780de47cSChao Yu 	return head;
217780de47cSChao Yu }
218780de47cSChao Yu 
219780de47cSChao Yu static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
220780de47cSChao Yu 						struct nat_entry *ne)
221780de47cSChao Yu {
222780de47cSChao Yu 	struct nat_entry_set *head;
223780de47cSChao Yu 	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
224780de47cSChao Yu 
225780de47cSChao Yu 	if (!new_ne)
226780de47cSChao Yu 		head = __grab_nat_entry_set(nm_i, ne);
227780de47cSChao Yu 
228780de47cSChao Yu 	/*
229780de47cSChao Yu 	 * update entry_cnt in below condition:
230780de47cSChao Yu 	 * 1. update NEW_ADDR to valid block address;
231780de47cSChao Yu 	 * 2. update old block address to new one;
232780de47cSChao Yu 	 */
233780de47cSChao Yu 	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
234780de47cSChao Yu 				!get_nat_flag(ne, IS_DIRTY)))
235780de47cSChao Yu 		head->entry_cnt++;
236780de47cSChao Yu 
237780de47cSChao Yu 	set_nat_flag(ne, IS_PREALLOC, new_ne);
238febeca6dSChao Yu 
239febeca6dSChao Yu 	if (get_nat_flag(ne, IS_DIRTY))
240febeca6dSChao Yu 		goto refresh_list;
241febeca6dSChao Yu 
242309cc2b6SJaegeuk Kim 	nm_i->dirty_nat_cnt++;
243309cc2b6SJaegeuk Kim 	set_nat_flag(ne, IS_DIRTY, true);
244febeca6dSChao Yu refresh_list:
245780de47cSChao Yu 	if (new_ne)
246febeca6dSChao Yu 		list_del_init(&ne->list);
247febeca6dSChao Yu 	else
248febeca6dSChao Yu 		list_move_tail(&ne->list, &head->entry_list);
249309cc2b6SJaegeuk Kim }
250309cc2b6SJaegeuk Kim 
251309cc2b6SJaegeuk Kim static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
2520b28b71eSKinglong Mee 		struct nat_entry_set *set, struct nat_entry *ne)
253309cc2b6SJaegeuk Kim {
254309cc2b6SJaegeuk Kim 	list_move_tail(&ne->list, &nm_i->nat_entries);
255309cc2b6SJaegeuk Kim 	set_nat_flag(ne, IS_DIRTY, false);
2560b28b71eSKinglong Mee 	set->entry_cnt--;
257309cc2b6SJaegeuk Kim 	nm_i->dirty_nat_cnt--;
258309cc2b6SJaegeuk Kim }
259309cc2b6SJaegeuk Kim 
260309cc2b6SJaegeuk Kim static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
261309cc2b6SJaegeuk Kim 		nid_t start, unsigned int nr, struct nat_entry_set **ep)
262309cc2b6SJaegeuk Kim {
263309cc2b6SJaegeuk Kim 	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
264309cc2b6SJaegeuk Kim 							start, nr);
265309cc2b6SJaegeuk Kim }
266309cc2b6SJaegeuk Kim 
2674d57b86dSChao Yu int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
2682dcf51abSJaegeuk Kim {
2692dcf51abSJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2702dcf51abSJaegeuk Kim 	struct nat_entry *e;
2712dcf51abSJaegeuk Kim 	bool need = false;
2722dcf51abSJaegeuk Kim 
273b873b798SJaegeuk Kim 	down_read(&nm_i->nat_tree_lock);
2742dcf51abSJaegeuk Kim 	e = __lookup_nat_cache(nm_i, nid);
2752dcf51abSJaegeuk Kim 	if (e) {
2762dcf51abSJaegeuk Kim 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
2772dcf51abSJaegeuk Kim 				!get_nat_flag(e, HAS_FSYNCED_INODE))
2782dcf51abSJaegeuk Kim 			need = true;
2792dcf51abSJaegeuk Kim 	}
280b873b798SJaegeuk Kim 	up_read(&nm_i->nat_tree_lock);
2812dcf51abSJaegeuk Kim 	return need;
2822dcf51abSJaegeuk Kim }
2832dcf51abSJaegeuk Kim 
2844d57b86dSChao Yu bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
285e05df3b1SJaegeuk Kim {
286e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
287e05df3b1SJaegeuk Kim 	struct nat_entry *e;
28888bd02c9SJaegeuk Kim 	bool is_cp = true;
289e05df3b1SJaegeuk Kim 
290b873b798SJaegeuk Kim 	down_read(&nm_i->nat_tree_lock);
291e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, nid);
2927ef35e3bSJaegeuk Kim 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
29388bd02c9SJaegeuk Kim 		is_cp = false;
294b873b798SJaegeuk Kim 	up_read(&nm_i->nat_tree_lock);
295e05df3b1SJaegeuk Kim 	return is_cp;
296e05df3b1SJaegeuk Kim }
297e05df3b1SJaegeuk Kim 
2984d57b86dSChao Yu bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
299b6fe5873SJaegeuk Kim {
300b6fe5873SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
301b6fe5873SJaegeuk Kim 	struct nat_entry *e;
30288bd02c9SJaegeuk Kim 	bool need_update = true;
303b6fe5873SJaegeuk Kim 
304b873b798SJaegeuk Kim 	down_read(&nm_i->nat_tree_lock);
30588bd02c9SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, ino);
30688bd02c9SJaegeuk Kim 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
30788bd02c9SJaegeuk Kim 			(get_nat_flag(e, IS_CHECKPOINTED) ||
30888bd02c9SJaegeuk Kim 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
30988bd02c9SJaegeuk Kim 		need_update = false;
310b873b798SJaegeuk Kim 	up_read(&nm_i->nat_tree_lock);
31188bd02c9SJaegeuk Kim 	return need_update;
312b6fe5873SJaegeuk Kim }
313b6fe5873SJaegeuk Kim 
31412f9ef37SYunlei He /* must be locked by nat_tree_lock */
3151515aef0SChao Yu static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
316e05df3b1SJaegeuk Kim 						struct f2fs_nat_entry *ne)
317e05df3b1SJaegeuk Kim {
3181515aef0SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
31912f9ef37SYunlei He 	struct nat_entry *new, *e;
3209be32d72SJaegeuk Kim 
32112f9ef37SYunlei He 	new = __alloc_nat_entry(nid, false);
32212f9ef37SYunlei He 	if (!new)
32312f9ef37SYunlei He 		return;
32412f9ef37SYunlei He 
32512f9ef37SYunlei He 	down_write(&nm_i->nat_tree_lock);
326e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, nid);
32712f9ef37SYunlei He 	if (!e)
32812f9ef37SYunlei He 		e = __init_nat_entry(nm_i, new, ne, false);
32912f9ef37SYunlei He 	else
3300c0b471eSEric Biggers 		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
3310c0b471eSEric Biggers 				nat_get_blkaddr(e) !=
3320c0b471eSEric Biggers 					le32_to_cpu(ne->block_addr) ||
3331515aef0SChao Yu 				nat_get_version(e) != ne->version);
33412f9ef37SYunlei He 	up_write(&nm_i->nat_tree_lock);
33512f9ef37SYunlei He 	if (e != new)
33612f9ef37SYunlei He 		__free_nat_entry(new);
337e05df3b1SJaegeuk Kim }
338e05df3b1SJaegeuk Kim 
339e05df3b1SJaegeuk Kim static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
340479f40c4SJaegeuk Kim 			block_t new_blkaddr, bool fsync_done)
341e05df3b1SJaegeuk Kim {
342e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
343e05df3b1SJaegeuk Kim 	struct nat_entry *e;
34412f9ef37SYunlei He 	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);
3459be32d72SJaegeuk Kim 
346b873b798SJaegeuk Kim 	down_write(&nm_i->nat_tree_lock);
347e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, ni->nid);
348e05df3b1SJaegeuk Kim 	if (!e) {
34912f9ef37SYunlei He 		e = __init_nat_entry(nm_i, new, NULL, true);
3505c27f4eeSChao Yu 		copy_node_info(&e->ni, ni);
3519850cf4aSJaegeuk Kim 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
352e05df3b1SJaegeuk Kim 	} else if (new_blkaddr == NEW_ADDR) {
353e05df3b1SJaegeuk Kim 		/*
354e05df3b1SJaegeuk Kim 		 * when nid is reallocated,
355e05df3b1SJaegeuk Kim 		 * previous nat entry can be remained in nat cache.
356e05df3b1SJaegeuk Kim 		 * So, reinitialize it with new information.
357e05df3b1SJaegeuk Kim 		 */
3585c27f4eeSChao Yu 		copy_node_info(&e->ni, ni);
3599850cf4aSJaegeuk Kim 		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
360e05df3b1SJaegeuk Kim 	}
36112f9ef37SYunlei He 	/* let's free early to reduce memory consumption */
36212f9ef37SYunlei He 	if (e != new)
36312f9ef37SYunlei He 		__free_nat_entry(new);
364e05df3b1SJaegeuk Kim 
365e05df3b1SJaegeuk Kim 	/* sanity check */
3669850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
3679850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
368e05df3b1SJaegeuk Kim 			new_blkaddr == NULL_ADDR);
3699850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
370e05df3b1SJaegeuk Kim 			new_blkaddr == NEW_ADDR);
371e1da7872SChao Yu 	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
372e05df3b1SJaegeuk Kim 			new_blkaddr == NEW_ADDR);
373e05df3b1SJaegeuk Kim 
374e1c42045Sarter97 	/* increment version no as node is removed */
375e05df3b1SJaegeuk Kim 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
376e05df3b1SJaegeuk Kim 		unsigned char version = nat_get_version(e);
377e05df3b1SJaegeuk Kim 		nat_set_version(e, inc_node_version(version));
378e05df3b1SJaegeuk Kim 	}
379e05df3b1SJaegeuk Kim 
380e05df3b1SJaegeuk Kim 	/* change address */
381e05df3b1SJaegeuk Kim 	nat_set_blkaddr(e, new_blkaddr);
382e1da7872SChao Yu 	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
38388bd02c9SJaegeuk Kim 		set_nat_flag(e, IS_CHECKPOINTED, false);
384e05df3b1SJaegeuk Kim 	__set_nat_cache_dirty(nm_i, e);
385479f40c4SJaegeuk Kim 
386479f40c4SJaegeuk Kim 	/* update fsync_mark if its inode nat entry is still alive */
387d5b692b7SChao Yu 	if (ni->nid != ni->ino)
388479f40c4SJaegeuk Kim 		e = __lookup_nat_cache(nm_i, ni->ino);
38988bd02c9SJaegeuk Kim 	if (e) {
39088bd02c9SJaegeuk Kim 		if (fsync_done && ni->nid == ni->ino)
39188bd02c9SJaegeuk Kim 			set_nat_flag(e, HAS_FSYNCED_INODE, true);
39288bd02c9SJaegeuk Kim 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
39388bd02c9SJaegeuk Kim 	}
394b873b798SJaegeuk Kim 	up_write(&nm_i->nat_tree_lock);
395e05df3b1SJaegeuk Kim }
396e05df3b1SJaegeuk Kim 
3974d57b86dSChao Yu int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
398e05df3b1SJaegeuk Kim {
399e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
4001b38dc8eSJaegeuk Kim 	int nr = nr_shrink;
401e05df3b1SJaegeuk Kim 
402b873b798SJaegeuk Kim 	if (!down_write_trylock(&nm_i->nat_tree_lock))
403b873b798SJaegeuk Kim 		return 0;
404e05df3b1SJaegeuk Kim 
405e05df3b1SJaegeuk Kim 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
406e05df3b1SJaegeuk Kim 		struct nat_entry *ne;
407e05df3b1SJaegeuk Kim 		ne = list_first_entry(&nm_i->nat_entries,
408e05df3b1SJaegeuk Kim 					struct nat_entry, list);
409e05df3b1SJaegeuk Kim 		__del_from_nat_cache(nm_i, ne);
410e05df3b1SJaegeuk Kim 		nr_shrink--;
411e05df3b1SJaegeuk Kim 	}
412b873b798SJaegeuk Kim 	up_write(&nm_i->nat_tree_lock);
4131b38dc8eSJaegeuk Kim 	return nr - nr_shrink;
414e05df3b1SJaegeuk Kim }
415e05df3b1SJaegeuk Kim 
4160a8165d7SJaegeuk Kim /*
417e1c42045Sarter97  * This function always returns success
418e05df3b1SJaegeuk Kim  */
4197735730dSChao Yu int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
4204d57b86dSChao Yu 						struct node_info *ni)
421e05df3b1SJaegeuk Kim {
422e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
423e05df3b1SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
424b7ad7512SChao Yu 	struct f2fs_journal *journal = curseg->journal;
425e05df3b1SJaegeuk Kim 	nid_t start_nid = START_NID(nid);
426e05df3b1SJaegeuk Kim 	struct f2fs_nat_block *nat_blk;
427e05df3b1SJaegeuk Kim 	struct page *page = NULL;
428e05df3b1SJaegeuk Kim 	struct f2fs_nat_entry ne;
429e05df3b1SJaegeuk Kim 	struct nat_entry *e;
43066a82d1fSYunlei He 	pgoff_t index;
431e05df3b1SJaegeuk Kim 	int i;
432e05df3b1SJaegeuk Kim 
433e05df3b1SJaegeuk Kim 	ni->nid = nid;
434e05df3b1SJaegeuk Kim 
435e05df3b1SJaegeuk Kim 	/* Check nat cache */
436b873b798SJaegeuk Kim 	down_read(&nm_i->nat_tree_lock);
437e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, nid);
438e05df3b1SJaegeuk Kim 	if (e) {
439e05df3b1SJaegeuk Kim 		ni->ino = nat_get_ino(e);
440e05df3b1SJaegeuk Kim 		ni->blk_addr = nat_get_blkaddr(e);
441e05df3b1SJaegeuk Kim 		ni->version = nat_get_version(e);
442b873b798SJaegeuk Kim 		up_read(&nm_i->nat_tree_lock);
4437735730dSChao Yu 		return 0;
4441515aef0SChao Yu 	}
445e05df3b1SJaegeuk Kim 
4463547ea96SJaegeuk Kim 	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
4473547ea96SJaegeuk Kim 
448e05df3b1SJaegeuk Kim 	/* Check current segment summary */
449b7ad7512SChao Yu 	down_read(&curseg->journal_rwsem);
4504d57b86dSChao Yu 	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
451e05df3b1SJaegeuk Kim 	if (i >= 0) {
452dfc08a12SChao Yu 		ne = nat_in_journal(journal, i);
453e05df3b1SJaegeuk Kim 		node_info_from_raw_nat(ni, &ne);
454e05df3b1SJaegeuk Kim 	}
455b7ad7512SChao Yu 	up_read(&curseg->journal_rwsem);
45666a82d1fSYunlei He 	if (i >= 0) {
45766a82d1fSYunlei He 		up_read(&nm_i->nat_tree_lock);
458e05df3b1SJaegeuk Kim 		goto cache;
45966a82d1fSYunlei He 	}
460e05df3b1SJaegeuk Kim 
461e05df3b1SJaegeuk Kim 	/* Fill node_info from nat page */
46266a82d1fSYunlei He 	index = current_nat_addr(sbi, nid);
46366a82d1fSYunlei He 	up_read(&nm_i->nat_tree_lock);
46466a82d1fSYunlei He 
4654d57b86dSChao Yu 	page = f2fs_get_meta_page(sbi, index);
4667735730dSChao Yu 	if (IS_ERR(page))
4677735730dSChao Yu 		return PTR_ERR(page);
4687735730dSChao Yu 
469e05df3b1SJaegeuk Kim 	nat_blk = (struct f2fs_nat_block *)page_address(page);
470e05df3b1SJaegeuk Kim 	ne = nat_blk->entries[nid - start_nid];
471e05df3b1SJaegeuk Kim 	node_info_from_raw_nat(ni, &ne);
472e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 1);
473e05df3b1SJaegeuk Kim cache:
474e05df3b1SJaegeuk Kim 	/* cache nat entry */
4751515aef0SChao Yu 	cache_nat_entry(sbi, nid, &ne);
4767735730dSChao Yu 	return 0;
477e05df3b1SJaegeuk Kim }
478e05df3b1SJaegeuk Kim 
47979344efbSJaegeuk Kim /*
48079344efbSJaegeuk Kim  * readahead MAX_RA_NODE number of node pages.
48179344efbSJaegeuk Kim  */
4824d57b86dSChao Yu static void f2fs_ra_node_pages(struct page *parent, int start, int n)
48379344efbSJaegeuk Kim {
48479344efbSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
48579344efbSJaegeuk Kim 	struct blk_plug plug;
48679344efbSJaegeuk Kim 	int i, end;
48779344efbSJaegeuk Kim 	nid_t nid;
48879344efbSJaegeuk Kim 
48979344efbSJaegeuk Kim 	blk_start_plug(&plug);
49079344efbSJaegeuk Kim 
49179344efbSJaegeuk Kim 	/* Then, try readahead for siblings of the desired node */
49279344efbSJaegeuk Kim 	end = start + n;
49379344efbSJaegeuk Kim 	end = min(end, NIDS_PER_BLOCK);
49479344efbSJaegeuk Kim 	for (i = start; i < end; i++) {
49579344efbSJaegeuk Kim 		nid = get_nid(parent, i, false);
4964d57b86dSChao Yu 		f2fs_ra_node_page(sbi, nid);
49779344efbSJaegeuk Kim 	}
49879344efbSJaegeuk Kim 
49979344efbSJaegeuk Kim 	blk_finish_plug(&plug);
50079344efbSJaegeuk Kim }
50179344efbSJaegeuk Kim 
5024d57b86dSChao Yu pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
5033cf45747SChao Yu {
5043cf45747SChao Yu 	const long direct_index = ADDRS_PER_INODE(dn->inode);
5053cf45747SChao Yu 	const long direct_blks = ADDRS_PER_BLOCK;
5063cf45747SChao Yu 	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
5073cf45747SChao Yu 	unsigned int skipped_unit = ADDRS_PER_BLOCK;
5083cf45747SChao Yu 	int cur_level = dn->cur_level;
5093cf45747SChao Yu 	int max_level = dn->max_level;
5103cf45747SChao Yu 	pgoff_t base = 0;
5113cf45747SChao Yu 
5123cf45747SChao Yu 	if (!dn->max_level)
5133cf45747SChao Yu 		return pgofs + 1;
5143cf45747SChao Yu 
5153cf45747SChao Yu 	while (max_level-- > cur_level)
5163cf45747SChao Yu 		skipped_unit *= NIDS_PER_BLOCK;
5173cf45747SChao Yu 
5183cf45747SChao Yu 	switch (dn->max_level) {
5193cf45747SChao Yu 	case 3:
5203cf45747SChao Yu 		base += 2 * indirect_blks;
5213cf45747SChao Yu 	case 2:
5223cf45747SChao Yu 		base += 2 * direct_blks;
5233cf45747SChao Yu 	case 1:
5243cf45747SChao Yu 		base += direct_index;
5253cf45747SChao Yu 		break;
5263cf45747SChao Yu 	default:
5273cf45747SChao Yu 		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
5283cf45747SChao Yu 	}
5293cf45747SChao Yu 
5303cf45747SChao Yu 	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
5313cf45747SChao Yu }
5323cf45747SChao Yu 
5330a8165d7SJaegeuk Kim /*
534e05df3b1SJaegeuk Kim  * The maximum depth is four.
535e05df3b1SJaegeuk Kim  * Offset[0] will have raw inode offset.
536e05df3b1SJaegeuk Kim  */
53781ca7350SChao Yu static int get_node_path(struct inode *inode, long block,
538de93653fSJaegeuk Kim 				int offset[4], unsigned int noffset[4])
539e05df3b1SJaegeuk Kim {
54081ca7350SChao Yu 	const long direct_index = ADDRS_PER_INODE(inode);
541e05df3b1SJaegeuk Kim 	const long direct_blks = ADDRS_PER_BLOCK;
542e05df3b1SJaegeuk Kim 	const long dptrs_per_blk = NIDS_PER_BLOCK;
543e05df3b1SJaegeuk Kim 	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
544e05df3b1SJaegeuk Kim 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
545e05df3b1SJaegeuk Kim 	int n = 0;
546e05df3b1SJaegeuk Kim 	int level = 0;
547e05df3b1SJaegeuk Kim 
548e05df3b1SJaegeuk Kim 	noffset[0] = 0;
549e05df3b1SJaegeuk Kim 
550e05df3b1SJaegeuk Kim 	if (block < direct_index) {
55125c0a6e5SNamjae Jeon 		offset[n] = block;
552e05df3b1SJaegeuk Kim 		goto got;
553e05df3b1SJaegeuk Kim 	}
554e05df3b1SJaegeuk Kim 	block -= direct_index;
555e05df3b1SJaegeuk Kim 	if (block < direct_blks) {
556e05df3b1SJaegeuk Kim 		offset[n++] = NODE_DIR1_BLOCK;
557e05df3b1SJaegeuk Kim 		noffset[n] = 1;
55825c0a6e5SNamjae Jeon 		offset[n] = block;
559e05df3b1SJaegeuk Kim 		level = 1;
560e05df3b1SJaegeuk Kim 		goto got;
561e05df3b1SJaegeuk Kim 	}
562e05df3b1SJaegeuk Kim 	block -= direct_blks;
563e05df3b1SJaegeuk Kim 	if (block < direct_blks) {
564e05df3b1SJaegeuk Kim 		offset[n++] = NODE_DIR2_BLOCK;
565e05df3b1SJaegeuk Kim 		noffset[n] = 2;
56625c0a6e5SNamjae Jeon 		offset[n] = block;
567e05df3b1SJaegeuk Kim 		level = 1;
568e05df3b1SJaegeuk Kim 		goto got;
569e05df3b1SJaegeuk Kim 	}
570e05df3b1SJaegeuk Kim 	block -= direct_blks;
571e05df3b1SJaegeuk Kim 	if (block < indirect_blks) {
572e05df3b1SJaegeuk Kim 		offset[n++] = NODE_IND1_BLOCK;
573e05df3b1SJaegeuk Kim 		noffset[n] = 3;
574e05df3b1SJaegeuk Kim 		offset[n++] = block / direct_blks;
575e05df3b1SJaegeuk Kim 		noffset[n] = 4 + offset[n - 1];
57625c0a6e5SNamjae Jeon 		offset[n] = block % direct_blks;
577e05df3b1SJaegeuk Kim 		level = 2;
578e05df3b1SJaegeuk Kim 		goto got;
579e05df3b1SJaegeuk Kim 	}
580e05df3b1SJaegeuk Kim 	block -= indirect_blks;
581e05df3b1SJaegeuk Kim 	if (block < indirect_blks) {
582e05df3b1SJaegeuk Kim 		offset[n++] = NODE_IND2_BLOCK;
583e05df3b1SJaegeuk Kim 		noffset[n] = 4 + dptrs_per_blk;
584e05df3b1SJaegeuk Kim 		offset[n++] = block / direct_blks;
585e05df3b1SJaegeuk Kim 		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
58625c0a6e5SNamjae Jeon 		offset[n] = block % direct_blks;
587e05df3b1SJaegeuk Kim 		level = 2;
588e05df3b1SJaegeuk Kim 		goto got;
589e05df3b1SJaegeuk Kim 	}
590e05df3b1SJaegeuk Kim 	block -= indirect_blks;
591e05df3b1SJaegeuk Kim 	if (block < dindirect_blks) {
592e05df3b1SJaegeuk Kim 		offset[n++] = NODE_DIND_BLOCK;
593e05df3b1SJaegeuk Kim 		noffset[n] = 5 + (dptrs_per_blk * 2);
594e05df3b1SJaegeuk Kim 		offset[n++] = block / indirect_blks;
595e05df3b1SJaegeuk Kim 		noffset[n] = 6 + (dptrs_per_blk * 2) +
596e05df3b1SJaegeuk Kim 			      offset[n - 1] * (dptrs_per_blk + 1);
597e05df3b1SJaegeuk Kim 		offset[n++] = (block / direct_blks) % dptrs_per_blk;
598e05df3b1SJaegeuk Kim 		noffset[n] = 7 + (dptrs_per_blk * 2) +
599e05df3b1SJaegeuk Kim 			      offset[n - 2] * (dptrs_per_blk + 1) +
600e05df3b1SJaegeuk Kim 			      offset[n - 1];
60125c0a6e5SNamjae Jeon 		offset[n] = block % direct_blks;
602e05df3b1SJaegeuk Kim 		level = 3;
603e05df3b1SJaegeuk Kim 		goto got;
604e05df3b1SJaegeuk Kim 	} else {
605adb6dc19SJaegeuk Kim 		return -E2BIG;
606e05df3b1SJaegeuk Kim 	}
607e05df3b1SJaegeuk Kim got:
608e05df3b1SJaegeuk Kim 	return level;
609e05df3b1SJaegeuk Kim }
610e05df3b1SJaegeuk Kim 
611e05df3b1SJaegeuk Kim /*
612e05df3b1SJaegeuk Kim  * Caller should call f2fs_put_dnode(dn).
6134f4124d0SChao Yu  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
6144f4124d0SChao Yu  * f2fs_unlock_op() only if ro is not set RDONLY_NODE.
61539936837SJaegeuk Kim  * In the case of RDONLY_NODE, we don't need to care about mutex.
616e05df3b1SJaegeuk Kim  */
6174d57b86dSChao Yu int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
618e05df3b1SJaegeuk Kim {
6194081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
620e05df3b1SJaegeuk Kim 	struct page *npage[4];
621f1a3b98eSJaegeuk Kim 	struct page *parent = NULL;
622e05df3b1SJaegeuk Kim 	int offset[4];
623e05df3b1SJaegeuk Kim 	unsigned int noffset[4];
624e05df3b1SJaegeuk Kim 	nid_t nids[4];
6253cf45747SChao Yu 	int level, i = 0;
626e05df3b1SJaegeuk Kim 	int err = 0;
627e05df3b1SJaegeuk Kim 
62881ca7350SChao Yu 	level = get_node_path(dn->inode, index, offset, noffset);
629adb6dc19SJaegeuk Kim 	if (level < 0)
630adb6dc19SJaegeuk Kim 		return level;
631e05df3b1SJaegeuk Kim 
632e05df3b1SJaegeuk Kim 	nids[0] = dn->inode->i_ino;
6331646cfacSJaegeuk Kim 	npage[0] = dn->inode_page;
6341646cfacSJaegeuk Kim 
6351646cfacSJaegeuk Kim 	if (!npage[0]) {
6364d57b86dSChao Yu 		npage[0] = f2fs_get_node_page(sbi, nids[0]);
637e05df3b1SJaegeuk Kim 		if (IS_ERR(npage[0]))
638e05df3b1SJaegeuk Kim 			return PTR_ERR(npage[0]);
6391646cfacSJaegeuk Kim 	}
640f1a3b98eSJaegeuk Kim 
641f1a3b98eSJaegeuk Kim 	/* if inline_data is set, should not report any block indices */
642f1a3b98eSJaegeuk Kim 	if (f2fs_has_inline_data(dn->inode) && index) {
64376629165SJaegeuk Kim 		err = -ENOENT;
644f1a3b98eSJaegeuk Kim 		f2fs_put_page(npage[0], 1);
645f1a3b98eSJaegeuk Kim 		goto release_out;
646f1a3b98eSJaegeuk Kim 	}
647f1a3b98eSJaegeuk Kim 
648e05df3b1SJaegeuk Kim 	parent = npage[0];
64952c2db3fSChangman Lee 	if (level != 0)
650e05df3b1SJaegeuk Kim 		nids[1] = get_nid(parent, offset[0], true);
651e05df3b1SJaegeuk Kim 	dn->inode_page = npage[0];
652e05df3b1SJaegeuk Kim 	dn->inode_page_locked = true;
653e05df3b1SJaegeuk Kim 
654e05df3b1SJaegeuk Kim 	/* get indirect or direct nodes */
655e05df3b1SJaegeuk Kim 	for (i = 1; i <= level; i++) {
656e05df3b1SJaegeuk Kim 		bool done = false;
657e05df3b1SJaegeuk Kim 
658266e97a8SJaegeuk Kim 		if (!nids[i] && mode == ALLOC_NODE) {
659e05df3b1SJaegeuk Kim 			/* alloc new node */
6604d57b86dSChao Yu 			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
661e05df3b1SJaegeuk Kim 				err = -ENOSPC;
662e05df3b1SJaegeuk Kim 				goto release_pages;
663e05df3b1SJaegeuk Kim 			}
664e05df3b1SJaegeuk Kim 
665e05df3b1SJaegeuk Kim 			dn->nid = nids[i];
6664d57b86dSChao Yu 			npage[i] = f2fs_new_node_page(dn, noffset[i]);
667e05df3b1SJaegeuk Kim 			if (IS_ERR(npage[i])) {
6684d57b86dSChao Yu 				f2fs_alloc_nid_failed(sbi, nids[i]);
669e05df3b1SJaegeuk Kim 				err = PTR_ERR(npage[i]);
670e05df3b1SJaegeuk Kim 				goto release_pages;
671e05df3b1SJaegeuk Kim 			}
672e05df3b1SJaegeuk Kim 
673e05df3b1SJaegeuk Kim 			set_nid(parent, offset[i - 1], nids[i], i == 1);
6744d57b86dSChao Yu 			f2fs_alloc_nid_done(sbi, nids[i]);
675e05df3b1SJaegeuk Kim 			done = true;
676266e97a8SJaegeuk Kim 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
6774d57b86dSChao Yu 			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
678e05df3b1SJaegeuk Kim 			if (IS_ERR(npage[i])) {
679e05df3b1SJaegeuk Kim 				err = PTR_ERR(npage[i]);
680e05df3b1SJaegeuk Kim 				goto release_pages;
681e05df3b1SJaegeuk Kim 			}
682e05df3b1SJaegeuk Kim 			done = true;
683e05df3b1SJaegeuk Kim 		}
684e05df3b1SJaegeuk Kim 		if (i == 1) {
685e05df3b1SJaegeuk Kim 			dn->inode_page_locked = false;
686e05df3b1SJaegeuk Kim 			unlock_page(parent);
687e05df3b1SJaegeuk Kim 		} else {
688e05df3b1SJaegeuk Kim 			f2fs_put_page(parent, 1);
689e05df3b1SJaegeuk Kim 		}
690e05df3b1SJaegeuk Kim 
691e05df3b1SJaegeuk Kim 		if (!done) {
6924d57b86dSChao Yu 			npage[i] = f2fs_get_node_page(sbi, nids[i]);
693e05df3b1SJaegeuk Kim 			if (IS_ERR(npage[i])) {
694e05df3b1SJaegeuk Kim 				err = PTR_ERR(npage[i]);
695e05df3b1SJaegeuk Kim 				f2fs_put_page(npage[0], 0);
696e05df3b1SJaegeuk Kim 				goto release_out;
697e05df3b1SJaegeuk Kim 			}
698e05df3b1SJaegeuk Kim 		}
699e05df3b1SJaegeuk Kim 		if (i < level) {
700e05df3b1SJaegeuk Kim 			parent = npage[i];
701e05df3b1SJaegeuk Kim 			nids[i + 1] = get_nid(parent, offset[i], false);
702e05df3b1SJaegeuk Kim 		}
703e05df3b1SJaegeuk Kim 	}
704e05df3b1SJaegeuk Kim 	dn->nid = nids[level];
705e05df3b1SJaegeuk Kim 	dn->ofs_in_node = offset[level];
706e05df3b1SJaegeuk Kim 	dn->node_page = npage[level];
7077a2af766SChao Yu 	dn->data_blkaddr = datablock_addr(dn->inode,
7087a2af766SChao Yu 				dn->node_page, dn->ofs_in_node);
709e05df3b1SJaegeuk Kim 	return 0;
710e05df3b1SJaegeuk Kim 
711e05df3b1SJaegeuk Kim release_pages:
712e05df3b1SJaegeuk Kim 	f2fs_put_page(parent, 1);
713e05df3b1SJaegeuk Kim 	if (i > 1)
714e05df3b1SJaegeuk Kim 		f2fs_put_page(npage[0], 0);
715e05df3b1SJaegeuk Kim release_out:
716e05df3b1SJaegeuk Kim 	dn->inode_page = NULL;
717e05df3b1SJaegeuk Kim 	dn->node_page = NULL;
7183cf45747SChao Yu 	if (err == -ENOENT) {
7193cf45747SChao Yu 		dn->cur_level = i;
7203cf45747SChao Yu 		dn->max_level = level;
7210a2aa8fbSJaegeuk Kim 		dn->ofs_in_node = offset[level];
7223cf45747SChao Yu 	}
723e05df3b1SJaegeuk Kim 	return err;
724e05df3b1SJaegeuk Kim }
725e05df3b1SJaegeuk Kim 
7267735730dSChao Yu static int truncate_node(struct dnode_of_data *dn)
727e05df3b1SJaegeuk Kim {
7284081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
729e05df3b1SJaegeuk Kim 	struct node_info ni;
7307735730dSChao Yu 	int err;
731e05df3b1SJaegeuk Kim 
7327735730dSChao Yu 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
7337735730dSChao Yu 	if (err)
7347735730dSChao Yu 		return err;
735e05df3b1SJaegeuk Kim 
736e05df3b1SJaegeuk Kim 	/* Deallocate node address */
7374d57b86dSChao Yu 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
738000519f2SChao Yu 	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
739479f40c4SJaegeuk Kim 	set_node_addr(sbi, &ni, NULL_ADDR, false);
740e05df3b1SJaegeuk Kim 
741e05df3b1SJaegeuk Kim 	if (dn->nid == dn->inode->i_ino) {
7424d57b86dSChao Yu 		f2fs_remove_orphan_inode(sbi, dn->nid);
743e05df3b1SJaegeuk Kim 		dec_valid_inode_count(sbi);
7440f18b462SJaegeuk Kim 		f2fs_inode_synced(dn->inode);
745e05df3b1SJaegeuk Kim 	}
746000519f2SChao Yu 
747e05df3b1SJaegeuk Kim 	clear_node_page_dirty(dn->node_page);
748caf0047eSChao Yu 	set_sbi_flag(sbi, SBI_IS_DIRTY);
749e05df3b1SJaegeuk Kim 
750e05df3b1SJaegeuk Kim 	f2fs_put_page(dn->node_page, 1);
751bf39c00aSJaegeuk Kim 
752bf39c00aSJaegeuk Kim 	invalidate_mapping_pages(NODE_MAPPING(sbi),
753bf39c00aSJaegeuk Kim 			dn->node_page->index, dn->node_page->index);
754bf39c00aSJaegeuk Kim 
755e05df3b1SJaegeuk Kim 	dn->node_page = NULL;
75651dd6249SNamjae Jeon 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
7577735730dSChao Yu 
7587735730dSChao Yu 	return 0;
759e05df3b1SJaegeuk Kim }
760e05df3b1SJaegeuk Kim 
761e05df3b1SJaegeuk Kim static int truncate_dnode(struct dnode_of_data *dn)
762e05df3b1SJaegeuk Kim {
763e05df3b1SJaegeuk Kim 	struct page *page;
7647735730dSChao Yu 	int err;
765e05df3b1SJaegeuk Kim 
766e05df3b1SJaegeuk Kim 	if (dn->nid == 0)
767e05df3b1SJaegeuk Kim 		return 1;
768e05df3b1SJaegeuk Kim 
769e05df3b1SJaegeuk Kim 	/* get direct node */
7704d57b86dSChao Yu 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
771e05df3b1SJaegeuk Kim 	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
772e05df3b1SJaegeuk Kim 		return 1;
773e05df3b1SJaegeuk Kim 	else if (IS_ERR(page))
774e05df3b1SJaegeuk Kim 		return PTR_ERR(page);
775e05df3b1SJaegeuk Kim 
776e05df3b1SJaegeuk Kim 	/* Make dnode_of_data for parameter */
777e05df3b1SJaegeuk Kim 	dn->node_page = page;
778e05df3b1SJaegeuk Kim 	dn->ofs_in_node = 0;
7794d57b86dSChao Yu 	f2fs_truncate_data_blocks(dn);
7807735730dSChao Yu 	err = truncate_node(dn);
7817735730dSChao Yu 	if (err)
7827735730dSChao Yu 		return err;
7837735730dSChao Yu 
784e05df3b1SJaegeuk Kim 	return 1;
785e05df3b1SJaegeuk Kim }
786e05df3b1SJaegeuk Kim 
787e05df3b1SJaegeuk Kim static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
788e05df3b1SJaegeuk Kim 						int ofs, int depth)
789e05df3b1SJaegeuk Kim {
790e05df3b1SJaegeuk Kim 	struct dnode_of_data rdn = *dn;
791e05df3b1SJaegeuk Kim 	struct page *page;
792e05df3b1SJaegeuk Kim 	struct f2fs_node *rn;
793e05df3b1SJaegeuk Kim 	nid_t child_nid;
794e05df3b1SJaegeuk Kim 	unsigned int child_nofs;
795e05df3b1SJaegeuk Kim 	int freed = 0;
796e05df3b1SJaegeuk Kim 	int i, ret;
797e05df3b1SJaegeuk Kim 
798e05df3b1SJaegeuk Kim 	if (dn->nid == 0)
799e05df3b1SJaegeuk Kim 		return NIDS_PER_BLOCK + 1;
800e05df3b1SJaegeuk Kim 
80151dd6249SNamjae Jeon 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
80251dd6249SNamjae Jeon 
8034d57b86dSChao Yu 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
80451dd6249SNamjae Jeon 	if (IS_ERR(page)) {
80551dd6249SNamjae Jeon 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
806e05df3b1SJaegeuk Kim 		return PTR_ERR(page);
80751dd6249SNamjae Jeon 	}
808e05df3b1SJaegeuk Kim 
8094d57b86dSChao Yu 	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
81079344efbSJaegeuk Kim 
81145590710SGu Zheng 	rn = F2FS_NODE(page);
812e05df3b1SJaegeuk Kim 	if (depth < 3) {
813e05df3b1SJaegeuk Kim 		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
814e05df3b1SJaegeuk Kim 			child_nid = le32_to_cpu(rn->in.nid[i]);
815e05df3b1SJaegeuk Kim 			if (child_nid == 0)
816e05df3b1SJaegeuk Kim 				continue;
817e05df3b1SJaegeuk Kim 			rdn.nid = child_nid;
818e05df3b1SJaegeuk Kim 			ret = truncate_dnode(&rdn);
819e05df3b1SJaegeuk Kim 			if (ret < 0)
820e05df3b1SJaegeuk Kim 				goto out_err;
82112719ae1SJaegeuk Kim 			if (set_nid(page, i, 0, false))
82293bae099SJaegeuk Kim 				dn->node_changed = true;
823e05df3b1SJaegeuk Kim 		}
824e05df3b1SJaegeuk Kim 	} else {
825e05df3b1SJaegeuk Kim 		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
826e05df3b1SJaegeuk Kim 		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
827e05df3b1SJaegeuk Kim 			child_nid = le32_to_cpu(rn->in.nid[i]);
828e05df3b1SJaegeuk Kim 			if (child_nid == 0) {
829e05df3b1SJaegeuk Kim 				child_nofs += NIDS_PER_BLOCK + 1;
830e05df3b1SJaegeuk Kim 				continue;
831e05df3b1SJaegeuk Kim 			}
832e05df3b1SJaegeuk Kim 			rdn.nid = child_nid;
833e05df3b1SJaegeuk Kim 			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
834e05df3b1SJaegeuk Kim 			if (ret == (NIDS_PER_BLOCK + 1)) {
83512719ae1SJaegeuk Kim 				if (set_nid(page, i, 0, false))
83693bae099SJaegeuk Kim 					dn->node_changed = true;
837e05df3b1SJaegeuk Kim 				child_nofs += ret;
838e05df3b1SJaegeuk Kim 			} else if (ret < 0 && ret != -ENOENT) {
839e05df3b1SJaegeuk Kim 				goto out_err;
840e05df3b1SJaegeuk Kim 			}
841e05df3b1SJaegeuk Kim 		}
842e05df3b1SJaegeuk Kim 		freed = child_nofs;
843e05df3b1SJaegeuk Kim 	}
844e05df3b1SJaegeuk Kim 
845e05df3b1SJaegeuk Kim 	if (!ofs) {
846e05df3b1SJaegeuk Kim 		/* remove current indirect node */
847e05df3b1SJaegeuk Kim 		dn->node_page = page;
8487735730dSChao Yu 		ret = truncate_node(dn);
8497735730dSChao Yu 		if (ret)
8507735730dSChao Yu 			goto out_err;
851e05df3b1SJaegeuk Kim 		freed++;
852e05df3b1SJaegeuk Kim 	} else {
853e05df3b1SJaegeuk Kim 		f2fs_put_page(page, 1);
854e05df3b1SJaegeuk Kim 	}
85551dd6249SNamjae Jeon 	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
856e05df3b1SJaegeuk Kim 	return freed;
857e05df3b1SJaegeuk Kim 
858e05df3b1SJaegeuk Kim out_err:
859e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 1);
86051dd6249SNamjae Jeon 	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
861e05df3b1SJaegeuk Kim 	return ret;
862e05df3b1SJaegeuk Kim }
863e05df3b1SJaegeuk Kim 
864e05df3b1SJaegeuk Kim static int truncate_partial_nodes(struct dnode_of_data *dn,
865e05df3b1SJaegeuk Kim 			struct f2fs_inode *ri, int *offset, int depth)
866e05df3b1SJaegeuk Kim {
867e05df3b1SJaegeuk Kim 	struct page *pages[2];
868e05df3b1SJaegeuk Kim 	nid_t nid[3];
869e05df3b1SJaegeuk Kim 	nid_t child_nid;
870e05df3b1SJaegeuk Kim 	int err = 0;
871e05df3b1SJaegeuk Kim 	int i;
872e05df3b1SJaegeuk Kim 	int idx = depth - 2;
873e05df3b1SJaegeuk Kim 
874e05df3b1SJaegeuk Kim 	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
875e05df3b1SJaegeuk Kim 	if (!nid[0])
876e05df3b1SJaegeuk Kim 		return 0;
877e05df3b1SJaegeuk Kim 
878e05df3b1SJaegeuk Kim 	/* get indirect nodes in the path */
879a225dca3Sshifei10.ge 	for (i = 0; i < idx + 1; i++) {
880e1c42045Sarter97 		/* reference count'll be increased */
8814d57b86dSChao Yu 		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
882e05df3b1SJaegeuk Kim 		if (IS_ERR(pages[i])) {
883e05df3b1SJaegeuk Kim 			err = PTR_ERR(pages[i]);
884a225dca3Sshifei10.ge 			idx = i - 1;
885e05df3b1SJaegeuk Kim 			goto fail;
886e05df3b1SJaegeuk Kim 		}
887e05df3b1SJaegeuk Kim 		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
888e05df3b1SJaegeuk Kim 	}
889e05df3b1SJaegeuk Kim 
8904d57b86dSChao Yu 	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
89179344efbSJaegeuk Kim 
892e05df3b1SJaegeuk Kim 	/* free direct nodes linked to a partial indirect node */
893a225dca3Sshifei10.ge 	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
894e05df3b1SJaegeuk Kim 		child_nid = get_nid(pages[idx], i, false);
895e05df3b1SJaegeuk Kim 		if (!child_nid)
896e05df3b1SJaegeuk Kim 			continue;
897e05df3b1SJaegeuk Kim 		dn->nid = child_nid;
898e05df3b1SJaegeuk Kim 		err = truncate_dnode(dn);
899e05df3b1SJaegeuk Kim 		if (err < 0)
900e05df3b1SJaegeuk Kim 			goto fail;
90112719ae1SJaegeuk Kim 		if (set_nid(pages[idx], i, 0, false))
90293bae099SJaegeuk Kim 			dn->node_changed = true;
903e05df3b1SJaegeuk Kim 	}
904e05df3b1SJaegeuk Kim 
905a225dca3Sshifei10.ge 	if (offset[idx + 1] == 0) {
906e05df3b1SJaegeuk Kim 		dn->node_page = pages[idx];
907e05df3b1SJaegeuk Kim 		dn->nid = nid[idx];
9087735730dSChao Yu 		err = truncate_node(dn);
9097735730dSChao Yu 		if (err)
9107735730dSChao Yu 			goto fail;
911e05df3b1SJaegeuk Kim 	} else {
912e05df3b1SJaegeuk Kim 		f2fs_put_page(pages[idx], 1);
913e05df3b1SJaegeuk Kim 	}
914e05df3b1SJaegeuk Kim 	offset[idx]++;
915a225dca3Sshifei10.ge 	offset[idx + 1] = 0;
916a225dca3Sshifei10.ge 	idx--;
917e05df3b1SJaegeuk Kim fail:
918a225dca3Sshifei10.ge 	for (i = idx; i >= 0; i--)
919e05df3b1SJaegeuk Kim 		f2fs_put_page(pages[i], 1);
92051dd6249SNamjae Jeon 
92151dd6249SNamjae Jeon 	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
92251dd6249SNamjae Jeon 
923e05df3b1SJaegeuk Kim 	return err;
924e05df3b1SJaegeuk Kim }
925e05df3b1SJaegeuk Kim 
9260a8165d7SJaegeuk Kim /*
927e05df3b1SJaegeuk Kim  * All the block addresses of data and nodes should be nullified.
928e05df3b1SJaegeuk Kim  */
9294d57b86dSChao Yu int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
930e05df3b1SJaegeuk Kim {
9314081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
932e05df3b1SJaegeuk Kim 	int err = 0, cont = 1;
933e05df3b1SJaegeuk Kim 	int level, offset[4], noffset[4];
9347dd690c8SJaegeuk Kim 	unsigned int nofs = 0;
93558bfaf44SJaegeuk Kim 	struct f2fs_inode *ri;
936e05df3b1SJaegeuk Kim 	struct dnode_of_data dn;
937e05df3b1SJaegeuk Kim 	struct page *page;
938e05df3b1SJaegeuk Kim 
93951dd6249SNamjae Jeon 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
94051dd6249SNamjae Jeon 
94181ca7350SChao Yu 	level = get_node_path(inode, from, offset, noffset);
942adb6dc19SJaegeuk Kim 	if (level < 0)
943adb6dc19SJaegeuk Kim 		return level;
944ff373558SJaegeuk Kim 
9454d57b86dSChao Yu 	page = f2fs_get_node_page(sbi, inode->i_ino);
94651dd6249SNamjae Jeon 	if (IS_ERR(page)) {
94751dd6249SNamjae Jeon 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
948e05df3b1SJaegeuk Kim 		return PTR_ERR(page);
94951dd6249SNamjae Jeon 	}
950e05df3b1SJaegeuk Kim 
951e05df3b1SJaegeuk Kim 	set_new_dnode(&dn, inode, page, NULL, 0);
952e05df3b1SJaegeuk Kim 	unlock_page(page);
953e05df3b1SJaegeuk Kim 
95458bfaf44SJaegeuk Kim 	ri = F2FS_INODE(page);
955e05df3b1SJaegeuk Kim 	switch (level) {
956e05df3b1SJaegeuk Kim 	case 0:
957e05df3b1SJaegeuk Kim 	case 1:
958e05df3b1SJaegeuk Kim 		nofs = noffset[1];
959e05df3b1SJaegeuk Kim 		break;
960e05df3b1SJaegeuk Kim 	case 2:
961e05df3b1SJaegeuk Kim 		nofs = noffset[1];
962e05df3b1SJaegeuk Kim 		if (!offset[level - 1])
963e05df3b1SJaegeuk Kim 			goto skip_partial;
96458bfaf44SJaegeuk Kim 		err = truncate_partial_nodes(&dn, ri, offset, level);
965e05df3b1SJaegeuk Kim 		if (err < 0 && err != -ENOENT)
966e05df3b1SJaegeuk Kim 			goto fail;
967e05df3b1SJaegeuk Kim 		nofs += 1 + NIDS_PER_BLOCK;
968e05df3b1SJaegeuk Kim 		break;
969e05df3b1SJaegeuk Kim 	case 3:
970e05df3b1SJaegeuk Kim 		nofs = 5 + 2 * NIDS_PER_BLOCK;
971e05df3b1SJaegeuk Kim 		if (!offset[level - 1])
972e05df3b1SJaegeuk Kim 			goto skip_partial;
97358bfaf44SJaegeuk Kim 		err = truncate_partial_nodes(&dn, ri, offset, level);
974e05df3b1SJaegeuk Kim 		if (err < 0 && err != -ENOENT)
975e05df3b1SJaegeuk Kim 			goto fail;
976e05df3b1SJaegeuk Kim 		break;
977e05df3b1SJaegeuk Kim 	default:
978e05df3b1SJaegeuk Kim 		BUG();
979e05df3b1SJaegeuk Kim 	}
980e05df3b1SJaegeuk Kim 
981e05df3b1SJaegeuk Kim skip_partial:
982e05df3b1SJaegeuk Kim 	while (cont) {
98358bfaf44SJaegeuk Kim 		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
984e05df3b1SJaegeuk Kim 		switch (offset[0]) {
985e05df3b1SJaegeuk Kim 		case NODE_DIR1_BLOCK:
986e05df3b1SJaegeuk Kim 		case NODE_DIR2_BLOCK:
987e05df3b1SJaegeuk Kim 			err = truncate_dnode(&dn);
988e05df3b1SJaegeuk Kim 			break;
989e05df3b1SJaegeuk Kim 
990e05df3b1SJaegeuk Kim 		case NODE_IND1_BLOCK:
991e05df3b1SJaegeuk Kim 		case NODE_IND2_BLOCK:
992e05df3b1SJaegeuk Kim 			err = truncate_nodes(&dn, nofs, offset[1], 2);
993e05df3b1SJaegeuk Kim 			break;
994e05df3b1SJaegeuk Kim 
995e05df3b1SJaegeuk Kim 		case NODE_DIND_BLOCK:
996e05df3b1SJaegeuk Kim 			err = truncate_nodes(&dn, nofs, offset[1], 3);
997e05df3b1SJaegeuk Kim 			cont = 0;
998e05df3b1SJaegeuk Kim 			break;
999e05df3b1SJaegeuk Kim 
1000e05df3b1SJaegeuk Kim 		default:
1001e05df3b1SJaegeuk Kim 			BUG();
1002e05df3b1SJaegeuk Kim 		}
1003e05df3b1SJaegeuk Kim 		if (err < 0 && err != -ENOENT)
1004e05df3b1SJaegeuk Kim 			goto fail;
1005e05df3b1SJaegeuk Kim 		if (offset[1] == 0 &&
100658bfaf44SJaegeuk Kim 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
1007e05df3b1SJaegeuk Kim 			lock_page(page);
1008ff373558SJaegeuk Kim 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
1009fec1d657SJaegeuk Kim 			f2fs_wait_on_page_writeback(page, NODE, true);
101058bfaf44SJaegeuk Kim 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
1011e05df3b1SJaegeuk Kim 			set_page_dirty(page);
1012e05df3b1SJaegeuk Kim 			unlock_page(page);
1013e05df3b1SJaegeuk Kim 		}
1014e05df3b1SJaegeuk Kim 		offset[1] = 0;
1015e05df3b1SJaegeuk Kim 		offset[0]++;
1016e05df3b1SJaegeuk Kim 		nofs += err;
1017e05df3b1SJaegeuk Kim 	}
1018e05df3b1SJaegeuk Kim fail:
1019e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 0);
102051dd6249SNamjae Jeon 	trace_f2fs_truncate_inode_blocks_exit(inode, err);
1021e05df3b1SJaegeuk Kim 	return err > 0 ? 0 : err;
1022e05df3b1SJaegeuk Kim }
1023e05df3b1SJaegeuk Kim 
10249c77f754SJaegeuk Kim /* caller must lock inode page */
10254d57b86dSChao Yu int f2fs_truncate_xattr_node(struct inode *inode)
10264f16fb0fSJaegeuk Kim {
10274081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
10284f16fb0fSJaegeuk Kim 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
10294f16fb0fSJaegeuk Kim 	struct dnode_of_data dn;
10304f16fb0fSJaegeuk Kim 	struct page *npage;
10317735730dSChao Yu 	int err;
10324f16fb0fSJaegeuk Kim 
10334f16fb0fSJaegeuk Kim 	if (!nid)
10344f16fb0fSJaegeuk Kim 		return 0;
10354f16fb0fSJaegeuk Kim 
10364d57b86dSChao Yu 	npage = f2fs_get_node_page(sbi, nid);
10374f16fb0fSJaegeuk Kim 	if (IS_ERR(npage))
10384f16fb0fSJaegeuk Kim 		return PTR_ERR(npage);
10394f16fb0fSJaegeuk Kim 
10407735730dSChao Yu 	set_new_dnode(&dn, inode, NULL, npage, nid);
10417735730dSChao Yu 	err = truncate_node(&dn);
10427735730dSChao Yu 	if (err) {
10437735730dSChao Yu 		f2fs_put_page(npage, 1);
10447735730dSChao Yu 		return err;
10457735730dSChao Yu 	}
10467735730dSChao Yu 
1047205b9822SJaegeuk Kim 	f2fs_i_xnid_write(inode, 0);
104865985d93SJaegeuk Kim 
10494f16fb0fSJaegeuk Kim 	return 0;
10504f16fb0fSJaegeuk Kim }
10514f16fb0fSJaegeuk Kim 
105239936837SJaegeuk Kim /*
10534f4124d0SChao Yu  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
10544f4124d0SChao Yu  * f2fs_unlock_op().
105539936837SJaegeuk Kim  */
10564d57b86dSChao Yu int f2fs_remove_inode_page(struct inode *inode)
1057e05df3b1SJaegeuk Kim {
1058e05df3b1SJaegeuk Kim 	struct dnode_of_data dn;
105913ec7297SChao Yu 	int err;
1060e05df3b1SJaegeuk Kim 
1061c2e69583SJaegeuk Kim 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
10624d57b86dSChao Yu 	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
106313ec7297SChao Yu 	if (err)
106413ec7297SChao Yu 		return err;
1065e05df3b1SJaegeuk Kim 
10664d57b86dSChao Yu 	err = f2fs_truncate_xattr_node(inode);
106713ec7297SChao Yu 	if (err) {
1068c2e69583SJaegeuk Kim 		f2fs_put_dnode(&dn);
106913ec7297SChao Yu 		return err;
1070e05df3b1SJaegeuk Kim 	}
1071c2e69583SJaegeuk Kim 
1072c2e69583SJaegeuk Kim 	/* remove potential inline_data blocks */
1073c2e69583SJaegeuk Kim 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1074c2e69583SJaegeuk Kim 				S_ISLNK(inode->i_mode))
10754d57b86dSChao Yu 		f2fs_truncate_data_blocks_range(&dn, 1);
1076c2e69583SJaegeuk Kim 
1077e1c42045Sarter97 	/* 0 is possible, after f2fs_new_inode() has failed */
10789850cf4aSJaegeuk Kim 	f2fs_bug_on(F2FS_I_SB(inode),
10790eb0adadSChao Yu 			inode->i_blocks != 0 && inode->i_blocks != 8);
1080c2e69583SJaegeuk Kim 
1081c2e69583SJaegeuk Kim 	/* will put inode & node pages */
10827735730dSChao Yu 	err = truncate_node(&dn);
10837735730dSChao Yu 	if (err) {
10847735730dSChao Yu 		f2fs_put_dnode(&dn);
10857735730dSChao Yu 		return err;
10867735730dSChao Yu 	}
108713ec7297SChao Yu 	return 0;
1088e05df3b1SJaegeuk Kim }
1089e05df3b1SJaegeuk Kim 
10904d57b86dSChao Yu struct page *f2fs_new_inode_page(struct inode *inode)
1091e05df3b1SJaegeuk Kim {
1092e05df3b1SJaegeuk Kim 	struct dnode_of_data dn;
1093e05df3b1SJaegeuk Kim 
1094e05df3b1SJaegeuk Kim 	/* allocate inode page for new inode */
1095e05df3b1SJaegeuk Kim 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
109644a83ff6SJaegeuk Kim 
109744a83ff6SJaegeuk Kim 	/* caller should f2fs_put_page(page, 1); */
10984d57b86dSChao Yu 	return f2fs_new_node_page(&dn, 0);
1099e05df3b1SJaegeuk Kim }
1100e05df3b1SJaegeuk Kim 
11014d57b86dSChao Yu struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1102e05df3b1SJaegeuk Kim {
11034081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
110425cc5d3bSJaegeuk Kim 	struct node_info new_ni;
1105e05df3b1SJaegeuk Kim 	struct page *page;
1106e05df3b1SJaegeuk Kim 	int err;
1107e05df3b1SJaegeuk Kim 
110891942321SJaegeuk Kim 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1109e05df3b1SJaegeuk Kim 		return ERR_PTR(-EPERM);
1110e05df3b1SJaegeuk Kim 
1111300e129cSJaegeuk Kim 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1112e05df3b1SJaegeuk Kim 	if (!page)
1113e05df3b1SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
1114e05df3b1SJaegeuk Kim 
11150abd675eSChao Yu 	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
11169c02740cSJaegeuk Kim 		goto fail;
11170abd675eSChao Yu 
111825cc5d3bSJaegeuk Kim #ifdef CONFIG_F2FS_CHECK_FS
11197735730dSChao Yu 	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
11207735730dSChao Yu 	if (err) {
11217735730dSChao Yu 		dec_valid_node_count(sbi, dn->inode, !ofs);
11227735730dSChao Yu 		goto fail;
11237735730dSChao Yu 	}
112425cc5d3bSJaegeuk Kim 	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
112525cc5d3bSJaegeuk Kim #endif
112625cc5d3bSJaegeuk Kim 	new_ni.nid = dn->nid;
1127e05df3b1SJaegeuk Kim 	new_ni.ino = dn->inode->i_ino;
112825cc5d3bSJaegeuk Kim 	new_ni.blk_addr = NULL_ADDR;
112925cc5d3bSJaegeuk Kim 	new_ni.flag = 0;
113025cc5d3bSJaegeuk Kim 	new_ni.version = 0;
1131479f40c4SJaegeuk Kim 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
11329c02740cSJaegeuk Kim 
1133fec1d657SJaegeuk Kim 	f2fs_wait_on_page_writeback(page, NODE, true);
11349c02740cSJaegeuk Kim 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1135c5667575SChao Yu 	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1136237c0790SJaegeuk Kim 	if (!PageUptodate(page))
11379c02740cSJaegeuk Kim 		SetPageUptodate(page);
113812719ae1SJaegeuk Kim 	if (set_page_dirty(page))
113912719ae1SJaegeuk Kim 		dn->node_changed = true;
1140e05df3b1SJaegeuk Kim 
11414bc8e9bcSChao Yu 	if (f2fs_has_xattr_block(ofs))
1142205b9822SJaegeuk Kim 		f2fs_i_xnid_write(dn->inode, dn->nid);
1143479bd73aSJaegeuk Kim 
1144e05df3b1SJaegeuk Kim 	if (ofs == 0)
1145e05df3b1SJaegeuk Kim 		inc_valid_inode_count(sbi);
1146e05df3b1SJaegeuk Kim 	return page;
1147e05df3b1SJaegeuk Kim 
1148e05df3b1SJaegeuk Kim fail:
114971e9fec5SJaegeuk Kim 	clear_node_page_dirty(page);
1150e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 1);
1151e05df3b1SJaegeuk Kim 	return ERR_PTR(err);
1152e05df3b1SJaegeuk Kim }
1153e05df3b1SJaegeuk Kim 
115456ae674cSJaegeuk Kim /*
115556ae674cSJaegeuk Kim  * Caller should do after getting the following values.
115656ae674cSJaegeuk Kim  * 0: f2fs_put_page(page, 0)
115786531d6bSJaegeuk Kim  * LOCKED_PAGE or error: f2fs_put_page(page, 1)
115856ae674cSJaegeuk Kim  */
115904d328deSMike Christie static int read_node_page(struct page *page, int op_flags)
1160e05df3b1SJaegeuk Kim {
11614081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1162e05df3b1SJaegeuk Kim 	struct node_info ni;
1163cf04e8ebSJaegeuk Kim 	struct f2fs_io_info fio = {
116405ca3632SJaegeuk Kim 		.sbi = sbi,
1165cf04e8ebSJaegeuk Kim 		.type = NODE,
116604d328deSMike Christie 		.op = REQ_OP_READ,
116704d328deSMike Christie 		.op_flags = op_flags,
116805ca3632SJaegeuk Kim 		.page = page,
11694375a336SJaegeuk Kim 		.encrypted_page = NULL,
1170cf04e8ebSJaegeuk Kim 	};
11717735730dSChao Yu 	int err;
1172e05df3b1SJaegeuk Kim 
117354c55c4eSWeichao Guo 	if (PageUptodate(page)) {
117454c55c4eSWeichao Guo #ifdef CONFIG_F2FS_CHECK_FS
117554c55c4eSWeichao Guo 		f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
117654c55c4eSWeichao Guo #endif
11773bdad3c7SJaegeuk Kim 		return LOCKED_PAGE;
117854c55c4eSWeichao Guo 	}
11793bdad3c7SJaegeuk Kim 
11807735730dSChao Yu 	err = f2fs_get_node_info(sbi, page->index, &ni);
11817735730dSChao Yu 	if (err)
11827735730dSChao Yu 		return err;
1183e05df3b1SJaegeuk Kim 
118483a3bfdbSJaegeuk Kim 	if (unlikely(ni.blk_addr == NULL_ADDR) ||
118583a3bfdbSJaegeuk Kim 			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
11862bca1e23SJaegeuk Kim 		ClearPageUptodate(page);
1187e05df3b1SJaegeuk Kim 		return -ENOENT;
1188393ff91fSJaegeuk Kim 	}
1189393ff91fSJaegeuk Kim 
11907a9d7548SChao Yu 	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
119105ca3632SJaegeuk Kim 	return f2fs_submit_page_bio(&fio);
1192e05df3b1SJaegeuk Kim }
1193e05df3b1SJaegeuk Kim 
11940a8165d7SJaegeuk Kim /*
1195e05df3b1SJaegeuk Kim  * Readahead a node page
1196e05df3b1SJaegeuk Kim  */
11974d57b86dSChao Yu void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1198e05df3b1SJaegeuk Kim {
1199e05df3b1SJaegeuk Kim 	struct page *apage;
120056ae674cSJaegeuk Kim 	int err;
1201e05df3b1SJaegeuk Kim 
1202e8458725SChao Yu 	if (!nid)
1203e8458725SChao Yu 		return;
12044d57b86dSChao Yu 	if (f2fs_check_nid_range(sbi, nid))
1205a4f843bdSJaegeuk Kim 		return;
1206e8458725SChao Yu 
1207999270deSFan Li 	rcu_read_lock();
1208b93b0163SMatthew Wilcox 	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
1209999270deSFan Li 	rcu_read_unlock();
1210999270deSFan Li 	if (apage)
1211393ff91fSJaegeuk Kim 		return;
1212e05df3b1SJaegeuk Kim 
1213300e129cSJaegeuk Kim 	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1214e05df3b1SJaegeuk Kim 	if (!apage)
1215e05df3b1SJaegeuk Kim 		return;
1216e05df3b1SJaegeuk Kim 
121770246286SChristoph Hellwig 	err = read_node_page(apage, REQ_RAHEAD);
121886531d6bSJaegeuk Kim 	f2fs_put_page(apage, err ? 1 : 0);
1219e05df3b1SJaegeuk Kim }
1220e05df3b1SJaegeuk Kim 
122117a0ee55SJaegeuk Kim static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
12220e022ea8SChao Yu 					struct page *parent, int start)
1223e05df3b1SJaegeuk Kim {
122456ae674cSJaegeuk Kim 	struct page *page;
122556ae674cSJaegeuk Kim 	int err;
12264aa69d56SJaegeuk Kim 
12274aa69d56SJaegeuk Kim 	if (!nid)
12284aa69d56SJaegeuk Kim 		return ERR_PTR(-ENOENT);
12294d57b86dSChao Yu 	if (f2fs_check_nid_range(sbi, nid))
1230a4f843bdSJaegeuk Kim 		return ERR_PTR(-EINVAL);
1231afcb7ca0SJaegeuk Kim repeat:
1232300e129cSJaegeuk Kim 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1233e05df3b1SJaegeuk Kim 	if (!page)
1234e05df3b1SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
1235e05df3b1SJaegeuk Kim 
123670fd7614SChristoph Hellwig 	err = read_node_page(page, 0);
123786531d6bSJaegeuk Kim 	if (err < 0) {
123886531d6bSJaegeuk Kim 		f2fs_put_page(page, 1);
1239e05df3b1SJaegeuk Kim 		return ERR_PTR(err);
1240e1c51b9fSChao Yu 	} else if (err == LOCKED_PAGE) {
12411f258ec1SChao Yu 		err = 0;
1242e1c51b9fSChao Yu 		goto page_hit;
124386531d6bSJaegeuk Kim 	}
1244aaf96075SJaegeuk Kim 
12450e022ea8SChao Yu 	if (parent)
12464d57b86dSChao Yu 		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
12470e022ea8SChao Yu 
1248e1c51b9fSChao Yu 	lock_page(page);
1249e1c51b9fSChao Yu 
12504ef51a8fSJaegeuk Kim 	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1251afcb7ca0SJaegeuk Kim 		f2fs_put_page(page, 1);
1252afcb7ca0SJaegeuk Kim 		goto repeat;
1253afcb7ca0SJaegeuk Kim 	}
12541563ac75SChao Yu 
12551f258ec1SChao Yu 	if (unlikely(!PageUptodate(page))) {
12561f258ec1SChao Yu 		err = -EIO;
12571563ac75SChao Yu 		goto out_err;
12581f258ec1SChao Yu 	}
1259704956ecSChao Yu 
1260704956ecSChao Yu 	if (!f2fs_inode_chksum_verify(sbi, page)) {
1261704956ecSChao Yu 		err = -EBADMSG;
1262704956ecSChao Yu 		goto out_err;
1263704956ecSChao Yu 	}
1264e1c51b9fSChao Yu page_hit:
12650c9df7fbSYunlong Song 	if(unlikely(nid != nid_of_node(page))) {
12661f258ec1SChao Yu 		f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
12671f258ec1SChao Yu 			"nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
12681f258ec1SChao Yu 			nid, nid_of_node(page), ino_of_node(page),
12691f258ec1SChao Yu 			ofs_of_node(page), cpver_of_node(page),
12701f258ec1SChao Yu 			next_blkaddr_of_node(page));
12711f258ec1SChao Yu 		err = -EINVAL;
12720c9df7fbSYunlong Song out_err:
1273ee605234SJaegeuk Kim 		ClearPageUptodate(page);
12740c9df7fbSYunlong Song 		f2fs_put_page(page, 1);
12751f258ec1SChao Yu 		return ERR_PTR(err);
12760c9df7fbSYunlong Song 	}
1277e05df3b1SJaegeuk Kim 	return page;
1278e05df3b1SJaegeuk Kim }
1279e05df3b1SJaegeuk Kim 
12804d57b86dSChao Yu struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
12810e022ea8SChao Yu {
12820e022ea8SChao Yu 	return __get_node_page(sbi, nid, NULL, 0);
12830e022ea8SChao Yu }
12840e022ea8SChao Yu 
12854d57b86dSChao Yu struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1286e05df3b1SJaegeuk Kim {
12874081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
12880e022ea8SChao Yu 	nid_t nid = get_nid(parent, start, false);
1289e05df3b1SJaegeuk Kim 
12900e022ea8SChao Yu 	return __get_node_page(sbi, nid, parent, start);
1291e05df3b1SJaegeuk Kim }
1292e05df3b1SJaegeuk Kim 
12932049d4fcSJaegeuk Kim static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
12942049d4fcSJaegeuk Kim {
12952049d4fcSJaegeuk Kim 	struct inode *inode;
12962049d4fcSJaegeuk Kim 	struct page *page;
12970f3311a8SChao Yu 	int ret;
12982049d4fcSJaegeuk Kim 
12992049d4fcSJaegeuk Kim 	/* should flush inline_data before evict_inode */
13002049d4fcSJaegeuk Kim 	inode = ilookup(sbi->sb, ino);
13012049d4fcSJaegeuk Kim 	if (!inode)
13022049d4fcSJaegeuk Kim 		return;
13032049d4fcSJaegeuk Kim 
130401eccef7SChao Yu 	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
130501eccef7SChao Yu 					FGP_LOCK|FGP_NOWAIT, 0);
13062049d4fcSJaegeuk Kim 	if (!page)
13072049d4fcSJaegeuk Kim 		goto iput_out;
13082049d4fcSJaegeuk Kim 
13092049d4fcSJaegeuk Kim 	if (!PageUptodate(page))
13102049d4fcSJaegeuk Kim 		goto page_out;
13112049d4fcSJaegeuk Kim 
13122049d4fcSJaegeuk Kim 	if (!PageDirty(page))
13132049d4fcSJaegeuk Kim 		goto page_out;
13142049d4fcSJaegeuk Kim 
13152049d4fcSJaegeuk Kim 	if (!clear_page_dirty_for_io(page))
13162049d4fcSJaegeuk Kim 		goto page_out;
13172049d4fcSJaegeuk Kim 
13180f3311a8SChao Yu 	ret = f2fs_write_inline_data(inode, page);
13192049d4fcSJaegeuk Kim 	inode_dec_dirty_pages(inode);
13204d57b86dSChao Yu 	f2fs_remove_dirty_inode(inode);
13210f3311a8SChao Yu 	if (ret)
13222049d4fcSJaegeuk Kim 		set_page_dirty(page);
13232049d4fcSJaegeuk Kim page_out:
13244a6de50dSJaegeuk Kim 	f2fs_put_page(page, 1);
13252049d4fcSJaegeuk Kim iput_out:
13262049d4fcSJaegeuk Kim 	iput(inode);
13272049d4fcSJaegeuk Kim }
13282049d4fcSJaegeuk Kim 
1329608514deSJaegeuk Kim static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1330e05df3b1SJaegeuk Kim {
1331028a63a6SJan Kara 	pgoff_t index;
1332e05df3b1SJaegeuk Kim 	struct pagevec pvec;
1333608514deSJaegeuk Kim 	struct page *last_page = NULL;
1334028a63a6SJan Kara 	int nr_pages;
133552681375SJaegeuk Kim 
133686679820SMel Gorman 	pagevec_init(&pvec);
133752681375SJaegeuk Kim 	index = 0;
133852681375SJaegeuk Kim 
1339028a63a6SJan Kara 	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
134067fd707fSJan Kara 				PAGECACHE_TAG_DIRTY))) {
1341028a63a6SJan Kara 		int i;
134252681375SJaegeuk Kim 
134352681375SJaegeuk Kim 		for (i = 0; i < nr_pages; i++) {
134452681375SJaegeuk Kim 			struct page *page = pvec.pages[i];
134552681375SJaegeuk Kim 
134652681375SJaegeuk Kim 			if (unlikely(f2fs_cp_error(sbi))) {
1347608514deSJaegeuk Kim 				f2fs_put_page(last_page, 0);
134852681375SJaegeuk Kim 				pagevec_release(&pvec);
1349608514deSJaegeuk Kim 				return ERR_PTR(-EIO);
135052681375SJaegeuk Kim 			}
135152681375SJaegeuk Kim 
135252681375SJaegeuk Kim 			if (!IS_DNODE(page) || !is_cold_node(page))
135352681375SJaegeuk Kim 				continue;
135452681375SJaegeuk Kim 			if (ino_of_node(page) != ino)
135552681375SJaegeuk Kim 				continue;
135652681375SJaegeuk Kim 
135752681375SJaegeuk Kim 			lock_page(page);
135852681375SJaegeuk Kim 
135952681375SJaegeuk Kim 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
136052681375SJaegeuk Kim continue_unlock:
136152681375SJaegeuk Kim 				unlock_page(page);
136252681375SJaegeuk Kim 				continue;
136352681375SJaegeuk Kim 			}
136452681375SJaegeuk Kim 			if (ino_of_node(page) != ino)
136552681375SJaegeuk Kim 				goto continue_unlock;
136652681375SJaegeuk Kim 
136752681375SJaegeuk Kim 			if (!PageDirty(page)) {
136852681375SJaegeuk Kim 				/* someone wrote it for us */
136952681375SJaegeuk Kim 				goto continue_unlock;
137052681375SJaegeuk Kim 			}
137152681375SJaegeuk Kim 
1372608514deSJaegeuk Kim 			if (last_page)
1373608514deSJaegeuk Kim 				f2fs_put_page(last_page, 0);
1374608514deSJaegeuk Kim 
1375608514deSJaegeuk Kim 			get_page(page);
1376608514deSJaegeuk Kim 			last_page = page;
1377608514deSJaegeuk Kim 			unlock_page(page);
1378608514deSJaegeuk Kim 		}
1379608514deSJaegeuk Kim 		pagevec_release(&pvec);
1380608514deSJaegeuk Kim 		cond_resched();
1381608514deSJaegeuk Kim 	}
1382608514deSJaegeuk Kim 	return last_page;
1383608514deSJaegeuk Kim }
1384608514deSJaegeuk Kim 
1385d68f735bSJaegeuk Kim static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1386b0af6d49SChao Yu 				struct writeback_control *wbc, bool do_balance,
1387b0af6d49SChao Yu 				enum iostat_type io_type)
1388faa24895SJaegeuk Kim {
1389faa24895SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1390faa24895SJaegeuk Kim 	nid_t nid;
1391faa24895SJaegeuk Kim 	struct node_info ni;
1392faa24895SJaegeuk Kim 	struct f2fs_io_info fio = {
1393faa24895SJaegeuk Kim 		.sbi = sbi,
139439d787beSChao Yu 		.ino = ino_of_node(page),
1395faa24895SJaegeuk Kim 		.type = NODE,
1396faa24895SJaegeuk Kim 		.op = REQ_OP_WRITE,
1397faa24895SJaegeuk Kim 		.op_flags = wbc_to_write_flags(wbc),
1398faa24895SJaegeuk Kim 		.page = page,
1399faa24895SJaegeuk Kim 		.encrypted_page = NULL,
1400d68f735bSJaegeuk Kim 		.submitted = false,
1401b0af6d49SChao Yu 		.io_type = io_type,
1402578c6478SYufen Yu 		.io_wbc = wbc,
1403faa24895SJaegeuk Kim 	};
1404faa24895SJaegeuk Kim 
1405faa24895SJaegeuk Kim 	trace_f2fs_writepage(page, NODE);
1406faa24895SJaegeuk Kim 
1407868de613SJaegeuk Kim 	if (unlikely(f2fs_cp_error(sbi)))
1408868de613SJaegeuk Kim 		goto redirty_out;
1409db198ae0SChao Yu 
1410faa24895SJaegeuk Kim 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1411faa24895SJaegeuk Kim 		goto redirty_out;
1412faa24895SJaegeuk Kim 
1413faa24895SJaegeuk Kim 	/* get old block addr of this node page */
1414faa24895SJaegeuk Kim 	nid = nid_of_node(page);
1415faa24895SJaegeuk Kim 	f2fs_bug_on(sbi, page->index != nid);
1416faa24895SJaegeuk Kim 
14177735730dSChao Yu 	if (f2fs_get_node_info(sbi, nid, &ni))
14187735730dSChao Yu 		goto redirty_out;
14197735730dSChao Yu 
1420faa24895SJaegeuk Kim 	if (wbc->for_reclaim) {
1421faa24895SJaegeuk Kim 		if (!down_read_trylock(&sbi->node_write))
1422faa24895SJaegeuk Kim 			goto redirty_out;
1423faa24895SJaegeuk Kim 	} else {
1424faa24895SJaegeuk Kim 		down_read(&sbi->node_write);
1425faa24895SJaegeuk Kim 	}
1426faa24895SJaegeuk Kim 
1427faa24895SJaegeuk Kim 	/* This page is already truncated */
1428faa24895SJaegeuk Kim 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
1429faa24895SJaegeuk Kim 		ClearPageUptodate(page);
1430faa24895SJaegeuk Kim 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1431faa24895SJaegeuk Kim 		up_read(&sbi->node_write);
1432faa24895SJaegeuk Kim 		unlock_page(page);
1433faa24895SJaegeuk Kim 		return 0;
1434faa24895SJaegeuk Kim 	}
1435faa24895SJaegeuk Kim 
1436c9b60788SChao Yu 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
1437c9b60788SChao Yu 		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
1438c9b60788SChao Yu 		goto redirty_out;
1439c9b60788SChao Yu 
1440e7c75ab0SJaegeuk Kim 	if (atomic && !test_opt(sbi, NOBARRIER))
1441e7c75ab0SJaegeuk Kim 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1442e7c75ab0SJaegeuk Kim 
1443faa24895SJaegeuk Kim 	set_page_writeback(page);
144417c50035SJaegeuk Kim 	ClearPageError(page);
1445faa24895SJaegeuk Kim 	fio.old_blkaddr = ni.blk_addr;
14464d57b86dSChao Yu 	f2fs_do_write_node_page(nid, &fio);
1447faa24895SJaegeuk Kim 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1448faa24895SJaegeuk Kim 	dec_page_count(sbi, F2FS_DIRTY_NODES);
1449faa24895SJaegeuk Kim 	up_read(&sbi->node_write);
1450faa24895SJaegeuk Kim 
1451d68f735bSJaegeuk Kim 	if (wbc->for_reclaim) {
1452b9109b0eSJaegeuk Kim 		f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
1453b9109b0eSJaegeuk Kim 						page->index, NODE);
1454d68f735bSJaegeuk Kim 		submitted = NULL;
1455d68f735bSJaegeuk Kim 	}
1456faa24895SJaegeuk Kim 
1457faa24895SJaegeuk Kim 	unlock_page(page);
1458faa24895SJaegeuk Kim 
1459d68f735bSJaegeuk Kim 	if (unlikely(f2fs_cp_error(sbi))) {
1460b9109b0eSJaegeuk Kim 		f2fs_submit_merged_write(sbi, NODE);
1461d68f735bSJaegeuk Kim 		submitted = NULL;
1462d68f735bSJaegeuk Kim 	}
1463d68f735bSJaegeuk Kim 	if (submitted)
1464d68f735bSJaegeuk Kim 		*submitted = fio.submitted;
1465faa24895SJaegeuk Kim 
1466401db79fSYunlong Song 	if (do_balance)
1467401db79fSYunlong Song 		f2fs_balance_fs(sbi, false);
1468faa24895SJaegeuk Kim 	return 0;
1469faa24895SJaegeuk Kim 
1470faa24895SJaegeuk Kim redirty_out:
1471faa24895SJaegeuk Kim 	redirty_page_for_writepage(wbc, page);
1472faa24895SJaegeuk Kim 	return AOP_WRITEPAGE_ACTIVATE;
1473faa24895SJaegeuk Kim }
1474faa24895SJaegeuk Kim 
14754d57b86dSChao Yu void f2fs_move_node_page(struct page *node_page, int gc_type)
1476f15194fcSYunlei He {
1477f15194fcSYunlei He 	if (gc_type == FG_GC) {
1478f15194fcSYunlei He 		struct writeback_control wbc = {
1479f15194fcSYunlei He 			.sync_mode = WB_SYNC_ALL,
1480f15194fcSYunlei He 			.nr_to_write = 1,
1481f15194fcSYunlei He 			.for_reclaim = 0,
1482f15194fcSYunlei He 		};
1483f15194fcSYunlei He 
1484f15194fcSYunlei He 		set_page_dirty(node_page);
1485f15194fcSYunlei He 		f2fs_wait_on_page_writeback(node_page, NODE, true);
1486f15194fcSYunlei He 
1487f15194fcSYunlei He 		f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
1488f15194fcSYunlei He 		if (!clear_page_dirty_for_io(node_page))
1489f15194fcSYunlei He 			goto out_page;
1490f15194fcSYunlei He 
1491f15194fcSYunlei He 		if (__write_node_page(node_page, false, NULL,
1492f15194fcSYunlei He 					&wbc, false, FS_GC_NODE_IO))
1493f15194fcSYunlei He 			unlock_page(node_page);
1494f15194fcSYunlei He 		goto release_page;
1495f15194fcSYunlei He 	} else {
1496f15194fcSYunlei He 		/* set page dirty and write it */
1497f15194fcSYunlei He 		if (!PageWriteback(node_page))
1498f15194fcSYunlei He 			set_page_dirty(node_page);
1499f15194fcSYunlei He 	}
1500f15194fcSYunlei He out_page:
1501f15194fcSYunlei He 	unlock_page(node_page);
1502f15194fcSYunlei He release_page:
1503f15194fcSYunlei He 	f2fs_put_page(node_page, 0);
1504f15194fcSYunlei He }
1505f15194fcSYunlei He 
1506faa24895SJaegeuk Kim static int f2fs_write_node_page(struct page *page,
1507faa24895SJaegeuk Kim 				struct writeback_control *wbc)
1508faa24895SJaegeuk Kim {
1509b0af6d49SChao Yu 	return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
1510faa24895SJaegeuk Kim }
1511faa24895SJaegeuk Kim 
15124d57b86dSChao Yu int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1513608514deSJaegeuk Kim 			struct writeback_control *wbc, bool atomic)
1514608514deSJaegeuk Kim {
1515028a63a6SJan Kara 	pgoff_t index;
1516942fd319SJaegeuk Kim 	pgoff_t last_idx = ULONG_MAX;
1517608514deSJaegeuk Kim 	struct pagevec pvec;
1518608514deSJaegeuk Kim 	int ret = 0;
1519608514deSJaegeuk Kim 	struct page *last_page = NULL;
1520608514deSJaegeuk Kim 	bool marked = false;
152126de9b11SJaegeuk Kim 	nid_t ino = inode->i_ino;
1522028a63a6SJan Kara 	int nr_pages;
1523608514deSJaegeuk Kim 
1524608514deSJaegeuk Kim 	if (atomic) {
1525608514deSJaegeuk Kim 		last_page = last_fsync_dnode(sbi, ino);
1526608514deSJaegeuk Kim 		if (IS_ERR_OR_NULL(last_page))
1527608514deSJaegeuk Kim 			return PTR_ERR_OR_ZERO(last_page);
1528608514deSJaegeuk Kim 	}
1529608514deSJaegeuk Kim retry:
153086679820SMel Gorman 	pagevec_init(&pvec);
1531608514deSJaegeuk Kim 	index = 0;
1532608514deSJaegeuk Kim 
1533028a63a6SJan Kara 	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
153467fd707fSJan Kara 				PAGECACHE_TAG_DIRTY))) {
1535028a63a6SJan Kara 		int i;
1536608514deSJaegeuk Kim 
1537608514deSJaegeuk Kim 		for (i = 0; i < nr_pages; i++) {
1538608514deSJaegeuk Kim 			struct page *page = pvec.pages[i];
1539d68f735bSJaegeuk Kim 			bool submitted = false;
1540608514deSJaegeuk Kim 
1541608514deSJaegeuk Kim 			if (unlikely(f2fs_cp_error(sbi))) {
1542608514deSJaegeuk Kim 				f2fs_put_page(last_page, 0);
1543608514deSJaegeuk Kim 				pagevec_release(&pvec);
15449de69279SChao Yu 				ret = -EIO;
15459de69279SChao Yu 				goto out;
1546608514deSJaegeuk Kim 			}
1547608514deSJaegeuk Kim 
1548608514deSJaegeuk Kim 			if (!IS_DNODE(page) || !is_cold_node(page))
1549608514deSJaegeuk Kim 				continue;
1550608514deSJaegeuk Kim 			if (ino_of_node(page) != ino)
1551608514deSJaegeuk Kim 				continue;
1552608514deSJaegeuk Kim 
1553608514deSJaegeuk Kim 			lock_page(page);
1554608514deSJaegeuk Kim 
1555608514deSJaegeuk Kim 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1556608514deSJaegeuk Kim continue_unlock:
1557608514deSJaegeuk Kim 				unlock_page(page);
1558608514deSJaegeuk Kim 				continue;
1559608514deSJaegeuk Kim 			}
1560608514deSJaegeuk Kim 			if (ino_of_node(page) != ino)
156152681375SJaegeuk Kim 				goto continue_unlock;
156252681375SJaegeuk Kim 
1563608514deSJaegeuk Kim 			if (!PageDirty(page) && page != last_page) {
1564608514deSJaegeuk Kim 				/* someone wrote it for us */
1565608514deSJaegeuk Kim 				goto continue_unlock;
1566608514deSJaegeuk Kim 			}
1567608514deSJaegeuk Kim 
1568608514deSJaegeuk Kim 			f2fs_wait_on_page_writeback(page, NODE, true);
1569608514deSJaegeuk Kim 			BUG_ON(PageWriteback(page));
1570608514deSJaegeuk Kim 
1571d29fd172SJaegeuk Kim 			set_fsync_mark(page, 0);
1572d29fd172SJaegeuk Kim 			set_dentry_mark(page, 0);
1573d29fd172SJaegeuk Kim 
1574608514deSJaegeuk Kim 			if (!atomic || page == last_page) {
157552681375SJaegeuk Kim 				set_fsync_mark(page, 1);
157626de9b11SJaegeuk Kim 				if (IS_INODE(page)) {
157726de9b11SJaegeuk Kim 					if (is_inode_flag_set(inode,
157826de9b11SJaegeuk Kim 								FI_DIRTY_INODE))
15794d57b86dSChao Yu 						f2fs_update_inode(inode, page);
158052681375SJaegeuk Kim 					set_dentry_mark(page,
15814d57b86dSChao Yu 						f2fs_need_dentry_mark(sbi, ino));
158226de9b11SJaegeuk Kim 				}
1583608514deSJaegeuk Kim 				/*  may be written by other thread */
1584608514deSJaegeuk Kim 				if (!PageDirty(page))
1585608514deSJaegeuk Kim 					set_page_dirty(page);
1586608514deSJaegeuk Kim 			}
1587608514deSJaegeuk Kim 
1588608514deSJaegeuk Kim 			if (!clear_page_dirty_for_io(page))
1589608514deSJaegeuk Kim 				goto continue_unlock;
159052681375SJaegeuk Kim 
1591e7c75ab0SJaegeuk Kim 			ret = __write_node_page(page, atomic &&
1592d68f735bSJaegeuk Kim 						page == last_page,
1593b0af6d49SChao Yu 						&submitted, wbc, true,
1594b0af6d49SChao Yu 						FS_NODE_IO);
1595c267ec15SJaegeuk Kim 			if (ret) {
159652681375SJaegeuk Kim 				unlock_page(page);
1597608514deSJaegeuk Kim 				f2fs_put_page(last_page, 0);
1598608514deSJaegeuk Kim 				break;
1599d68f735bSJaegeuk Kim 			} else if (submitted) {
1600942fd319SJaegeuk Kim 				last_idx = page->index;
1601608514deSJaegeuk Kim 			}
16023f5f4959SChao Yu 
1603608514deSJaegeuk Kim 			if (page == last_page) {
1604608514deSJaegeuk Kim 				f2fs_put_page(page, 0);
1605608514deSJaegeuk Kim 				marked = true;
160652681375SJaegeuk Kim 				break;
160752681375SJaegeuk Kim 			}
1608c267ec15SJaegeuk Kim 		}
160952681375SJaegeuk Kim 		pagevec_release(&pvec);
161052681375SJaegeuk Kim 		cond_resched();
161152681375SJaegeuk Kim 
1612608514deSJaegeuk Kim 		if (ret || marked)
161352681375SJaegeuk Kim 			break;
161452681375SJaegeuk Kim 	}
1615608514deSJaegeuk Kim 	if (!ret && atomic && !marked) {
1616608514deSJaegeuk Kim 		f2fs_msg(sbi->sb, KERN_DEBUG,
1617608514deSJaegeuk Kim 			"Retry to write fsync mark: ino=%u, idx=%lx",
1618608514deSJaegeuk Kim 					ino, last_page->index);
1619608514deSJaegeuk Kim 		lock_page(last_page);
1620d40a43afSYunlei He 		f2fs_wait_on_page_writeback(last_page, NODE, true);
1621608514deSJaegeuk Kim 		set_page_dirty(last_page);
1622608514deSJaegeuk Kim 		unlock_page(last_page);
1623608514deSJaegeuk Kim 		goto retry;
1624608514deSJaegeuk Kim 	}
16259de69279SChao Yu out:
1626942fd319SJaegeuk Kim 	if (last_idx != ULONG_MAX)
1627b9109b0eSJaegeuk Kim 		f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
1628c267ec15SJaegeuk Kim 	return ret ? -EIO: 0;
162952681375SJaegeuk Kim }
163052681375SJaegeuk Kim 
16314d57b86dSChao Yu int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
16324d57b86dSChao Yu 				struct writeback_control *wbc,
1633b0af6d49SChao Yu 				bool do_balance, enum iostat_type io_type)
163452681375SJaegeuk Kim {
1635028a63a6SJan Kara 	pgoff_t index;
163652681375SJaegeuk Kim 	struct pagevec pvec;
163752681375SJaegeuk Kim 	int step = 0;
163812bb0a8fSJaegeuk Kim 	int nwritten = 0;
16393f5f4959SChao Yu 	int ret = 0;
1640c29fd0c0SChao Yu 	int nr_pages, done = 0;
1641e05df3b1SJaegeuk Kim 
164286679820SMel Gorman 	pagevec_init(&pvec);
1643e05df3b1SJaegeuk Kim 
1644e05df3b1SJaegeuk Kim next_step:
1645e05df3b1SJaegeuk Kim 	index = 0;
1646e05df3b1SJaegeuk Kim 
1647c29fd0c0SChao Yu 	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1648c29fd0c0SChao Yu 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1649028a63a6SJan Kara 		int i;
1650e05df3b1SJaegeuk Kim 
1651e05df3b1SJaegeuk Kim 		for (i = 0; i < nr_pages; i++) {
1652e05df3b1SJaegeuk Kim 			struct page *page = pvec.pages[i];
1653d68f735bSJaegeuk Kim 			bool submitted = false;
1654e05df3b1SJaegeuk Kim 
1655c29fd0c0SChao Yu 			/* give a priority to WB_SYNC threads */
1656c29fd0c0SChao Yu 			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1657c29fd0c0SChao Yu 					wbc->sync_mode == WB_SYNC_NONE) {
1658c29fd0c0SChao Yu 				done = 1;
1659c29fd0c0SChao Yu 				break;
1660c29fd0c0SChao Yu 			}
1661c29fd0c0SChao Yu 
1662e05df3b1SJaegeuk Kim 			/*
1663e05df3b1SJaegeuk Kim 			 * flushing sequence with step:
1664e05df3b1SJaegeuk Kim 			 * 0. indirect nodes
1665e05df3b1SJaegeuk Kim 			 * 1. dentry dnodes
1666e05df3b1SJaegeuk Kim 			 * 2. file dnodes
1667e05df3b1SJaegeuk Kim 			 */
1668e05df3b1SJaegeuk Kim 			if (step == 0 && IS_DNODE(page))
1669e05df3b1SJaegeuk Kim 				continue;
1670e05df3b1SJaegeuk Kim 			if (step == 1 && (!IS_DNODE(page) ||
1671e05df3b1SJaegeuk Kim 						is_cold_node(page)))
1672e05df3b1SJaegeuk Kim 				continue;
1673e05df3b1SJaegeuk Kim 			if (step == 2 && (!IS_DNODE(page) ||
1674e05df3b1SJaegeuk Kim 						!is_cold_node(page)))
1675e05df3b1SJaegeuk Kim 				continue;
16769a4cbc9eSChao Yu lock_node:
16774b270a8cSChao Yu 			if (wbc->sync_mode == WB_SYNC_ALL)
16784b270a8cSChao Yu 				lock_page(page);
16794b270a8cSChao Yu 			else if (!trylock_page(page))
1680e05df3b1SJaegeuk Kim 				continue;
1681e05df3b1SJaegeuk Kim 
16824ef51a8fSJaegeuk Kim 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1683e05df3b1SJaegeuk Kim continue_unlock:
1684e05df3b1SJaegeuk Kim 				unlock_page(page);
1685e05df3b1SJaegeuk Kim 				continue;
1686e05df3b1SJaegeuk Kim 			}
1687e05df3b1SJaegeuk Kim 
1688e05df3b1SJaegeuk Kim 			if (!PageDirty(page)) {
1689e05df3b1SJaegeuk Kim 				/* someone wrote it for us */
1690e05df3b1SJaegeuk Kim 				goto continue_unlock;
1691e05df3b1SJaegeuk Kim 			}
1692e05df3b1SJaegeuk Kim 
16932049d4fcSJaegeuk Kim 			/* flush inline_data */
169452681375SJaegeuk Kim 			if (is_inline_node(page)) {
16952049d4fcSJaegeuk Kim 				clear_inline_node(page);
16962049d4fcSJaegeuk Kim 				unlock_page(page);
16972049d4fcSJaegeuk Kim 				flush_inline_data(sbi, ino_of_node(page));
16989a4cbc9eSChao Yu 				goto lock_node;
16992049d4fcSJaegeuk Kim 			}
17002049d4fcSJaegeuk Kim 
1701fa3d2bdfSJaegeuk Kim 			f2fs_wait_on_page_writeback(page, NODE, true);
1702fa3d2bdfSJaegeuk Kim 
1703fa3d2bdfSJaegeuk Kim 			BUG_ON(PageWriteback(page));
1704e05df3b1SJaegeuk Kim 			if (!clear_page_dirty_for_io(page))
1705e05df3b1SJaegeuk Kim 				goto continue_unlock;
1706e05df3b1SJaegeuk Kim 
1707e05df3b1SJaegeuk Kim 			set_fsync_mark(page, 0);
1708e05df3b1SJaegeuk Kim 			set_dentry_mark(page, 0);
170952746519SJaegeuk Kim 
1710401db79fSYunlong Song 			ret = __write_node_page(page, false, &submitted,
1711b0af6d49SChao Yu 						wbc, do_balance, io_type);
1712d68f735bSJaegeuk Kim 			if (ret)
171352746519SJaegeuk Kim 				unlock_page(page);
1714d68f735bSJaegeuk Kim 			else if (submitted)
17153f5f4959SChao Yu 				nwritten++;
1716e05df3b1SJaegeuk Kim 
1717e05df3b1SJaegeuk Kim 			if (--wbc->nr_to_write == 0)
1718e05df3b1SJaegeuk Kim 				break;
1719e05df3b1SJaegeuk Kim 		}
1720e05df3b1SJaegeuk Kim 		pagevec_release(&pvec);
1721e05df3b1SJaegeuk Kim 		cond_resched();
1722e05df3b1SJaegeuk Kim 
1723e05df3b1SJaegeuk Kim 		if (wbc->nr_to_write == 0) {
1724e05df3b1SJaegeuk Kim 			step = 2;
1725e05df3b1SJaegeuk Kim 			break;
1726e05df3b1SJaegeuk Kim 		}
1727e05df3b1SJaegeuk Kim 	}
1728e05df3b1SJaegeuk Kim 
1729e05df3b1SJaegeuk Kim 	if (step < 2) {
1730e05df3b1SJaegeuk Kim 		step++;
1731e05df3b1SJaegeuk Kim 		goto next_step;
1732e05df3b1SJaegeuk Kim 	}
1733db198ae0SChao Yu 
17343f5f4959SChao Yu 	if (nwritten)
1735b9109b0eSJaegeuk Kim 		f2fs_submit_merged_write(sbi, NODE);
1736db198ae0SChao Yu 
1737db198ae0SChao Yu 	if (unlikely(f2fs_cp_error(sbi)))
1738db198ae0SChao Yu 		return -EIO;
17393f5f4959SChao Yu 	return ret;
1740e05df3b1SJaegeuk Kim }
1741e05df3b1SJaegeuk Kim 
17424d57b86dSChao Yu int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
1743cfe58f9dSJaegeuk Kim {
1744028a63a6SJan Kara 	pgoff_t index = 0;
1745cfe58f9dSJaegeuk Kim 	struct pagevec pvec;
1746280db3c8SMiklos Szeredi 	int ret2, ret = 0;
1747028a63a6SJan Kara 	int nr_pages;
1748cfe58f9dSJaegeuk Kim 
174986679820SMel Gorman 	pagevec_init(&pvec);
17504ef51a8fSJaegeuk Kim 
1751028a63a6SJan Kara 	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
175267fd707fSJan Kara 				PAGECACHE_TAG_WRITEBACK))) {
1753028a63a6SJan Kara 		int i;
1754cfe58f9dSJaegeuk Kim 
1755cfe58f9dSJaegeuk Kim 		for (i = 0; i < nr_pages; i++) {
1756cfe58f9dSJaegeuk Kim 			struct page *page = pvec.pages[i];
1757cfe58f9dSJaegeuk Kim 
17584bf08ff6SChao Yu 			if (ino && ino_of_node(page) == ino) {
1759fec1d657SJaegeuk Kim 				f2fs_wait_on_page_writeback(page, NODE, true);
1760cfe58f9dSJaegeuk Kim 				if (TestClearPageError(page))
1761cfe58f9dSJaegeuk Kim 					ret = -EIO;
1762cfe58f9dSJaegeuk Kim 			}
17634bf08ff6SChao Yu 		}
1764cfe58f9dSJaegeuk Kim 		pagevec_release(&pvec);
1765cfe58f9dSJaegeuk Kim 		cond_resched();
1766cfe58f9dSJaegeuk Kim 	}
1767cfe58f9dSJaegeuk Kim 
1768280db3c8SMiklos Szeredi 	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
1769cfe58f9dSJaegeuk Kim 	if (!ret)
1770cfe58f9dSJaegeuk Kim 		ret = ret2;
1771cfe58f9dSJaegeuk Kim 	return ret;
1772cfe58f9dSJaegeuk Kim }
1773cfe58f9dSJaegeuk Kim 
1774e05df3b1SJaegeuk Kim static int f2fs_write_node_pages(struct address_space *mapping,
1775e05df3b1SJaegeuk Kim 			    struct writeback_control *wbc)
1776e05df3b1SJaegeuk Kim {
17774081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
17789dfa1bafSJaegeuk Kim 	struct blk_plug plug;
177950c8cdb3SJaegeuk Kim 	long diff;
1780e05df3b1SJaegeuk Kim 
17810771fcc7SChao Yu 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
17820771fcc7SChao Yu 		goto skip_write;
17830771fcc7SChao Yu 
17844660f9c0SJaegeuk Kim 	/* balancing f2fs's metadata in background */
17854660f9c0SJaegeuk Kim 	f2fs_balance_fs_bg(sbi);
1786e05df3b1SJaegeuk Kim 
1787a7fdffbdSJaegeuk Kim 	/* collect a number of dirty node pages and write together */
178887d6f890SJaegeuk Kim 	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1789d3baf95dSJaegeuk Kim 		goto skip_write;
1790a7fdffbdSJaegeuk Kim 
1791c29fd0c0SChao Yu 	if (wbc->sync_mode == WB_SYNC_ALL)
1792c29fd0c0SChao Yu 		atomic_inc(&sbi->wb_sync_req[NODE]);
1793c29fd0c0SChao Yu 	else if (atomic_read(&sbi->wb_sync_req[NODE]))
1794c29fd0c0SChao Yu 		goto skip_write;
1795c29fd0c0SChao Yu 
1796d31c7c3fSYunlei He 	trace_f2fs_writepages(mapping->host, wbc, NODE);
1797d31c7c3fSYunlei He 
179850c8cdb3SJaegeuk Kim 	diff = nr_pages_to_write(sbi, NODE, wbc);
17999dfa1bafSJaegeuk Kim 	blk_start_plug(&plug);
18004d57b86dSChao Yu 	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
18019dfa1bafSJaegeuk Kim 	blk_finish_plug(&plug);
180250c8cdb3SJaegeuk Kim 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1803c29fd0c0SChao Yu 
1804c29fd0c0SChao Yu 	if (wbc->sync_mode == WB_SYNC_ALL)
1805c29fd0c0SChao Yu 		atomic_dec(&sbi->wb_sync_req[NODE]);
1806e05df3b1SJaegeuk Kim 	return 0;
1807d3baf95dSJaegeuk Kim 
1808d3baf95dSJaegeuk Kim skip_write:
1809d3baf95dSJaegeuk Kim 	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
1810d31c7c3fSYunlei He 	trace_f2fs_writepages(mapping->host, wbc, NODE);
1811d3baf95dSJaegeuk Kim 	return 0;
1812e05df3b1SJaegeuk Kim }
1813e05df3b1SJaegeuk Kim 
1814e05df3b1SJaegeuk Kim static int f2fs_set_node_page_dirty(struct page *page)
1815e05df3b1SJaegeuk Kim {
181626c6b887SJaegeuk Kim 	trace_f2fs_set_page_dirty(page, NODE);
181726c6b887SJaegeuk Kim 
1818237c0790SJaegeuk Kim 	if (!PageUptodate(page))
1819e05df3b1SJaegeuk Kim 		SetPageUptodate(page);
182054c55c4eSWeichao Guo #ifdef CONFIG_F2FS_CHECK_FS
182154c55c4eSWeichao Guo 	if (IS_INODE(page))
182254c55c4eSWeichao Guo 		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
182354c55c4eSWeichao Guo #endif
1824e05df3b1SJaegeuk Kim 	if (!PageDirty(page)) {
1825b87078adSJaegeuk Kim 		__set_page_dirty_nobuffers(page);
18264081363fSJaegeuk Kim 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
1827e05df3b1SJaegeuk Kim 		SetPagePrivate(page);
18289e4ded3fSJaegeuk Kim 		f2fs_trace_pid(page);
1829e05df3b1SJaegeuk Kim 		return 1;
1830e05df3b1SJaegeuk Kim 	}
1831e05df3b1SJaegeuk Kim 	return 0;
1832e05df3b1SJaegeuk Kim }
1833e05df3b1SJaegeuk Kim 
18340a8165d7SJaegeuk Kim /*
1835e05df3b1SJaegeuk Kim  * Structure of the f2fs node operations
1836e05df3b1SJaegeuk Kim  */
1837e05df3b1SJaegeuk Kim const struct address_space_operations f2fs_node_aops = {
1838e05df3b1SJaegeuk Kim 	.writepage	= f2fs_write_node_page,
1839e05df3b1SJaegeuk Kim 	.writepages	= f2fs_write_node_pages,
1840e05df3b1SJaegeuk Kim 	.set_page_dirty	= f2fs_set_node_page_dirty,
1841487261f3SChao Yu 	.invalidatepage	= f2fs_invalidate_page,
1842487261f3SChao Yu 	.releasepage	= f2fs_release_page,
18435b7a487cSWeichao Guo #ifdef CONFIG_MIGRATION
18445b7a487cSWeichao Guo 	.migratepage    = f2fs_migrate_page,
18455b7a487cSWeichao Guo #endif
1846e05df3b1SJaegeuk Kim };
1847e05df3b1SJaegeuk Kim 
18488a7ed66aSJaegeuk Kim static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
18498a7ed66aSJaegeuk Kim 						nid_t n)
1850e05df3b1SJaegeuk Kim {
18518a7ed66aSJaegeuk Kim 	return radix_tree_lookup(&nm_i->free_nid_root, n);
18523aa770a9SNamjae Jeon }
1853e05df3b1SJaegeuk Kim 
18549a4ffdf5SChao Yu static int __insert_free_nid(struct f2fs_sb_info *sbi,
1855a0761f63SFan Li 			struct free_nid *i, enum nid_state state)
1856e05df3b1SJaegeuk Kim {
1857b8559dc2SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1858b8559dc2SChao Yu 
1859eb0aa4b8SJaegeuk Kim 	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
1860eb0aa4b8SJaegeuk Kim 	if (err)
1861eb0aa4b8SJaegeuk Kim 		return err;
1862eb0aa4b8SJaegeuk Kim 
18639a4ffdf5SChao Yu 	f2fs_bug_on(sbi, state != i->state);
18649a4ffdf5SChao Yu 	nm_i->nid_cnt[state]++;
18659a4ffdf5SChao Yu 	if (state == FREE_NID)
18669a4ffdf5SChao Yu 		list_add_tail(&i->list, &nm_i->free_nid_list);
1867eb0aa4b8SJaegeuk Kim 	return 0;
1868b8559dc2SChao Yu }
1869b8559dc2SChao Yu 
18709a4ffdf5SChao Yu static void __remove_free_nid(struct f2fs_sb_info *sbi,
1871a0761f63SFan Li 			struct free_nid *i, enum nid_state state)
1872b8559dc2SChao Yu {
1873b8559dc2SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1874b8559dc2SChao Yu 
18759a4ffdf5SChao Yu 	f2fs_bug_on(sbi, state != i->state);
18769a4ffdf5SChao Yu 	nm_i->nid_cnt[state]--;
18779a4ffdf5SChao Yu 	if (state == FREE_NID)
1878e05df3b1SJaegeuk Kim 		list_del(&i->list);
18798a7ed66aSJaegeuk Kim 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
1880e05df3b1SJaegeuk Kim }
1881e05df3b1SJaegeuk Kim 
1882a0761f63SFan Li static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
1883a0761f63SFan Li 			enum nid_state org_state, enum nid_state dst_state)
1884a0761f63SFan Li {
1885a0761f63SFan Li 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1886a0761f63SFan Li 
1887a0761f63SFan Li 	f2fs_bug_on(sbi, org_state != i->state);
1888a0761f63SFan Li 	i->state = dst_state;
1889a0761f63SFan Li 	nm_i->nid_cnt[org_state]--;
1890a0761f63SFan Li 	nm_i->nid_cnt[dst_state]++;
1891a0761f63SFan Li 
1892a0761f63SFan Li 	switch (dst_state) {
1893a0761f63SFan Li 	case PREALLOC_NID:
1894a0761f63SFan Li 		list_del(&i->list);
1895a0761f63SFan Li 		break;
1896a0761f63SFan Li 	case FREE_NID:
1897a0761f63SFan Li 		list_add_tail(&i->list, &nm_i->free_nid_list);
1898a0761f63SFan Li 		break;
1899a0761f63SFan Li 	default:
1900a0761f63SFan Li 		BUG_ON(1);
1901a0761f63SFan Li 	}
1902a0761f63SFan Li }
1903a0761f63SFan Li 
19045921aaa1SLiFan static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
19055921aaa1SLiFan 							bool set, bool build)
19065921aaa1SLiFan {
19075921aaa1SLiFan 	struct f2fs_nm_info *nm_i = NM_I(sbi);
19085921aaa1SLiFan 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
19095921aaa1SLiFan 	unsigned int nid_ofs = nid - START_NID(nid);
19105921aaa1SLiFan 
19115921aaa1SLiFan 	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
19125921aaa1SLiFan 		return;
19135921aaa1SLiFan 
19145921aaa1SLiFan 	if (set) {
19155921aaa1SLiFan 		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
19165921aaa1SLiFan 			return;
19175921aaa1SLiFan 		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
19185921aaa1SLiFan 		nm_i->free_nid_count[nat_ofs]++;
19195921aaa1SLiFan 	} else {
19205921aaa1SLiFan 		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
19215921aaa1SLiFan 			return;
19225921aaa1SLiFan 		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
19235921aaa1SLiFan 		if (!build)
19245921aaa1SLiFan 			nm_i->free_nid_count[nat_ofs]--;
19255921aaa1SLiFan 	}
19265921aaa1SLiFan }
19275921aaa1SLiFan 
19284ac91242SChao Yu /* return if the nid is recognized as free */
19295921aaa1SLiFan static bool add_free_nid(struct f2fs_sb_info *sbi,
19305921aaa1SLiFan 				nid_t nid, bool build, bool update)
1931e05df3b1SJaegeuk Kim {
19326fb03f3aSJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
193330a61ddfSChao Yu 	struct free_nid *i, *e;
193459bbd474SJaegeuk Kim 	struct nat_entry *ne;
193530a61ddfSChao Yu 	int err = -EINVAL;
193630a61ddfSChao Yu 	bool ret = false;
19379198acebSJaegeuk Kim 
19389198acebSJaegeuk Kim 	/* 0 nid should not be used */
1939cfb271d4SChao Yu 	if (unlikely(nid == 0))
19404ac91242SChao Yu 		return false;
194159bbd474SJaegeuk Kim 
19427bd59381SGu Zheng 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1943e05df3b1SJaegeuk Kim 	i->nid = nid;
19449a4ffdf5SChao Yu 	i->state = FREE_NID;
1945e05df3b1SJaegeuk Kim 
19465921aaa1SLiFan 	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
1947769ec6e5SJaegeuk Kim 
1948b8559dc2SChao Yu 	spin_lock(&nm_i->nid_list_lock);
194930a61ddfSChao Yu 
195030a61ddfSChao Yu 	if (build) {
195130a61ddfSChao Yu 		/*
195230a61ddfSChao Yu 		 *   Thread A             Thread B
195330a61ddfSChao Yu 		 *  - f2fs_create
195430a61ddfSChao Yu 		 *   - f2fs_new_inode
19554d57b86dSChao Yu 		 *    - f2fs_alloc_nid
19569a4ffdf5SChao Yu 		 *     - __insert_nid_to_list(PREALLOC_NID)
195730a61ddfSChao Yu 		 *                     - f2fs_balance_fs_bg
19584d57b86dSChao Yu 		 *                      - f2fs_build_free_nids
19594d57b86dSChao Yu 		 *                       - __f2fs_build_free_nids
196030a61ddfSChao Yu 		 *                        - scan_nat_page
196130a61ddfSChao Yu 		 *                         - add_free_nid
196230a61ddfSChao Yu 		 *                          - __lookup_nat_cache
196330a61ddfSChao Yu 		 *  - f2fs_add_link
19644d57b86dSChao Yu 		 *   - f2fs_init_inode_metadata
19654d57b86dSChao Yu 		 *    - f2fs_new_inode_page
19664d57b86dSChao Yu 		 *     - f2fs_new_node_page
196730a61ddfSChao Yu 		 *      - set_node_addr
19684d57b86dSChao Yu 		 *  - f2fs_alloc_nid_done
19699a4ffdf5SChao Yu 		 *   - __remove_nid_from_list(PREALLOC_NID)
19709a4ffdf5SChao Yu 		 *                         - __insert_nid_to_list(FREE_NID)
197130a61ddfSChao Yu 		 */
197230a61ddfSChao Yu 		ne = __lookup_nat_cache(nm_i, nid);
197330a61ddfSChao Yu 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
197430a61ddfSChao Yu 				nat_get_blkaddr(ne) != NULL_ADDR))
197530a61ddfSChao Yu 			goto err_out;
197630a61ddfSChao Yu 
197730a61ddfSChao Yu 		e = __lookup_free_nid_list(nm_i, nid);
197830a61ddfSChao Yu 		if (e) {
19799a4ffdf5SChao Yu 			if (e->state == FREE_NID)
198030a61ddfSChao Yu 				ret = true;
198130a61ddfSChao Yu 			goto err_out;
198230a61ddfSChao Yu 		}
198330a61ddfSChao Yu 	}
198430a61ddfSChao Yu 	ret = true;
1985a0761f63SFan Li 	err = __insert_free_nid(sbi, i, FREE_NID);
198630a61ddfSChao Yu err_out:
19875921aaa1SLiFan 	if (update) {
19885921aaa1SLiFan 		update_free_nid_bitmap(sbi, nid, ret, build);
19895921aaa1SLiFan 		if (!build)
19905921aaa1SLiFan 			nm_i->available_nids++;
19915921aaa1SLiFan 	}
1992b8559dc2SChao Yu 	spin_unlock(&nm_i->nid_list_lock);
1993769ec6e5SJaegeuk Kim 	radix_tree_preload_end();
19945921aaa1SLiFan 
199530a61ddfSChao Yu 	if (err)
1996e05df3b1SJaegeuk Kim 		kmem_cache_free(free_nid_slab, i);
199730a61ddfSChao Yu 	return ret;
1998e05df3b1SJaegeuk Kim }
1999e05df3b1SJaegeuk Kim 
2000b8559dc2SChao Yu static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2001e05df3b1SJaegeuk Kim {
2002b8559dc2SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2003e05df3b1SJaegeuk Kim 	struct free_nid *i;
2004cf0ee0f0SChao Yu 	bool need_free = false;
2005cf0ee0f0SChao Yu 
2006b8559dc2SChao Yu 	spin_lock(&nm_i->nid_list_lock);
20078a7ed66aSJaegeuk Kim 	i = __lookup_free_nid_list(nm_i, nid);
20089a4ffdf5SChao Yu 	if (i && i->state == FREE_NID) {
2009a0761f63SFan Li 		__remove_free_nid(sbi, i, FREE_NID);
2010cf0ee0f0SChao Yu 		need_free = true;
2011e05df3b1SJaegeuk Kim 	}
2012b8559dc2SChao Yu 	spin_unlock(&nm_i->nid_list_lock);
2013cf0ee0f0SChao Yu 
2014cf0ee0f0SChao Yu 	if (need_free)
2015cf0ee0f0SChao Yu 		kmem_cache_free(free_nid_slab, i);
2016e05df3b1SJaegeuk Kim }
2017e05df3b1SJaegeuk Kim 
2018e2374015SChao Yu static int scan_nat_page(struct f2fs_sb_info *sbi,
2019e05df3b1SJaegeuk Kim 			struct page *nat_page, nid_t start_nid)
2020e05df3b1SJaegeuk Kim {
20216fb03f3aSJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2022e05df3b1SJaegeuk Kim 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
2023e05df3b1SJaegeuk Kim 	block_t blk_addr;
20244ac91242SChao Yu 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2025e05df3b1SJaegeuk Kim 	int i;
2026e05df3b1SJaegeuk Kim 
202723380b85SJaegeuk Kim 	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
20284ac91242SChao Yu 
2029e05df3b1SJaegeuk Kim 	i = start_nid % NAT_ENTRY_PER_BLOCK;
2030e05df3b1SJaegeuk Kim 
2031e05df3b1SJaegeuk Kim 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2032cfb271d4SChao Yu 		if (unlikely(start_nid >= nm_i->max_nid))
203304431c44SJaegeuk Kim 			break;
203423d38844SHaicheng Li 
2035e05df3b1SJaegeuk Kim 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2036e2374015SChao Yu 
2037e2374015SChao Yu 		if (blk_addr == NEW_ADDR)
2038e2374015SChao Yu 			return -EINVAL;
2039e2374015SChao Yu 
20405921aaa1SLiFan 		if (blk_addr == NULL_ADDR) {
20415921aaa1SLiFan 			add_free_nid(sbi, start_nid, true, true);
20425921aaa1SLiFan 		} else {
2043346fe752SChao Yu 			spin_lock(&NM_I(sbi)->nid_list_lock);
20445921aaa1SLiFan 			update_free_nid_bitmap(sbi, start_nid, false, true);
2045346fe752SChao Yu 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2046e05df3b1SJaegeuk Kim 		}
2047e05df3b1SJaegeuk Kim 	}
2048e2374015SChao Yu 
2049e2374015SChao Yu 	return 0;
20505921aaa1SLiFan }
2051e05df3b1SJaegeuk Kim 
20522fbaa25fSChao Yu static void scan_curseg_cache(struct f2fs_sb_info *sbi)
20534ac91242SChao Yu {
20544ac91242SChao Yu 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
20554ac91242SChao Yu 	struct f2fs_journal *journal = curseg->journal;
20562fbaa25fSChao Yu 	int i;
20574ac91242SChao Yu 
20584ac91242SChao Yu 	down_read(&curseg->journal_rwsem);
20594ac91242SChao Yu 	for (i = 0; i < nats_in_cursum(journal); i++) {
20604ac91242SChao Yu 		block_t addr;
20614ac91242SChao Yu 		nid_t nid;
20624ac91242SChao Yu 
20634ac91242SChao Yu 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
20644ac91242SChao Yu 		nid = le32_to_cpu(nid_in_journal(journal, i));
20654ac91242SChao Yu 		if (addr == NULL_ADDR)
20665921aaa1SLiFan 			add_free_nid(sbi, nid, true, false);
20674ac91242SChao Yu 		else
20684ac91242SChao Yu 			remove_free_nid(sbi, nid);
20694ac91242SChao Yu 	}
20704ac91242SChao Yu 	up_read(&curseg->journal_rwsem);
20712fbaa25fSChao Yu }
20722fbaa25fSChao Yu 
2073e05df3b1SJaegeuk Kim static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
20744ac91242SChao Yu {
20754ac91242SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
20764ac91242SChao Yu 	unsigned int i, idx;
207797456574SFan Li 	nid_t nid;
20784ac91242SChao Yu 
20794ac91242SChao Yu 	down_read(&nm_i->nat_tree_lock);
20804ac91242SChao Yu 
20814ac91242SChao Yu 	for (i = 0; i < nm_i->nat_blocks; i++) {
20824ac91242SChao Yu 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
20834ac91242SChao Yu 			continue;
20844ac91242SChao Yu 		if (!nm_i->free_nid_count[i])
20854ac91242SChao Yu 			continue;
20864ac91242SChao Yu 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
208797456574SFan Li 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
208897456574SFan Li 						NAT_ENTRY_PER_BLOCK, idx);
208997456574SFan Li 			if (idx >= NAT_ENTRY_PER_BLOCK)
209097456574SFan Li 				break;
20914ac91242SChao Yu 
20924ac91242SChao Yu 			nid = i * NAT_ENTRY_PER_BLOCK + idx;
20935921aaa1SLiFan 			add_free_nid(sbi, nid, true, false);
20944ac91242SChao Yu 
20959a4ffdf5SChao Yu 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
20964ac91242SChao Yu 				goto out;
20974ac91242SChao Yu 		}
20984ac91242SChao Yu 	}
20994ac91242SChao Yu out:
21002fbaa25fSChao Yu 	scan_curseg_cache(sbi);
21014ac91242SChao Yu 
21024ac91242SChao Yu 	up_read(&nm_i->nat_tree_lock);
21034ac91242SChao Yu }
21044ac91242SChao Yu 
2105e2374015SChao Yu static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
21064d57b86dSChao Yu 						bool sync, bool mount)
2107e05df3b1SJaegeuk Kim {
2108e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2109e2374015SChao Yu 	int i = 0, ret;
211055008d84SJaegeuk Kim 	nid_t nid = nm_i->next_scan_nid;
2111e05df3b1SJaegeuk Kim 
2112e9cdd307SYunlei He 	if (unlikely(nid >= nm_i->max_nid))
2113e9cdd307SYunlei He 		nid = 0;
2114e9cdd307SYunlei He 
211555008d84SJaegeuk Kim 	/* Enough entries */
21169a4ffdf5SChao Yu 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2117e2374015SChao Yu 		return 0;
2118e05df3b1SJaegeuk Kim 
21194d57b86dSChao Yu 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2120e2374015SChao Yu 		return 0;
2121e05df3b1SJaegeuk Kim 
21224ac91242SChao Yu 	if (!mount) {
21234ac91242SChao Yu 		/* try to find free nids in free_nid_bitmap */
21244ac91242SChao Yu 		scan_free_nid_bits(sbi);
21254ac91242SChao Yu 
212674986213SFan Li 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2127e2374015SChao Yu 			return 0;
212822ad0b6aSJaegeuk Kim 	}
212922ad0b6aSJaegeuk Kim 
213055008d84SJaegeuk Kim 	/* readahead nat pages to be scanned */
21314d57b86dSChao Yu 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
213226879fb1SChao Yu 							META_NAT, true);
2133e05df3b1SJaegeuk Kim 
2134b873b798SJaegeuk Kim 	down_read(&nm_i->nat_tree_lock);
2135a5131193SJaegeuk Kim 
2136e05df3b1SJaegeuk Kim 	while (1) {
213766e83361SYunlei He 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
213866e83361SYunlei He 						nm_i->nat_block_bitmap)) {
2139e05df3b1SJaegeuk Kim 			struct page *page = get_current_nat_page(sbi, nid);
2140e05df3b1SJaegeuk Kim 
2141e2374015SChao Yu 			ret = scan_nat_page(sbi, page, nid);
2142e05df3b1SJaegeuk Kim 			f2fs_put_page(page, 1);
2143e2374015SChao Yu 
2144e2374015SChao Yu 			if (ret) {
2145e2374015SChao Yu 				up_read(&nm_i->nat_tree_lock);
2146e2374015SChao Yu 				f2fs_bug_on(sbi, !mount);
2147e2374015SChao Yu 				f2fs_msg(sbi->sb, KERN_ERR,
2148e2374015SChao Yu 					"NAT is corrupt, run fsck to fix it");
2149e2374015SChao Yu 				return -EINVAL;
2150e2374015SChao Yu 			}
215166e83361SYunlei He 		}
2152e05df3b1SJaegeuk Kim 
2153e05df3b1SJaegeuk Kim 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2154cfb271d4SChao Yu 		if (unlikely(nid >= nm_i->max_nid))
2155e05df3b1SJaegeuk Kim 			nid = 0;
215655008d84SJaegeuk Kim 
2157a6d494b6SChao Yu 		if (++i >= FREE_NID_PAGES)
2158e05df3b1SJaegeuk Kim 			break;
2159e05df3b1SJaegeuk Kim 	}
2160e05df3b1SJaegeuk Kim 
216155008d84SJaegeuk Kim 	/* go to the next free nat pages to find free nids abundantly */
216255008d84SJaegeuk Kim 	nm_i->next_scan_nid = nid;
2163e05df3b1SJaegeuk Kim 
2164e05df3b1SJaegeuk Kim 	/* find free nids from current sum_pages */
21652fbaa25fSChao Yu 	scan_curseg_cache(sbi);
2166dfc08a12SChao Yu 
2167b873b798SJaegeuk Kim 	up_read(&nm_i->nat_tree_lock);
21682db2388fSChao Yu 
21694d57b86dSChao Yu 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2170ea1a29a0SChao Yu 					nm_i->ra_nid_pages, META_NAT, false);
2171e2374015SChao Yu 
2172e2374015SChao Yu 	return 0;
2173e05df3b1SJaegeuk Kim }
2174e05df3b1SJaegeuk Kim 
2175e2374015SChao Yu int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
21762411cf5bSChao Yu {
2177e2374015SChao Yu 	int ret;
2178e2374015SChao Yu 
21792411cf5bSChao Yu 	mutex_lock(&NM_I(sbi)->build_lock);
2180e2374015SChao Yu 	ret = __f2fs_build_free_nids(sbi, sync, mount);
21812411cf5bSChao Yu 	mutex_unlock(&NM_I(sbi)->build_lock);
2182e2374015SChao Yu 
2183e2374015SChao Yu 	return ret;
21842411cf5bSChao Yu }
21852411cf5bSChao Yu 
2186e05df3b1SJaegeuk Kim /*
2187e05df3b1SJaegeuk Kim  * If this function returns success, caller can obtain a new nid
2188e05df3b1SJaegeuk Kim  * from second parameter of this function.
2189e05df3b1SJaegeuk Kim  * The returned nid could be used ino as well as nid when inode is created.
2190e05df3b1SJaegeuk Kim  */
21914d57b86dSChao Yu bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2192e05df3b1SJaegeuk Kim {
2193e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2194e05df3b1SJaegeuk Kim 	struct free_nid *i = NULL;
2195e05df3b1SJaegeuk Kim retry:
2196cb78942bSJaegeuk Kim #ifdef CONFIG_F2FS_FAULT_INJECTION
219755523519SChao Yu 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
219855523519SChao Yu 		f2fs_show_injection_info(FAULT_ALLOC_NID);
2199cb78942bSJaegeuk Kim 		return false;
220055523519SChao Yu 	}
2201cb78942bSJaegeuk Kim #endif
2202b8559dc2SChao Yu 	spin_lock(&nm_i->nid_list_lock);
2203e05df3b1SJaegeuk Kim 
220404d47e67SChao Yu 	if (unlikely(nm_i->available_nids == 0)) {
220504d47e67SChao Yu 		spin_unlock(&nm_i->nid_list_lock);
220604d47e67SChao Yu 		return false;
220704d47e67SChao Yu 	}
2208e05df3b1SJaegeuk Kim 
22094d57b86dSChao Yu 	/* We should not use stale free nids created by f2fs_build_free_nids */
22104d57b86dSChao Yu 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
22119a4ffdf5SChao Yu 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
22129a4ffdf5SChao Yu 		i = list_first_entry(&nm_i->free_nid_list,
2213b8559dc2SChao Yu 					struct free_nid, list);
2214e05df3b1SJaegeuk Kim 		*nid = i->nid;
2215b8559dc2SChao Yu 
2216a0761f63SFan Li 		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
221704d47e67SChao Yu 		nm_i->available_nids--;
22184ac91242SChao Yu 
2219346fe752SChao Yu 		update_free_nid_bitmap(sbi, *nid, false, false);
22204ac91242SChao Yu 
2221b8559dc2SChao Yu 		spin_unlock(&nm_i->nid_list_lock);
2222e05df3b1SJaegeuk Kim 		return true;
2223e05df3b1SJaegeuk Kim 	}
2224b8559dc2SChao Yu 	spin_unlock(&nm_i->nid_list_lock);
222555008d84SJaegeuk Kim 
222655008d84SJaegeuk Kim 	/* Let's scan nat pages and its caches to get free nids */
22274d57b86dSChao Yu 	f2fs_build_free_nids(sbi, true, false);
222855008d84SJaegeuk Kim 	goto retry;
222955008d84SJaegeuk Kim }
2230e05df3b1SJaegeuk Kim 
22310a8165d7SJaegeuk Kim /*
22324d57b86dSChao Yu  * f2fs_alloc_nid() should be called prior to this function.
2233e05df3b1SJaegeuk Kim  */
22344d57b86dSChao Yu void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2235e05df3b1SJaegeuk Kim {
2236e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2237e05df3b1SJaegeuk Kim 	struct free_nid *i;
2238e05df3b1SJaegeuk Kim 
2239b8559dc2SChao Yu 	spin_lock(&nm_i->nid_list_lock);
22408a7ed66aSJaegeuk Kim 	i = __lookup_free_nid_list(nm_i, nid);
2241b8559dc2SChao Yu 	f2fs_bug_on(sbi, !i);
2242a0761f63SFan Li 	__remove_free_nid(sbi, i, PREALLOC_NID);
2243b8559dc2SChao Yu 	spin_unlock(&nm_i->nid_list_lock);
2244cf0ee0f0SChao Yu 
2245cf0ee0f0SChao Yu 	kmem_cache_free(free_nid_slab, i);
2246e05df3b1SJaegeuk Kim }
2247e05df3b1SJaegeuk Kim 
22480a8165d7SJaegeuk Kim /*
22494d57b86dSChao Yu  * f2fs_alloc_nid() should be called prior to this function.
2250e05df3b1SJaegeuk Kim  */
22514d57b86dSChao Yu void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2252e05df3b1SJaegeuk Kim {
225349952fa1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
225449952fa1SJaegeuk Kim 	struct free_nid *i;
2255cf0ee0f0SChao Yu 	bool need_free = false;
225649952fa1SJaegeuk Kim 
225765985d93SJaegeuk Kim 	if (!nid)
225865985d93SJaegeuk Kim 		return;
225965985d93SJaegeuk Kim 
2260b8559dc2SChao Yu 	spin_lock(&nm_i->nid_list_lock);
22618a7ed66aSJaegeuk Kim 	i = __lookup_free_nid_list(nm_i, nid);
2262b8559dc2SChao Yu 	f2fs_bug_on(sbi, !i);
2263b8559dc2SChao Yu 
22644d57b86dSChao Yu 	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2265a0761f63SFan Li 		__remove_free_nid(sbi, i, PREALLOC_NID);
2266cf0ee0f0SChao Yu 		need_free = true;
226795630cbaSHaicheng Li 	} else {
2268a0761f63SFan Li 		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
226995630cbaSHaicheng Li 	}
227004d47e67SChao Yu 
227104d47e67SChao Yu 	nm_i->available_nids++;
227204d47e67SChao Yu 
2273346fe752SChao Yu 	update_free_nid_bitmap(sbi, nid, true, false);
22744ac91242SChao Yu 
2275b8559dc2SChao Yu 	spin_unlock(&nm_i->nid_list_lock);
2276cf0ee0f0SChao Yu 
2277cf0ee0f0SChao Yu 	if (need_free)
2278cf0ee0f0SChao Yu 		kmem_cache_free(free_nid_slab, i);
2279e05df3b1SJaegeuk Kim }
2280e05df3b1SJaegeuk Kim 
22814d57b86dSChao Yu int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
228231696580SChao Yu {
228331696580SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
228431696580SChao Yu 	struct free_nid *i, *next;
228531696580SChao Yu 	int nr = nr_shrink;
228631696580SChao Yu 
22879a4ffdf5SChao Yu 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2288ad4edb83SJaegeuk Kim 		return 0;
2289ad4edb83SJaegeuk Kim 
229031696580SChao Yu 	if (!mutex_trylock(&nm_i->build_lock))
229131696580SChao Yu 		return 0;
229231696580SChao Yu 
2293b8559dc2SChao Yu 	spin_lock(&nm_i->nid_list_lock);
22949a4ffdf5SChao Yu 	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2295b8559dc2SChao Yu 		if (nr_shrink <= 0 ||
22969a4ffdf5SChao Yu 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
229731696580SChao Yu 			break;
2298b8559dc2SChao Yu 
2299a0761f63SFan Li 		__remove_free_nid(sbi, i, FREE_NID);
230031696580SChao Yu 		kmem_cache_free(free_nid_slab, i);
230131696580SChao Yu 		nr_shrink--;
230231696580SChao Yu 	}
2303b8559dc2SChao Yu 	spin_unlock(&nm_i->nid_list_lock);
230431696580SChao Yu 	mutex_unlock(&nm_i->build_lock);
230531696580SChao Yu 
230631696580SChao Yu 	return nr - nr_shrink;
230731696580SChao Yu }
230831696580SChao Yu 
23094d57b86dSChao Yu void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
231028cdce04SChao Yu {
231128cdce04SChao Yu 	void *src_addr, *dst_addr;
231228cdce04SChao Yu 	size_t inline_size;
231328cdce04SChao Yu 	struct page *ipage;
231428cdce04SChao Yu 	struct f2fs_inode *ri;
231528cdce04SChao Yu 
23164d57b86dSChao Yu 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
23179850cf4aSJaegeuk Kim 	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
231828cdce04SChao Yu 
2319e3b4d43fSJaegeuk Kim 	ri = F2FS_INODE(page);
23201eca05aaSYunlei He 	if (ri->i_inline & F2FS_INLINE_XATTR) {
23211eca05aaSYunlei He 		set_inode_flag(inode, FI_INLINE_XATTR);
23221eca05aaSYunlei He 	} else {
232391942321SJaegeuk Kim 		clear_inode_flag(inode, FI_INLINE_XATTR);
2324e3b4d43fSJaegeuk Kim 		goto update_inode;
2325e3b4d43fSJaegeuk Kim 	}
2326e3b4d43fSJaegeuk Kim 
23276afc662eSChao Yu 	dst_addr = inline_xattr_addr(inode, ipage);
23286afc662eSChao Yu 	src_addr = inline_xattr_addr(inode, page);
232928cdce04SChao Yu 	inline_size = inline_xattr_size(inode);
233028cdce04SChao Yu 
2331fec1d657SJaegeuk Kim 	f2fs_wait_on_page_writeback(ipage, NODE, true);
233228cdce04SChao Yu 	memcpy(dst_addr, src_addr, inline_size);
2333e3b4d43fSJaegeuk Kim update_inode:
23344d57b86dSChao Yu 	f2fs_update_inode(inode, ipage);
233528cdce04SChao Yu 	f2fs_put_page(ipage, 1);
233628cdce04SChao Yu }
233728cdce04SChao Yu 
23384d57b86dSChao Yu int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2339abb2366cSJaegeuk Kim {
23404081363fSJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2341abb2366cSJaegeuk Kim 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
234287905682SYunlei He 	nid_t new_xnid;
234387905682SYunlei He 	struct dnode_of_data dn;
2344abb2366cSJaegeuk Kim 	struct node_info ni;
2345d260081cSChao Yu 	struct page *xpage;
23467735730dSChao Yu 	int err;
2347abb2366cSJaegeuk Kim 
2348abb2366cSJaegeuk Kim 	if (!prev_xnid)
2349abb2366cSJaegeuk Kim 		goto recover_xnid;
2350abb2366cSJaegeuk Kim 
2351d260081cSChao Yu 	/* 1: invalidate the previous xattr nid */
23527735730dSChao Yu 	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
23537735730dSChao Yu 	if (err)
23547735730dSChao Yu 		return err;
23557735730dSChao Yu 
23564d57b86dSChao Yu 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2357000519f2SChao Yu 	dec_valid_node_count(sbi, inode, false);
2358479f40c4SJaegeuk Kim 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2359abb2366cSJaegeuk Kim 
2360abb2366cSJaegeuk Kim recover_xnid:
2361d260081cSChao Yu 	/* 2: update xattr nid in inode */
23624d57b86dSChao Yu 	if (!f2fs_alloc_nid(sbi, &new_xnid))
236387905682SYunlei He 		return -ENOSPC;
236487905682SYunlei He 
236587905682SYunlei He 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
23664d57b86dSChao Yu 	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
236787905682SYunlei He 	if (IS_ERR(xpage)) {
23684d57b86dSChao Yu 		f2fs_alloc_nid_failed(sbi, new_xnid);
236987905682SYunlei He 		return PTR_ERR(xpage);
237087905682SYunlei He 	}
237187905682SYunlei He 
23724d57b86dSChao Yu 	f2fs_alloc_nid_done(sbi, new_xnid);
23734d57b86dSChao Yu 	f2fs_update_inode_page(inode);
2374abb2366cSJaegeuk Kim 
2375d260081cSChao Yu 	/* 3: update and set xattr node page dirty */
237687905682SYunlei He 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2377d260081cSChao Yu 
2378d260081cSChao Yu 	set_page_dirty(xpage);
2379d260081cSChao Yu 	f2fs_put_page(xpage, 1);
2380abb2366cSJaegeuk Kim 
2381d260081cSChao Yu 	return 0;
2382abb2366cSJaegeuk Kim }
2383abb2366cSJaegeuk Kim 
23844d57b86dSChao Yu int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2385e05df3b1SJaegeuk Kim {
238658bfaf44SJaegeuk Kim 	struct f2fs_inode *src, *dst;
2387e05df3b1SJaegeuk Kim 	nid_t ino = ino_of_node(page);
2388e05df3b1SJaegeuk Kim 	struct node_info old_ni, new_ni;
2389e05df3b1SJaegeuk Kim 	struct page *ipage;
23907735730dSChao Yu 	int err;
2391e05df3b1SJaegeuk Kim 
23927735730dSChao Yu 	err = f2fs_get_node_info(sbi, ino, &old_ni);
23937735730dSChao Yu 	if (err)
23947735730dSChao Yu 		return err;
2395e8271fa3SJaegeuk Kim 
2396e8271fa3SJaegeuk Kim 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2397e8271fa3SJaegeuk Kim 		return -EINVAL;
2398e8ea9b3dSJaegeuk Kim retry:
2399300e129cSJaegeuk Kim 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2400e8ea9b3dSJaegeuk Kim 	if (!ipage) {
2401e8ea9b3dSJaegeuk Kim 		congestion_wait(BLK_RW_ASYNC, HZ/50);
2402e8ea9b3dSJaegeuk Kim 		goto retry;
2403e8ea9b3dSJaegeuk Kim 	}
2404e05df3b1SJaegeuk Kim 
2405e05df3b1SJaegeuk Kim 	/* Should not use this inode from free nid list */
2406b8559dc2SChao Yu 	remove_free_nid(sbi, ino);
2407e05df3b1SJaegeuk Kim 
2408237c0790SJaegeuk Kim 	if (!PageUptodate(ipage))
2409e05df3b1SJaegeuk Kim 		SetPageUptodate(ipage);
2410e05df3b1SJaegeuk Kim 	fill_node_footer(ipage, ino, ino, 0, true);
2411c5667575SChao Yu 	set_cold_node(page, false);
2412e05df3b1SJaegeuk Kim 
241358bfaf44SJaegeuk Kim 	src = F2FS_INODE(page);
241458bfaf44SJaegeuk Kim 	dst = F2FS_INODE(ipage);
2415e05df3b1SJaegeuk Kim 
241658bfaf44SJaegeuk Kim 	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
241758bfaf44SJaegeuk Kim 	dst->i_size = 0;
241858bfaf44SJaegeuk Kim 	dst->i_blocks = cpu_to_le64(1);
241958bfaf44SJaegeuk Kim 	dst->i_links = cpu_to_le32(1);
242058bfaf44SJaegeuk Kim 	dst->i_xattr_nid = 0;
24217a2af766SChao Yu 	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
24225c57132eSChao Yu 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
24237a2af766SChao Yu 		dst->i_extra_isize = src->i_extra_isize;
24246afc662eSChao Yu 
24256afc662eSChao Yu 		if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
24266afc662eSChao Yu 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
24276afc662eSChao Yu 							i_inline_xattr_size))
24286afc662eSChao Yu 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
24296afc662eSChao Yu 
24305c57132eSChao Yu 		if (f2fs_sb_has_project_quota(sbi->sb) &&
24315c57132eSChao Yu 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
24325c57132eSChao Yu 								i_projid))
24335c57132eSChao Yu 			dst->i_projid = src->i_projid;
24345c57132eSChao Yu 	}
2435e05df3b1SJaegeuk Kim 
2436e05df3b1SJaegeuk Kim 	new_ni = old_ni;
2437e05df3b1SJaegeuk Kim 	new_ni.ino = ino;
2438e05df3b1SJaegeuk Kim 
24390abd675eSChao Yu 	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
244065e5cd0aSJaegeuk Kim 		WARN_ON(1);
2441479f40c4SJaegeuk Kim 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2442e05df3b1SJaegeuk Kim 	inc_valid_inode_count(sbi);
2443617deb8cSJaegeuk Kim 	set_page_dirty(ipage);
2444e05df3b1SJaegeuk Kim 	f2fs_put_page(ipage, 1);
2445e05df3b1SJaegeuk Kim 	return 0;
2446e05df3b1SJaegeuk Kim }
2447e05df3b1SJaegeuk Kim 
24487735730dSChao Yu int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2449e05df3b1SJaegeuk Kim 			unsigned int segno, struct f2fs_summary_block *sum)
2450e05df3b1SJaegeuk Kim {
2451e05df3b1SJaegeuk Kim 	struct f2fs_node *rn;
2452e05df3b1SJaegeuk Kim 	struct f2fs_summary *sum_entry;
2453e05df3b1SJaegeuk Kim 	block_t addr;
24549ecf4b80SChao Yu 	int i, idx, last_offset, nrpages;
2455e05df3b1SJaegeuk Kim 
2456e05df3b1SJaegeuk Kim 	/* scan the node segment */
2457e05df3b1SJaegeuk Kim 	last_offset = sbi->blocks_per_seg;
2458e05df3b1SJaegeuk Kim 	addr = START_BLOCK(sbi, segno);
2459e05df3b1SJaegeuk Kim 	sum_entry = &sum->entries[0];
2460e05df3b1SJaegeuk Kim 
24619ecf4b80SChao Yu 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2462664ba972SJaegeuk Kim 		nrpages = min(last_offset - i, BIO_MAX_PAGES);
2463393ff91fSJaegeuk Kim 
24649af0ff1cSChao Yu 		/* readahead node pages */
24654d57b86dSChao Yu 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
24669af0ff1cSChao Yu 
24679ecf4b80SChao Yu 		for (idx = addr; idx < addr + nrpages; idx++) {
24684d57b86dSChao Yu 			struct page *page = f2fs_get_tmp_page(sbi, idx);
2469393ff91fSJaegeuk Kim 
24707735730dSChao Yu 			if (IS_ERR(page))
24717735730dSChao Yu 				return PTR_ERR(page);
24727735730dSChao Yu 
24739ecf4b80SChao Yu 			rn = F2FS_NODE(page);
2474393ff91fSJaegeuk Kim 			sum_entry->nid = rn->footer.nid;
2475393ff91fSJaegeuk Kim 			sum_entry->version = 0;
2476393ff91fSJaegeuk Kim 			sum_entry->ofs_in_node = 0;
24779af0ff1cSChao Yu 			sum_entry++;
24789ecf4b80SChao Yu 			f2fs_put_page(page, 1);
24799af0ff1cSChao Yu 		}
2480bac4eef6SChao Yu 
24819ecf4b80SChao Yu 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2482bac4eef6SChao Yu 							addr + nrpages);
24839af0ff1cSChao Yu 	}
24847735730dSChao Yu 	return 0;
2485e05df3b1SJaegeuk Kim }
2486e05df3b1SJaegeuk Kim 
2487aec71382SChao Yu static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2488e05df3b1SJaegeuk Kim {
2489e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2490e05df3b1SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2491b7ad7512SChao Yu 	struct f2fs_journal *journal = curseg->journal;
2492e05df3b1SJaegeuk Kim 	int i;
2493e05df3b1SJaegeuk Kim 
2494b7ad7512SChao Yu 	down_write(&curseg->journal_rwsem);
2495dfc08a12SChao Yu 	for (i = 0; i < nats_in_cursum(journal); i++) {
2496e05df3b1SJaegeuk Kim 		struct nat_entry *ne;
2497e05df3b1SJaegeuk Kim 		struct f2fs_nat_entry raw_ne;
2498dfc08a12SChao Yu 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2499e05df3b1SJaegeuk Kim 
2500dfc08a12SChao Yu 		raw_ne = nat_in_journal(journal, i);
25019be32d72SJaegeuk Kim 
2502e05df3b1SJaegeuk Kim 		ne = __lookup_nat_cache(nm_i, nid);
2503e05df3b1SJaegeuk Kim 		if (!ne) {
250412f9ef37SYunlei He 			ne = __alloc_nat_entry(nid, true);
250512f9ef37SYunlei He 			__init_nat_entry(nm_i, ne, &raw_ne, true);
25069be32d72SJaegeuk Kim 		}
250704d47e67SChao Yu 
250804d47e67SChao Yu 		/*
250904d47e67SChao Yu 		 * if a free nat in journal has not been used after last
251004d47e67SChao Yu 		 * checkpoint, we should remove it from available nids,
251104d47e67SChao Yu 		 * since later we will add it again.
251204d47e67SChao Yu 		 */
251304d47e67SChao Yu 		if (!get_nat_flag(ne, IS_DIRTY) &&
251404d47e67SChao Yu 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
251504d47e67SChao Yu 			spin_lock(&nm_i->nid_list_lock);
251604d47e67SChao Yu 			nm_i->available_nids--;
251704d47e67SChao Yu 			spin_unlock(&nm_i->nid_list_lock);
251804d47e67SChao Yu 		}
251904d47e67SChao Yu 
2520e05df3b1SJaegeuk Kim 		__set_nat_cache_dirty(nm_i, ne);
2521e05df3b1SJaegeuk Kim 	}
2522dfc08a12SChao Yu 	update_nats_in_cursum(journal, -i);
2523b7ad7512SChao Yu 	up_write(&curseg->journal_rwsem);
2524e05df3b1SJaegeuk Kim }
2525e05df3b1SJaegeuk Kim 
2526309cc2b6SJaegeuk Kim static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2527309cc2b6SJaegeuk Kim 						struct list_head *head, int max)
2528e05df3b1SJaegeuk Kim {
2529309cc2b6SJaegeuk Kim 	struct nat_entry_set *cur;
2530e05df3b1SJaegeuk Kim 
2531309cc2b6SJaegeuk Kim 	if (nes->entry_cnt >= max)
2532309cc2b6SJaegeuk Kim 		goto add_out;
2533e05df3b1SJaegeuk Kim 
2534309cc2b6SJaegeuk Kim 	list_for_each_entry(cur, head, set_list) {
2535309cc2b6SJaegeuk Kim 		if (cur->entry_cnt >= nes->entry_cnt) {
2536309cc2b6SJaegeuk Kim 			list_add(&nes->set_list, cur->set_list.prev);
2537309cc2b6SJaegeuk Kim 			return;
2538309cc2b6SJaegeuk Kim 		}
2539309cc2b6SJaegeuk Kim 	}
2540309cc2b6SJaegeuk Kim add_out:
2541309cc2b6SJaegeuk Kim 	list_add_tail(&nes->set_list, head);
2542aec71382SChao Yu }
2543aec71382SChao Yu 
25449f7e4a2cSJaegeuk Kim static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
254522ad0b6aSJaegeuk Kim 						struct page *page)
254622ad0b6aSJaegeuk Kim {
254722ad0b6aSJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
254822ad0b6aSJaegeuk Kim 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
254922ad0b6aSJaegeuk Kim 	struct f2fs_nat_block *nat_blk = page_address(page);
255022ad0b6aSJaegeuk Kim 	int valid = 0;
255137a0ab2aSFan Li 	int i = 0;
255222ad0b6aSJaegeuk Kim 
255322ad0b6aSJaegeuk Kim 	if (!enabled_nat_bits(sbi, NULL))
255422ad0b6aSJaegeuk Kim 		return;
255522ad0b6aSJaegeuk Kim 
255637a0ab2aSFan Li 	if (nat_index == 0) {
255737a0ab2aSFan Li 		valid = 1;
255837a0ab2aSFan Li 		i = 1;
255937a0ab2aSFan Li 	}
256037a0ab2aSFan Li 	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
256137a0ab2aSFan Li 		if (nat_blk->entries[i].block_addr != NULL_ADDR)
256222ad0b6aSJaegeuk Kim 			valid++;
256322ad0b6aSJaegeuk Kim 	}
256422ad0b6aSJaegeuk Kim 	if (valid == 0) {
256523380b85SJaegeuk Kim 		__set_bit_le(nat_index, nm_i->empty_nat_bits);
256623380b85SJaegeuk Kim 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
256722ad0b6aSJaegeuk Kim 		return;
256822ad0b6aSJaegeuk Kim 	}
256922ad0b6aSJaegeuk Kim 
257023380b85SJaegeuk Kim 	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
257122ad0b6aSJaegeuk Kim 	if (valid == NAT_ENTRY_PER_BLOCK)
257223380b85SJaegeuk Kim 		__set_bit_le(nat_index, nm_i->full_nat_bits);
257322ad0b6aSJaegeuk Kim 	else
257423380b85SJaegeuk Kim 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
257522ad0b6aSJaegeuk Kim }
257622ad0b6aSJaegeuk Kim 
2577309cc2b6SJaegeuk Kim static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
257822ad0b6aSJaegeuk Kim 		struct nat_entry_set *set, struct cp_control *cpc)
2579309cc2b6SJaegeuk Kim {
2580309cc2b6SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2581b7ad7512SChao Yu 	struct f2fs_journal *journal = curseg->journal;
2582309cc2b6SJaegeuk Kim 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2583309cc2b6SJaegeuk Kim 	bool to_journal = true;
2584309cc2b6SJaegeuk Kim 	struct f2fs_nat_block *nat_blk;
2585309cc2b6SJaegeuk Kim 	struct nat_entry *ne, *cur;
2586309cc2b6SJaegeuk Kim 	struct page *page = NULL;
2587aec71382SChao Yu 
2588aec71382SChao Yu 	/*
2589aec71382SChao Yu 	 * there are two steps to flush nat entries:
2590aec71382SChao Yu 	 * #1, flush nat entries to journal in current hot data summary block.
2591aec71382SChao Yu 	 * #2, flush nat entries to nat page.
2592aec71382SChao Yu 	 */
259322ad0b6aSJaegeuk Kim 	if (enabled_nat_bits(sbi, cpc) ||
259422ad0b6aSJaegeuk Kim 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2595aec71382SChao Yu 		to_journal = false;
2596aec71382SChao Yu 
2597aec71382SChao Yu 	if (to_journal) {
2598b7ad7512SChao Yu 		down_write(&curseg->journal_rwsem);
2599aec71382SChao Yu 	} else {
2600e05df3b1SJaegeuk Kim 		page = get_next_nat_page(sbi, start_nid);
2601e05df3b1SJaegeuk Kim 		nat_blk = page_address(page);
26029850cf4aSJaegeuk Kim 		f2fs_bug_on(sbi, !nat_blk);
2603e05df3b1SJaegeuk Kim 	}
2604e05df3b1SJaegeuk Kim 
2605aec71382SChao Yu 	/* flush dirty nats in nat entry set */
2606309cc2b6SJaegeuk Kim 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2607aec71382SChao Yu 		struct f2fs_nat_entry *raw_ne;
2608aec71382SChao Yu 		nid_t nid = nat_get_nid(ne);
2609aec71382SChao Yu 		int offset;
2610aec71382SChao Yu 
2611febeca6dSChao Yu 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2612309cc2b6SJaegeuk Kim 
2613aec71382SChao Yu 		if (to_journal) {
26144d57b86dSChao Yu 			offset = f2fs_lookup_journal_in_cursum(journal,
2615aec71382SChao Yu 							NAT_JOURNAL, nid, 1);
26169850cf4aSJaegeuk Kim 			f2fs_bug_on(sbi, offset < 0);
2617dfc08a12SChao Yu 			raw_ne = &nat_in_journal(journal, offset);
2618dfc08a12SChao Yu 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
2619aec71382SChao Yu 		} else {
2620aec71382SChao Yu 			raw_ne = &nat_blk->entries[nid - start_nid];
2621aec71382SChao Yu 		}
2622aec71382SChao Yu 		raw_nat_from_node_info(raw_ne, &ne->ni);
262388bd02c9SJaegeuk Kim 		nat_reset_flag(ne);
26240b28b71eSKinglong Mee 		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
262504d47e67SChao Yu 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
26265921aaa1SLiFan 			add_free_nid(sbi, nid, false, true);
26274ac91242SChao Yu 		} else {
26284ac91242SChao Yu 			spin_lock(&NM_I(sbi)->nid_list_lock);
2629346fe752SChao Yu 			update_free_nid_bitmap(sbi, nid, false, false);
263004d47e67SChao Yu 			spin_unlock(&NM_I(sbi)->nid_list_lock);
263104d47e67SChao Yu 		}
2632e05df3b1SJaegeuk Kim 	}
2633aec71382SChao Yu 
263422ad0b6aSJaegeuk Kim 	if (to_journal) {
2635b7ad7512SChao Yu 		up_write(&curseg->journal_rwsem);
263622ad0b6aSJaegeuk Kim 	} else {
263722ad0b6aSJaegeuk Kim 		__update_nat_bits(sbi, start_nid, page);
2638e05df3b1SJaegeuk Kim 		f2fs_put_page(page, 1);
263922ad0b6aSJaegeuk Kim 	}
2640aec71382SChao Yu 
264159c9081bSYunlei He 	/* Allow dirty nats by node block allocation in write_begin */
264259c9081bSYunlei He 	if (!set->entry_cnt) {
2643309cc2b6SJaegeuk Kim 		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2644309cc2b6SJaegeuk Kim 		kmem_cache_free(nat_entry_set_slab, set);
2645309cc2b6SJaegeuk Kim 	}
264659c9081bSYunlei He }
2647aec71382SChao Yu 
2648309cc2b6SJaegeuk Kim /*
2649309cc2b6SJaegeuk Kim  * This function is called during the checkpointing process.
2650309cc2b6SJaegeuk Kim  */
26514d57b86dSChao Yu void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2652309cc2b6SJaegeuk Kim {
2653309cc2b6SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2654309cc2b6SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2655b7ad7512SChao Yu 	struct f2fs_journal *journal = curseg->journal;
26567aed0d45SJaegeuk Kim 	struct nat_entry_set *setvec[SETVEC_SIZE];
2657309cc2b6SJaegeuk Kim 	struct nat_entry_set *set, *tmp;
2658309cc2b6SJaegeuk Kim 	unsigned int found;
2659309cc2b6SJaegeuk Kim 	nid_t set_idx = 0;
2660309cc2b6SJaegeuk Kim 	LIST_HEAD(sets);
2661309cc2b6SJaegeuk Kim 
26627f2ecdd8SJaegeuk Kim 	/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
26637f2ecdd8SJaegeuk Kim 	if (enabled_nat_bits(sbi, cpc)) {
26647f2ecdd8SJaegeuk Kim 		down_write(&nm_i->nat_tree_lock);
26657f2ecdd8SJaegeuk Kim 		remove_nats_in_journal(sbi);
26667f2ecdd8SJaegeuk Kim 		up_write(&nm_i->nat_tree_lock);
26677f2ecdd8SJaegeuk Kim 	}
26687f2ecdd8SJaegeuk Kim 
266920d047c8SChangman Lee 	if (!nm_i->dirty_nat_cnt)
267020d047c8SChangman Lee 		return;
2671a5131193SJaegeuk Kim 
2672b873b798SJaegeuk Kim 	down_write(&nm_i->nat_tree_lock);
2673a5131193SJaegeuk Kim 
2674309cc2b6SJaegeuk Kim 	/*
2675309cc2b6SJaegeuk Kim 	 * if there are no enough space in journal to store dirty nat
2676309cc2b6SJaegeuk Kim 	 * entries, remove all entries from journal and merge them
2677309cc2b6SJaegeuk Kim 	 * into nat entry set.
2678309cc2b6SJaegeuk Kim 	 */
2679900f7362SJaegeuk Kim 	if (enabled_nat_bits(sbi, cpc) ||
268022ad0b6aSJaegeuk Kim 		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2681309cc2b6SJaegeuk Kim 		remove_nats_in_journal(sbi);
2682309cc2b6SJaegeuk Kim 
2683309cc2b6SJaegeuk Kim 	while ((found = __gang_lookup_nat_set(nm_i,
26847aed0d45SJaegeuk Kim 					set_idx, SETVEC_SIZE, setvec))) {
2685309cc2b6SJaegeuk Kim 		unsigned idx;
2686309cc2b6SJaegeuk Kim 		set_idx = setvec[found - 1]->set + 1;
2687309cc2b6SJaegeuk Kim 		for (idx = 0; idx < found; idx++)
2688309cc2b6SJaegeuk Kim 			__adjust_nat_entry_set(setvec[idx], &sets,
2689dfc08a12SChao Yu 						MAX_NAT_JENTRIES(journal));
2690309cc2b6SJaegeuk Kim 	}
2691309cc2b6SJaegeuk Kim 
2692309cc2b6SJaegeuk Kim 	/* flush dirty nats in nat entry set */
2693309cc2b6SJaegeuk Kim 	list_for_each_entry_safe(set, tmp, &sets, set_list)
269422ad0b6aSJaegeuk Kim 		__flush_nat_entry_set(sbi, set, cpc);
2695309cc2b6SJaegeuk Kim 
2696b873b798SJaegeuk Kim 	up_write(&nm_i->nat_tree_lock);
269759c9081bSYunlei He 	/* Allow dirty nats by node block allocation in write_begin */
2698e05df3b1SJaegeuk Kim }
2699e05df3b1SJaegeuk Kim 
270022ad0b6aSJaegeuk Kim static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
270122ad0b6aSJaegeuk Kim {
270222ad0b6aSJaegeuk Kim 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
270322ad0b6aSJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
270422ad0b6aSJaegeuk Kim 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
270522ad0b6aSJaegeuk Kim 	unsigned int i;
270622ad0b6aSJaegeuk Kim 	__u64 cp_ver = cur_cp_version(ckpt);
270722ad0b6aSJaegeuk Kim 	block_t nat_bits_addr;
270822ad0b6aSJaegeuk Kim 
270922ad0b6aSJaegeuk Kim 	if (!enabled_nat_bits(sbi, NULL))
271022ad0b6aSJaegeuk Kim 		return 0;
271122ad0b6aSJaegeuk Kim 
2712df033cafSChao Yu 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
2713acbf054dSChao Yu 	nm_i->nat_bits = f2fs_kzalloc(sbi,
2714acbf054dSChao Yu 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
271522ad0b6aSJaegeuk Kim 	if (!nm_i->nat_bits)
271622ad0b6aSJaegeuk Kim 		return -ENOMEM;
271722ad0b6aSJaegeuk Kim 
271822ad0b6aSJaegeuk Kim 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
271922ad0b6aSJaegeuk Kim 						nm_i->nat_bits_blocks;
272022ad0b6aSJaegeuk Kim 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
27217735730dSChao Yu 		struct page *page;
27227735730dSChao Yu 
27237735730dSChao Yu 		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
27247735730dSChao Yu 		if (IS_ERR(page)) {
27257735730dSChao Yu 			disable_nat_bits(sbi, true);
27267735730dSChao Yu 			return PTR_ERR(page);
27277735730dSChao Yu 		}
272822ad0b6aSJaegeuk Kim 
272922ad0b6aSJaegeuk Kim 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
273022ad0b6aSJaegeuk Kim 					page_address(page), F2FS_BLKSIZE);
273122ad0b6aSJaegeuk Kim 		f2fs_put_page(page, 1);
273222ad0b6aSJaegeuk Kim 	}
273322ad0b6aSJaegeuk Kim 
2734ced2c7eaSKinglong Mee 	cp_ver |= (cur_cp_crc(ckpt) << 32);
273522ad0b6aSJaegeuk Kim 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
273622ad0b6aSJaegeuk Kim 		disable_nat_bits(sbi, true);
273722ad0b6aSJaegeuk Kim 		return 0;
273822ad0b6aSJaegeuk Kim 	}
273922ad0b6aSJaegeuk Kim 
274022ad0b6aSJaegeuk Kim 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
274122ad0b6aSJaegeuk Kim 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
274222ad0b6aSJaegeuk Kim 
274322ad0b6aSJaegeuk Kim 	f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
274422ad0b6aSJaegeuk Kim 	return 0;
274522ad0b6aSJaegeuk Kim }
274622ad0b6aSJaegeuk Kim 
2747bd80a4b9SHou Pengyang static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
27487041d5d2SChao Yu {
27497041d5d2SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
27507041d5d2SChao Yu 	unsigned int i = 0;
27517041d5d2SChao Yu 	nid_t nid, last_nid;
27527041d5d2SChao Yu 
27537041d5d2SChao Yu 	if (!enabled_nat_bits(sbi, NULL))
27547041d5d2SChao Yu 		return;
27557041d5d2SChao Yu 
27567041d5d2SChao Yu 	for (i = 0; i < nm_i->nat_blocks; i++) {
27577041d5d2SChao Yu 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
27587041d5d2SChao Yu 		if (i >= nm_i->nat_blocks)
27597041d5d2SChao Yu 			break;
27607041d5d2SChao Yu 
27617041d5d2SChao Yu 		__set_bit_le(i, nm_i->nat_block_bitmap);
27627041d5d2SChao Yu 
27637041d5d2SChao Yu 		nid = i * NAT_ENTRY_PER_BLOCK;
2764f6986edeSFan Li 		last_nid = nid + NAT_ENTRY_PER_BLOCK;
27657041d5d2SChao Yu 
2766346fe752SChao Yu 		spin_lock(&NM_I(sbi)->nid_list_lock);
27677041d5d2SChao Yu 		for (; nid < last_nid; nid++)
2768346fe752SChao Yu 			update_free_nid_bitmap(sbi, nid, true, true);
2769346fe752SChao Yu 		spin_unlock(&NM_I(sbi)->nid_list_lock);
27707041d5d2SChao Yu 	}
27717041d5d2SChao Yu 
27727041d5d2SChao Yu 	for (i = 0; i < nm_i->nat_blocks; i++) {
27737041d5d2SChao Yu 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
27747041d5d2SChao Yu 		if (i >= nm_i->nat_blocks)
27757041d5d2SChao Yu 			break;
27767041d5d2SChao Yu 
27777041d5d2SChao Yu 		__set_bit_le(i, nm_i->nat_block_bitmap);
27787041d5d2SChao Yu 	}
27797041d5d2SChao Yu }
27807041d5d2SChao Yu 
2781e05df3b1SJaegeuk Kim static int init_node_manager(struct f2fs_sb_info *sbi)
2782e05df3b1SJaegeuk Kim {
2783e05df3b1SJaegeuk Kim 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2784e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2785e05df3b1SJaegeuk Kim 	unsigned char *version_bitmap;
278622ad0b6aSJaegeuk Kim 	unsigned int nat_segs;
278722ad0b6aSJaegeuk Kim 	int err;
2788e05df3b1SJaegeuk Kim 
2789e05df3b1SJaegeuk Kim 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2790e05df3b1SJaegeuk Kim 
2791e05df3b1SJaegeuk Kim 	/* segment_count_nat includes pair segment so divide to 2. */
2792e05df3b1SJaegeuk Kim 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
279322ad0b6aSJaegeuk Kim 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
279422ad0b6aSJaegeuk Kim 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
27957ee0eeabSJaegeuk Kim 
2796b63da15eSJaegeuk Kim 	/* not used nids: 0, node, meta, (and root counted as valid node) */
279704d47e67SChao Yu 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
2798292c196aSChao Yu 				sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
27999a4ffdf5SChao Yu 	nm_i->nid_cnt[FREE_NID] = 0;
28009a4ffdf5SChao Yu 	nm_i->nid_cnt[PREALLOC_NID] = 0;
2801e05df3b1SJaegeuk Kim 	nm_i->nat_cnt = 0;
2802cdfc41c1SJaegeuk Kim 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2803ea1a29a0SChao Yu 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
28042304cb0cSChao Yu 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2805e05df3b1SJaegeuk Kim 
28068a7ed66aSJaegeuk Kim 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
28079a4ffdf5SChao Yu 	INIT_LIST_HEAD(&nm_i->free_nid_list);
2808769ec6e5SJaegeuk Kim 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2809769ec6e5SJaegeuk Kim 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2810e05df3b1SJaegeuk Kim 	INIT_LIST_HEAD(&nm_i->nat_entries);
2811e05df3b1SJaegeuk Kim 
2812e05df3b1SJaegeuk Kim 	mutex_init(&nm_i->build_lock);
2813b8559dc2SChao Yu 	spin_lock_init(&nm_i->nid_list_lock);
2814b873b798SJaegeuk Kim 	init_rwsem(&nm_i->nat_tree_lock);
2815e05df3b1SJaegeuk Kim 
2816e05df3b1SJaegeuk Kim 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
281779b5793bSAlexandru Gheorghiu 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2818e05df3b1SJaegeuk Kim 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2819e05df3b1SJaegeuk Kim 	if (!version_bitmap)
2820e05df3b1SJaegeuk Kim 		return -EFAULT;
2821e05df3b1SJaegeuk Kim 
282279b5793bSAlexandru Gheorghiu 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
282379b5793bSAlexandru Gheorghiu 					GFP_KERNEL);
282479b5793bSAlexandru Gheorghiu 	if (!nm_i->nat_bitmap)
282579b5793bSAlexandru Gheorghiu 		return -ENOMEM;
2826599a09b2SChao Yu 
282722ad0b6aSJaegeuk Kim 	err = __get_nat_bitmaps(sbi);
282822ad0b6aSJaegeuk Kim 	if (err)
282922ad0b6aSJaegeuk Kim 		return err;
283022ad0b6aSJaegeuk Kim 
2831599a09b2SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
2832599a09b2SChao Yu 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
2833599a09b2SChao Yu 					GFP_KERNEL);
2834599a09b2SChao Yu 	if (!nm_i->nat_bitmap_mir)
2835599a09b2SChao Yu 		return -ENOMEM;
2836599a09b2SChao Yu #endif
2837599a09b2SChao Yu 
2838e05df3b1SJaegeuk Kim 	return 0;
2839e05df3b1SJaegeuk Kim }
2840e05df3b1SJaegeuk Kim 
28419f7e4a2cSJaegeuk Kim static int init_free_nid_cache(struct f2fs_sb_info *sbi)
28424ac91242SChao Yu {
28434ac91242SChao Yu 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2844bb1105e4SJaegeuk Kim 	int i;
28454ac91242SChao Yu 
2846026f0507SKees Cook 	nm_i->free_nid_bitmap =
2847026f0507SKees Cook 		f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
2848026f0507SKees Cook 					     nm_i->nat_blocks),
2849026f0507SKees Cook 			     GFP_KERNEL);
28504ac91242SChao Yu 	if (!nm_i->free_nid_bitmap)
28514ac91242SChao Yu 		return -ENOMEM;
28524ac91242SChao Yu 
2853bb1105e4SJaegeuk Kim 	for (i = 0; i < nm_i->nat_blocks; i++) {
2854bb1105e4SJaegeuk Kim 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
2855e15d54d5SYunlei He 			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
285668c43a23SYunlei He 		if (!nm_i->free_nid_bitmap[i])
2857bb1105e4SJaegeuk Kim 			return -ENOMEM;
2858bb1105e4SJaegeuk Kim 	}
2859bb1105e4SJaegeuk Kim 
2860628b3d14SChao Yu 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
28614ac91242SChao Yu 								GFP_KERNEL);
28624ac91242SChao Yu 	if (!nm_i->nat_block_bitmap)
28634ac91242SChao Yu 		return -ENOMEM;
2864586d1492SChao Yu 
28659d2a789cSKees Cook 	nm_i->free_nid_count =
28669d2a789cSKees Cook 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
28679d2a789cSKees Cook 					      nm_i->nat_blocks),
28689d2a789cSKees Cook 			      GFP_KERNEL);
2869586d1492SChao Yu 	if (!nm_i->free_nid_count)
2870586d1492SChao Yu 		return -ENOMEM;
28714ac91242SChao Yu 	return 0;
28724ac91242SChao Yu }
28734ac91242SChao Yu 
28744d57b86dSChao Yu int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
2875e05df3b1SJaegeuk Kim {
2876e05df3b1SJaegeuk Kim 	int err;
2877e05df3b1SJaegeuk Kim 
2878acbf054dSChao Yu 	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
2879acbf054dSChao Yu 							GFP_KERNEL);
2880e05df3b1SJaegeuk Kim 	if (!sbi->nm_info)
2881e05df3b1SJaegeuk Kim 		return -ENOMEM;
2882e05df3b1SJaegeuk Kim 
2883e05df3b1SJaegeuk Kim 	err = init_node_manager(sbi);
2884e05df3b1SJaegeuk Kim 	if (err)
2885e05df3b1SJaegeuk Kim 		return err;
2886e05df3b1SJaegeuk Kim 
28874ac91242SChao Yu 	err = init_free_nid_cache(sbi);
28884ac91242SChao Yu 	if (err)
28894ac91242SChao Yu 		return err;
28904ac91242SChao Yu 
28917041d5d2SChao Yu 	/* load free nid status from nat_bits table */
28927041d5d2SChao Yu 	load_free_nid_bitmap(sbi);
28937041d5d2SChao Yu 
2894e2374015SChao Yu 	return f2fs_build_free_nids(sbi, true, true);
2895e05df3b1SJaegeuk Kim }
2896e05df3b1SJaegeuk Kim 
28974d57b86dSChao Yu void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
2898e05df3b1SJaegeuk Kim {
2899e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2900e05df3b1SJaegeuk Kim 	struct free_nid *i, *next_i;
2901e05df3b1SJaegeuk Kim 	struct nat_entry *natvec[NATVEC_SIZE];
29027aed0d45SJaegeuk Kim 	struct nat_entry_set *setvec[SETVEC_SIZE];
2903e05df3b1SJaegeuk Kim 	nid_t nid = 0;
2904e05df3b1SJaegeuk Kim 	unsigned int found;
2905e05df3b1SJaegeuk Kim 
2906e05df3b1SJaegeuk Kim 	if (!nm_i)
2907e05df3b1SJaegeuk Kim 		return;
2908e05df3b1SJaegeuk Kim 
2909e05df3b1SJaegeuk Kim 	/* destroy free nid list */
2910b8559dc2SChao Yu 	spin_lock(&nm_i->nid_list_lock);
29119a4ffdf5SChao Yu 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
2912a0761f63SFan Li 		__remove_free_nid(sbi, i, FREE_NID);
2913b8559dc2SChao Yu 		spin_unlock(&nm_i->nid_list_lock);
2914cf0ee0f0SChao Yu 		kmem_cache_free(free_nid_slab, i);
2915b8559dc2SChao Yu 		spin_lock(&nm_i->nid_list_lock);
2916e05df3b1SJaegeuk Kim 	}
29179a4ffdf5SChao Yu 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
29189a4ffdf5SChao Yu 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
29199a4ffdf5SChao Yu 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
2920b8559dc2SChao Yu 	spin_unlock(&nm_i->nid_list_lock);
2921e05df3b1SJaegeuk Kim 
2922e05df3b1SJaegeuk Kim 	/* destroy nat cache */
2923b873b798SJaegeuk Kim 	down_write(&nm_i->nat_tree_lock);
2924e05df3b1SJaegeuk Kim 	while ((found = __gang_lookup_nat_cache(nm_i,
2925e05df3b1SJaegeuk Kim 					nid, NATVEC_SIZE, natvec))) {
2926e05df3b1SJaegeuk Kim 		unsigned idx;
29277aed0d45SJaegeuk Kim 
2928b6ce391eSGu Zheng 		nid = nat_get_nid(natvec[found - 1]) + 1;
2929b6ce391eSGu Zheng 		for (idx = 0; idx < found; idx++)
2930b6ce391eSGu Zheng 			__del_from_nat_cache(nm_i, natvec[idx]);
2931e05df3b1SJaegeuk Kim 	}
29329850cf4aSJaegeuk Kim 	f2fs_bug_on(sbi, nm_i->nat_cnt);
29337aed0d45SJaegeuk Kim 
29347aed0d45SJaegeuk Kim 	/* destroy nat set cache */
29357aed0d45SJaegeuk Kim 	nid = 0;
29367aed0d45SJaegeuk Kim 	while ((found = __gang_lookup_nat_set(nm_i,
29377aed0d45SJaegeuk Kim 					nid, SETVEC_SIZE, setvec))) {
29387aed0d45SJaegeuk Kim 		unsigned idx;
29397aed0d45SJaegeuk Kim 
29407aed0d45SJaegeuk Kim 		nid = setvec[found - 1]->set + 1;
29417aed0d45SJaegeuk Kim 		for (idx = 0; idx < found; idx++) {
29427aed0d45SJaegeuk Kim 			/* entry_cnt is not zero, when cp_error was occurred */
29437aed0d45SJaegeuk Kim 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
29447aed0d45SJaegeuk Kim 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
29457aed0d45SJaegeuk Kim 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
29467aed0d45SJaegeuk Kim 		}
29477aed0d45SJaegeuk Kim 	}
2948b873b798SJaegeuk Kim 	up_write(&nm_i->nat_tree_lock);
2949e05df3b1SJaegeuk Kim 
29504ac91242SChao Yu 	kvfree(nm_i->nat_block_bitmap);
2951bb1105e4SJaegeuk Kim 	if (nm_i->free_nid_bitmap) {
2952bb1105e4SJaegeuk Kim 		int i;
2953bb1105e4SJaegeuk Kim 
2954bb1105e4SJaegeuk Kim 		for (i = 0; i < nm_i->nat_blocks; i++)
2955bb1105e4SJaegeuk Kim 			kvfree(nm_i->free_nid_bitmap[i]);
2956bb1105e4SJaegeuk Kim 		kfree(nm_i->free_nid_bitmap);
2957bb1105e4SJaegeuk Kim 	}
2958586d1492SChao Yu 	kvfree(nm_i->free_nid_count);
29594ac91242SChao Yu 
2960e05df3b1SJaegeuk Kim 	kfree(nm_i->nat_bitmap);
296122ad0b6aSJaegeuk Kim 	kfree(nm_i->nat_bits);
2962599a09b2SChao Yu #ifdef CONFIG_F2FS_CHECK_FS
2963599a09b2SChao Yu 	kfree(nm_i->nat_bitmap_mir);
2964599a09b2SChao Yu #endif
2965e05df3b1SJaegeuk Kim 	sbi->nm_info = NULL;
2966e05df3b1SJaegeuk Kim 	kfree(nm_i);
2967e05df3b1SJaegeuk Kim }
2968e05df3b1SJaegeuk Kim 
29694d57b86dSChao Yu int __init f2fs_create_node_manager_caches(void)
2970e05df3b1SJaegeuk Kim {
2971e05df3b1SJaegeuk Kim 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
2972e8512d2eSGu Zheng 			sizeof(struct nat_entry));
2973e05df3b1SJaegeuk Kim 	if (!nat_entry_slab)
2974aec71382SChao Yu 		goto fail;
2975e05df3b1SJaegeuk Kim 
2976e05df3b1SJaegeuk Kim 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
2977e8512d2eSGu Zheng 			sizeof(struct free_nid));
2978aec71382SChao Yu 	if (!free_nid_slab)
2979ce3e6d25SMarkus Elfring 		goto destroy_nat_entry;
2980aec71382SChao Yu 
2981aec71382SChao Yu 	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
2982aec71382SChao Yu 			sizeof(struct nat_entry_set));
2983aec71382SChao Yu 	if (!nat_entry_set_slab)
2984ce3e6d25SMarkus Elfring 		goto destroy_free_nid;
2985e05df3b1SJaegeuk Kim 	return 0;
2986aec71382SChao Yu 
2987ce3e6d25SMarkus Elfring destroy_free_nid:
2988aec71382SChao Yu 	kmem_cache_destroy(free_nid_slab);
2989ce3e6d25SMarkus Elfring destroy_nat_entry:
2990aec71382SChao Yu 	kmem_cache_destroy(nat_entry_slab);
2991aec71382SChao Yu fail:
2992aec71382SChao Yu 	return -ENOMEM;
2993e05df3b1SJaegeuk Kim }
2994e05df3b1SJaegeuk Kim 
29954d57b86dSChao Yu void f2fs_destroy_node_manager_caches(void)
2996e05df3b1SJaegeuk Kim {
2997aec71382SChao Yu 	kmem_cache_destroy(nat_entry_set_slab);
2998e05df3b1SJaegeuk Kim 	kmem_cache_destroy(free_nid_slab);
2999e05df3b1SJaegeuk Kim 	kmem_cache_destroy(nat_entry_slab);
3000e05df3b1SJaegeuk Kim }
3001