xref: /openbmc/linux/fs/f2fs/node.c (revision 45590710)
10a8165d7SJaegeuk Kim /*
2e05df3b1SJaegeuk Kim  * fs/f2fs/node.c
3e05df3b1SJaegeuk Kim  *
4e05df3b1SJaegeuk Kim  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5e05df3b1SJaegeuk Kim  *             http://www.samsung.com/
6e05df3b1SJaegeuk Kim  *
7e05df3b1SJaegeuk Kim  * This program is free software; you can redistribute it and/or modify
8e05df3b1SJaegeuk Kim  * it under the terms of the GNU General Public License version 2 as
9e05df3b1SJaegeuk Kim  * published by the Free Software Foundation.
10e05df3b1SJaegeuk Kim  */
11e05df3b1SJaegeuk Kim #include <linux/fs.h>
12e05df3b1SJaegeuk Kim #include <linux/f2fs_fs.h>
13e05df3b1SJaegeuk Kim #include <linux/mpage.h>
14e05df3b1SJaegeuk Kim #include <linux/backing-dev.h>
15e05df3b1SJaegeuk Kim #include <linux/blkdev.h>
16e05df3b1SJaegeuk Kim #include <linux/pagevec.h>
17e05df3b1SJaegeuk Kim #include <linux/swap.h>
18e05df3b1SJaegeuk Kim 
19e05df3b1SJaegeuk Kim #include "f2fs.h"
20e05df3b1SJaegeuk Kim #include "node.h"
21e05df3b1SJaegeuk Kim #include "segment.h"
2251dd6249SNamjae Jeon #include <trace/events/f2fs.h>
23e05df3b1SJaegeuk Kim 
24e05df3b1SJaegeuk Kim static struct kmem_cache *nat_entry_slab;
25e05df3b1SJaegeuk Kim static struct kmem_cache *free_nid_slab;
26e05df3b1SJaegeuk Kim 
27e05df3b1SJaegeuk Kim static void clear_node_page_dirty(struct page *page)
28e05df3b1SJaegeuk Kim {
29e05df3b1SJaegeuk Kim 	struct address_space *mapping = page->mapping;
30e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
31e05df3b1SJaegeuk Kim 	unsigned long flags;
32e05df3b1SJaegeuk Kim 
33e05df3b1SJaegeuk Kim 	if (PageDirty(page)) {
34e05df3b1SJaegeuk Kim 		spin_lock_irqsave(&mapping->tree_lock, flags);
35e05df3b1SJaegeuk Kim 		radix_tree_tag_clear(&mapping->page_tree,
36e05df3b1SJaegeuk Kim 				page_index(page),
37e05df3b1SJaegeuk Kim 				PAGECACHE_TAG_DIRTY);
38e05df3b1SJaegeuk Kim 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
39e05df3b1SJaegeuk Kim 
40e05df3b1SJaegeuk Kim 		clear_page_dirty_for_io(page);
41e05df3b1SJaegeuk Kim 		dec_page_count(sbi, F2FS_DIRTY_NODES);
42e05df3b1SJaegeuk Kim 	}
43e05df3b1SJaegeuk Kim 	ClearPageUptodate(page);
44e05df3b1SJaegeuk Kim }
45e05df3b1SJaegeuk Kim 
46e05df3b1SJaegeuk Kim static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
47e05df3b1SJaegeuk Kim {
48e05df3b1SJaegeuk Kim 	pgoff_t index = current_nat_addr(sbi, nid);
49e05df3b1SJaegeuk Kim 	return get_meta_page(sbi, index);
50e05df3b1SJaegeuk Kim }
51e05df3b1SJaegeuk Kim 
52e05df3b1SJaegeuk Kim static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
53e05df3b1SJaegeuk Kim {
54e05df3b1SJaegeuk Kim 	struct page *src_page;
55e05df3b1SJaegeuk Kim 	struct page *dst_page;
56e05df3b1SJaegeuk Kim 	pgoff_t src_off;
57e05df3b1SJaegeuk Kim 	pgoff_t dst_off;
58e05df3b1SJaegeuk Kim 	void *src_addr;
59e05df3b1SJaegeuk Kim 	void *dst_addr;
60e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
61e05df3b1SJaegeuk Kim 
62e05df3b1SJaegeuk Kim 	src_off = current_nat_addr(sbi, nid);
63e05df3b1SJaegeuk Kim 	dst_off = next_nat_addr(sbi, src_off);
64e05df3b1SJaegeuk Kim 
65e05df3b1SJaegeuk Kim 	/* get current nat block page with lock */
66e05df3b1SJaegeuk Kim 	src_page = get_meta_page(sbi, src_off);
67e05df3b1SJaegeuk Kim 
68e05df3b1SJaegeuk Kim 	/* Dirty src_page means that it is already the new target NAT page. */
69e05df3b1SJaegeuk Kim 	if (PageDirty(src_page))
70e05df3b1SJaegeuk Kim 		return src_page;
71e05df3b1SJaegeuk Kim 
72e05df3b1SJaegeuk Kim 	dst_page = grab_meta_page(sbi, dst_off);
73e05df3b1SJaegeuk Kim 
74e05df3b1SJaegeuk Kim 	src_addr = page_address(src_page);
75e05df3b1SJaegeuk Kim 	dst_addr = page_address(dst_page);
76e05df3b1SJaegeuk Kim 	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
77e05df3b1SJaegeuk Kim 	set_page_dirty(dst_page);
78e05df3b1SJaegeuk Kim 	f2fs_put_page(src_page, 1);
79e05df3b1SJaegeuk Kim 
80e05df3b1SJaegeuk Kim 	set_to_next_nat(nm_i, nid);
81e05df3b1SJaegeuk Kim 
82e05df3b1SJaegeuk Kim 	return dst_page;
83e05df3b1SJaegeuk Kim }
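
/*
 * A rough sketch of the double-copy NAT update that get_next_nat_page()
 * performs (assuming the usual layout where every NAT block has two on-disk
 * slots and the checkpoint's nat_bitmap records which one is live):
 *
 *	src_off = current_nat_addr(sbi, nid);	slot referenced by checkpoint
 *	dst_off = next_nat_addr(sbi, src_off);	its twin slot
 *	copy src -> dst, mark dst dirty, then set_to_next_nat(nm_i, nid)
 *
 * so further NAT updates for this nid land in the twin slot, while the old
 * copy stays intact until the next checkpoint commits the bitmap flip.
 */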
84e05df3b1SJaegeuk Kim 
850a8165d7SJaegeuk Kim /*
86e05df3b1SJaegeuk Kim  * Readahead NAT pages
87e05df3b1SJaegeuk Kim  */
88e05df3b1SJaegeuk Kim static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
89e05df3b1SJaegeuk Kim {
90e05df3b1SJaegeuk Kim 	struct address_space *mapping = sbi->meta_inode->i_mapping;
91e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
92c718379bSJaegeuk Kim 	struct blk_plug plug;
93e05df3b1SJaegeuk Kim 	struct page *page;
94e05df3b1SJaegeuk Kim 	pgoff_t index;
95e05df3b1SJaegeuk Kim 	int i;
96e05df3b1SJaegeuk Kim 
97c718379bSJaegeuk Kim 	blk_start_plug(&plug);
98c718379bSJaegeuk Kim 
99e05df3b1SJaegeuk Kim 	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
100e05df3b1SJaegeuk Kim 		if (nid >= nm_i->max_nid)
101e05df3b1SJaegeuk Kim 			nid = 0;
102e05df3b1SJaegeuk Kim 		index = current_nat_addr(sbi, nid);
103e05df3b1SJaegeuk Kim 
104e05df3b1SJaegeuk Kim 		page = grab_cache_page(mapping, index);
105e05df3b1SJaegeuk Kim 		if (!page)
106e05df3b1SJaegeuk Kim 			continue;
107393ff91fSJaegeuk Kim 		if (PageUptodate(page)) {
108e05df3b1SJaegeuk Kim 			f2fs_put_page(page, 1);
109e05df3b1SJaegeuk Kim 			continue;
110e05df3b1SJaegeuk Kim 		}
111393ff91fSJaegeuk Kim 		if (f2fs_readpage(sbi, page, index, READ))
112393ff91fSJaegeuk Kim 			continue;
113393ff91fSJaegeuk Kim 
114369a708cSJaegeuk Kim 		f2fs_put_page(page, 0);
115e05df3b1SJaegeuk Kim 	}
116c718379bSJaegeuk Kim 	blk_finish_plug(&plug);
117e05df3b1SJaegeuk Kim }
118e05df3b1SJaegeuk Kim 
119e05df3b1SJaegeuk Kim static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
120e05df3b1SJaegeuk Kim {
121e05df3b1SJaegeuk Kim 	return radix_tree_lookup(&nm_i->nat_root, n);
122e05df3b1SJaegeuk Kim }
123e05df3b1SJaegeuk Kim 
124e05df3b1SJaegeuk Kim static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
125e05df3b1SJaegeuk Kim 		nid_t start, unsigned int nr, struct nat_entry **ep)
126e05df3b1SJaegeuk Kim {
127e05df3b1SJaegeuk Kim 	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
128e05df3b1SJaegeuk Kim }
129e05df3b1SJaegeuk Kim 
130e05df3b1SJaegeuk Kim static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
131e05df3b1SJaegeuk Kim {
132e05df3b1SJaegeuk Kim 	list_del(&e->list);
133e05df3b1SJaegeuk Kim 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
134e05df3b1SJaegeuk Kim 	nm_i->nat_cnt--;
135e05df3b1SJaegeuk Kim 	kmem_cache_free(nat_entry_slab, e);
136e05df3b1SJaegeuk Kim }
137e05df3b1SJaegeuk Kim 
138e05df3b1SJaegeuk Kim int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
139e05df3b1SJaegeuk Kim {
140e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
141e05df3b1SJaegeuk Kim 	struct nat_entry *e;
142e05df3b1SJaegeuk Kim 	int is_cp = 1;
143e05df3b1SJaegeuk Kim 
144e05df3b1SJaegeuk Kim 	read_lock(&nm_i->nat_tree_lock);
145e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, nid);
146e05df3b1SJaegeuk Kim 	if (e && !e->checkpointed)
147e05df3b1SJaegeuk Kim 		is_cp = 0;
148e05df3b1SJaegeuk Kim 	read_unlock(&nm_i->nat_tree_lock);
149e05df3b1SJaegeuk Kim 	return is_cp;
150e05df3b1SJaegeuk Kim }
151e05df3b1SJaegeuk Kim 
152e05df3b1SJaegeuk Kim static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
153e05df3b1SJaegeuk Kim {
154e05df3b1SJaegeuk Kim 	struct nat_entry *new;
155e05df3b1SJaegeuk Kim 
156e05df3b1SJaegeuk Kim 	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
157e05df3b1SJaegeuk Kim 	if (!new)
158e05df3b1SJaegeuk Kim 		return NULL;
159e05df3b1SJaegeuk Kim 	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
160e05df3b1SJaegeuk Kim 		kmem_cache_free(nat_entry_slab, new);
161e05df3b1SJaegeuk Kim 		return NULL;
162e05df3b1SJaegeuk Kim 	}
163e05df3b1SJaegeuk Kim 	memset(new, 0, sizeof(struct nat_entry));
164e05df3b1SJaegeuk Kim 	nat_set_nid(new, nid);
165e05df3b1SJaegeuk Kim 	list_add_tail(&new->list, &nm_i->nat_entries);
166e05df3b1SJaegeuk Kim 	nm_i->nat_cnt++;
167e05df3b1SJaegeuk Kim 	return new;
168e05df3b1SJaegeuk Kim }
169e05df3b1SJaegeuk Kim 
170e05df3b1SJaegeuk Kim static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
171e05df3b1SJaegeuk Kim 						struct f2fs_nat_entry *ne)
172e05df3b1SJaegeuk Kim {
173e05df3b1SJaegeuk Kim 	struct nat_entry *e;
174e05df3b1SJaegeuk Kim retry:
175e05df3b1SJaegeuk Kim 	write_lock(&nm_i->nat_tree_lock);
176e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, nid);
177e05df3b1SJaegeuk Kim 	if (!e) {
178e05df3b1SJaegeuk Kim 		e = grab_nat_entry(nm_i, nid);
179e05df3b1SJaegeuk Kim 		if (!e) {
180e05df3b1SJaegeuk Kim 			write_unlock(&nm_i->nat_tree_lock);
181e05df3b1SJaegeuk Kim 			goto retry;
182e05df3b1SJaegeuk Kim 		}
183e05df3b1SJaegeuk Kim 		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
184e05df3b1SJaegeuk Kim 		nat_set_ino(e, le32_to_cpu(ne->ino));
185e05df3b1SJaegeuk Kim 		nat_set_version(e, ne->version);
186e05df3b1SJaegeuk Kim 		e->checkpointed = true;
187e05df3b1SJaegeuk Kim 	}
188e05df3b1SJaegeuk Kim 	write_unlock(&nm_i->nat_tree_lock);
189e05df3b1SJaegeuk Kim }
190e05df3b1SJaegeuk Kim 
191e05df3b1SJaegeuk Kim static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
192e05df3b1SJaegeuk Kim 			block_t new_blkaddr)
193e05df3b1SJaegeuk Kim {
194e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
195e05df3b1SJaegeuk Kim 	struct nat_entry *e;
196e05df3b1SJaegeuk Kim retry:
197e05df3b1SJaegeuk Kim 	write_lock(&nm_i->nat_tree_lock);
198e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, ni->nid);
199e05df3b1SJaegeuk Kim 	if (!e) {
200e05df3b1SJaegeuk Kim 		e = grab_nat_entry(nm_i, ni->nid);
201e05df3b1SJaegeuk Kim 		if (!e) {
202e05df3b1SJaegeuk Kim 			write_unlock(&nm_i->nat_tree_lock);
203e05df3b1SJaegeuk Kim 			goto retry;
204e05df3b1SJaegeuk Kim 		}
205e05df3b1SJaegeuk Kim 		e->ni = *ni;
206e05df3b1SJaegeuk Kim 		e->checkpointed = true;
207e05df3b1SJaegeuk Kim 		BUG_ON(ni->blk_addr == NEW_ADDR);
208e05df3b1SJaegeuk Kim 	} else if (new_blkaddr == NEW_ADDR) {
209e05df3b1SJaegeuk Kim 		/*
210e05df3b1SJaegeuk Kim 		 * when nid is reallocated,
211e05df3b1SJaegeuk Kim 		 * the previous nat entry can remain in the nat cache.
212e05df3b1SJaegeuk Kim 		 * So, reinitialize it with new information.
213e05df3b1SJaegeuk Kim 		 */
214e05df3b1SJaegeuk Kim 		e->ni = *ni;
215e05df3b1SJaegeuk Kim 		BUG_ON(ni->blk_addr != NULL_ADDR);
216e05df3b1SJaegeuk Kim 	}
217e05df3b1SJaegeuk Kim 
218e05df3b1SJaegeuk Kim 	if (new_blkaddr == NEW_ADDR)
219e05df3b1SJaegeuk Kim 		e->checkpointed = false;
220e05df3b1SJaegeuk Kim 
221e05df3b1SJaegeuk Kim 	/* sanity check */
222e05df3b1SJaegeuk Kim 	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
223e05df3b1SJaegeuk Kim 	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
224e05df3b1SJaegeuk Kim 			new_blkaddr == NULL_ADDR);
225e05df3b1SJaegeuk Kim 	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
226e05df3b1SJaegeuk Kim 			new_blkaddr == NEW_ADDR);
227e05df3b1SJaegeuk Kim 	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
228e05df3b1SJaegeuk Kim 			nat_get_blkaddr(e) != NULL_ADDR &&
229e05df3b1SJaegeuk Kim 			new_blkaddr == NEW_ADDR);
230e05df3b1SJaegeuk Kim 
231e05df3b1SJaegeuk Kim 	/* increment the version number as the node is removed */
232e05df3b1SJaegeuk Kim 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
233e05df3b1SJaegeuk Kim 		unsigned char version = nat_get_version(e);
234e05df3b1SJaegeuk Kim 		nat_set_version(e, inc_node_version(version));
235e05df3b1SJaegeuk Kim 	}
236e05df3b1SJaegeuk Kim 
237e05df3b1SJaegeuk Kim 	/* change address */
238e05df3b1SJaegeuk Kim 	nat_set_blkaddr(e, new_blkaddr);
239e05df3b1SJaegeuk Kim 	__set_nat_cache_dirty(nm_i, e);
240e05df3b1SJaegeuk Kim 	write_unlock(&nm_i->nat_tree_lock);
241e05df3b1SJaegeuk Kim }
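
/*
 * The sanity checks in set_node_addr() encode the expected life cycle of a
 * node block address (a sketch, reading new_blkaddr at each step):
 *
 *	NULL_ADDR -> NEW_ADDR        nid reserved by new_node_page()
 *	NEW_ADDR  -> valid blkaddr   node page written out for the first time
 *	valid     -> valid blkaddr   node page relocated by a later write
 *	valid     -> NULL_ADDR       node truncated; the version is bumped
 *
 * Going straight from NULL_ADDR to NULL_ADDR, or back to NEW_ADDR from an
 * already written node, would indicate a bug and trips the BUG_ON()s above.
 */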
242e05df3b1SJaegeuk Kim 
243e05df3b1SJaegeuk Kim static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
244e05df3b1SJaegeuk Kim {
245e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
246e05df3b1SJaegeuk Kim 
2476cac3759SHaicheng Li 	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
248e05df3b1SJaegeuk Kim 		return 0;
249e05df3b1SJaegeuk Kim 
250e05df3b1SJaegeuk Kim 	write_lock(&nm_i->nat_tree_lock);
251e05df3b1SJaegeuk Kim 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
252e05df3b1SJaegeuk Kim 		struct nat_entry *ne;
253e05df3b1SJaegeuk Kim 		ne = list_first_entry(&nm_i->nat_entries,
254e05df3b1SJaegeuk Kim 					struct nat_entry, list);
255e05df3b1SJaegeuk Kim 		__del_from_nat_cache(nm_i, ne);
256e05df3b1SJaegeuk Kim 		nr_shrink--;
257e05df3b1SJaegeuk Kim 	}
258e05df3b1SJaegeuk Kim 	write_unlock(&nm_i->nat_tree_lock);
259e05df3b1SJaegeuk Kim 	return nr_shrink;
260e05df3b1SJaegeuk Kim }
261e05df3b1SJaegeuk Kim 
2620a8165d7SJaegeuk Kim /*
263e05df3b1SJaegeuk Kim  * This function always returns success.
264e05df3b1SJaegeuk Kim  */
265e05df3b1SJaegeuk Kim void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
266e05df3b1SJaegeuk Kim {
267e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
268e05df3b1SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
269e05df3b1SJaegeuk Kim 	struct f2fs_summary_block *sum = curseg->sum_blk;
270e05df3b1SJaegeuk Kim 	nid_t start_nid = START_NID(nid);
271e05df3b1SJaegeuk Kim 	struct f2fs_nat_block *nat_blk;
272e05df3b1SJaegeuk Kim 	struct page *page = NULL;
273e05df3b1SJaegeuk Kim 	struct f2fs_nat_entry ne;
274e05df3b1SJaegeuk Kim 	struct nat_entry *e;
275e05df3b1SJaegeuk Kim 	int i;
276e05df3b1SJaegeuk Kim 
277be4124f8SNamjae Jeon 	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
278e05df3b1SJaegeuk Kim 	ni->nid = nid;
279e05df3b1SJaegeuk Kim 
280e05df3b1SJaegeuk Kim 	/* Check nat cache */
281e05df3b1SJaegeuk Kim 	read_lock(&nm_i->nat_tree_lock);
282e05df3b1SJaegeuk Kim 	e = __lookup_nat_cache(nm_i, nid);
283e05df3b1SJaegeuk Kim 	if (e) {
284e05df3b1SJaegeuk Kim 		ni->ino = nat_get_ino(e);
285e05df3b1SJaegeuk Kim 		ni->blk_addr = nat_get_blkaddr(e);
286e05df3b1SJaegeuk Kim 		ni->version = nat_get_version(e);
287e05df3b1SJaegeuk Kim 	}
288e05df3b1SJaegeuk Kim 	read_unlock(&nm_i->nat_tree_lock);
289e05df3b1SJaegeuk Kim 	if (e)
290e05df3b1SJaegeuk Kim 		return;
291e05df3b1SJaegeuk Kim 
292e05df3b1SJaegeuk Kim 	/* Check current segment summary */
293e05df3b1SJaegeuk Kim 	mutex_lock(&curseg->curseg_mutex);
294e05df3b1SJaegeuk Kim 	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
295e05df3b1SJaegeuk Kim 	if (i >= 0) {
296e05df3b1SJaegeuk Kim 		ne = nat_in_journal(sum, i);
297e05df3b1SJaegeuk Kim 		node_info_from_raw_nat(ni, &ne);
298e05df3b1SJaegeuk Kim 	}
299e05df3b1SJaegeuk Kim 	mutex_unlock(&curseg->curseg_mutex);
300e05df3b1SJaegeuk Kim 	if (i >= 0)
301e05df3b1SJaegeuk Kim 		goto cache;
302e05df3b1SJaegeuk Kim 
303e05df3b1SJaegeuk Kim 	/* Fill node_info from nat page */
304e05df3b1SJaegeuk Kim 	page = get_current_nat_page(sbi, start_nid);
305e05df3b1SJaegeuk Kim 	nat_blk = (struct f2fs_nat_block *)page_address(page);
306e05df3b1SJaegeuk Kim 	ne = nat_blk->entries[nid - start_nid];
307e05df3b1SJaegeuk Kim 	node_info_from_raw_nat(ni, &ne);
308e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 1);
309e05df3b1SJaegeuk Kim cache:
310e05df3b1SJaegeuk Kim 	/* cache nat entry */
311e05df3b1SJaegeuk Kim 	cache_nat_entry(NM_I(sbi), nid, &ne);
312e05df3b1SJaegeuk Kim }
313e05df3b1SJaegeuk Kim 
3140a8165d7SJaegeuk Kim /*
315e05df3b1SJaegeuk Kim  * The maximum depth is four.
316e05df3b1SJaegeuk Kim  * Offset[0] will have raw inode offset.
317e05df3b1SJaegeuk Kim  */
318e05df3b1SJaegeuk Kim static int get_node_path(long block, int offset[4], unsigned int noffset[4])
319e05df3b1SJaegeuk Kim {
320e05df3b1SJaegeuk Kim 	const long direct_index = ADDRS_PER_INODE;
321e05df3b1SJaegeuk Kim 	const long direct_blks = ADDRS_PER_BLOCK;
322e05df3b1SJaegeuk Kim 	const long dptrs_per_blk = NIDS_PER_BLOCK;
323e05df3b1SJaegeuk Kim 	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
324e05df3b1SJaegeuk Kim 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
325e05df3b1SJaegeuk Kim 	int n = 0;
326e05df3b1SJaegeuk Kim 	int level = 0;
327e05df3b1SJaegeuk Kim 
328e05df3b1SJaegeuk Kim 	noffset[0] = 0;
329e05df3b1SJaegeuk Kim 
330e05df3b1SJaegeuk Kim 	if (block < direct_index) {
33125c0a6e5SNamjae Jeon 		offset[n] = block;
332e05df3b1SJaegeuk Kim 		goto got;
333e05df3b1SJaegeuk Kim 	}
334e05df3b1SJaegeuk Kim 	block -= direct_index;
335e05df3b1SJaegeuk Kim 	if (block < direct_blks) {
336e05df3b1SJaegeuk Kim 		offset[n++] = NODE_DIR1_BLOCK;
337e05df3b1SJaegeuk Kim 		noffset[n] = 1;
33825c0a6e5SNamjae Jeon 		offset[n] = block;
339e05df3b1SJaegeuk Kim 		level = 1;
340e05df3b1SJaegeuk Kim 		goto got;
341e05df3b1SJaegeuk Kim 	}
342e05df3b1SJaegeuk Kim 	block -= direct_blks;
343e05df3b1SJaegeuk Kim 	if (block < direct_blks) {
344e05df3b1SJaegeuk Kim 		offset[n++] = NODE_DIR2_BLOCK;
345e05df3b1SJaegeuk Kim 		noffset[n] = 2;
34625c0a6e5SNamjae Jeon 		offset[n] = block;
347e05df3b1SJaegeuk Kim 		level = 1;
348e05df3b1SJaegeuk Kim 		goto got;
349e05df3b1SJaegeuk Kim 	}
350e05df3b1SJaegeuk Kim 	block -= direct_blks;
351e05df3b1SJaegeuk Kim 	if (block < indirect_blks) {
352e05df3b1SJaegeuk Kim 		offset[n++] = NODE_IND1_BLOCK;
353e05df3b1SJaegeuk Kim 		noffset[n] = 3;
354e05df3b1SJaegeuk Kim 		offset[n++] = block / direct_blks;
355e05df3b1SJaegeuk Kim 		noffset[n] = 4 + offset[n - 1];
35625c0a6e5SNamjae Jeon 		offset[n] = block % direct_blks;
357e05df3b1SJaegeuk Kim 		level = 2;
358e05df3b1SJaegeuk Kim 		goto got;
359e05df3b1SJaegeuk Kim 	}
360e05df3b1SJaegeuk Kim 	block -= indirect_blks;
361e05df3b1SJaegeuk Kim 	if (block < indirect_blks) {
362e05df3b1SJaegeuk Kim 		offset[n++] = NODE_IND2_BLOCK;
363e05df3b1SJaegeuk Kim 		noffset[n] = 4 + dptrs_per_blk;
364e05df3b1SJaegeuk Kim 		offset[n++] = block / direct_blks;
365e05df3b1SJaegeuk Kim 		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
36625c0a6e5SNamjae Jeon 		offset[n] = block % direct_blks;
367e05df3b1SJaegeuk Kim 		level = 2;
368e05df3b1SJaegeuk Kim 		goto got;
369e05df3b1SJaegeuk Kim 	}
370e05df3b1SJaegeuk Kim 	block -= indirect_blks;
371e05df3b1SJaegeuk Kim 	if (block < dindirect_blks) {
372e05df3b1SJaegeuk Kim 		offset[n++] = NODE_DIND_BLOCK;
373e05df3b1SJaegeuk Kim 		noffset[n] = 5 + (dptrs_per_blk * 2);
374e05df3b1SJaegeuk Kim 		offset[n++] = block / indirect_blks;
375e05df3b1SJaegeuk Kim 		noffset[n] = 6 + (dptrs_per_blk * 2) +
376e05df3b1SJaegeuk Kim 			      offset[n - 1] * (dptrs_per_blk + 1);
377e05df3b1SJaegeuk Kim 		offset[n++] = (block / direct_blks) % dptrs_per_blk;
378e05df3b1SJaegeuk Kim 		noffset[n] = 7 + (dptrs_per_blk * 2) +
379e05df3b1SJaegeuk Kim 			      offset[n - 2] * (dptrs_per_blk + 1) +
380e05df3b1SJaegeuk Kim 			      offset[n - 1];
38125c0a6e5SNamjae Jeon 		offset[n] = block % direct_blks;
382e05df3b1SJaegeuk Kim 		level = 3;
383e05df3b1SJaegeuk Kim 		goto got;
384e05df3b1SJaegeuk Kim 	} else {
385e05df3b1SJaegeuk Kim 		BUG();
386e05df3b1SJaegeuk Kim 	}
387e05df3b1SJaegeuk Kim got:
388e05df3b1SJaegeuk Kim 	return level;
389e05df3b1SJaegeuk Kim }
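
/*
 * Worked example (assuming the usual 4KB-block constants from f2fs_fs.h,
 * ADDRS_PER_INODE == 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 *
 *	block 100  -> level 0, offset = {100}
 *	block 1000 -> level 1, offset = {NODE_DIR1_BLOCK, 77}   (1000 - 923)
 *	block 3000 -> level 2, offset = {NODE_IND1_BLOCK, 0, 41}
 *		      since 3000 - 923 - 2 * 1018 = 41 and 41 / 1018 = 0
 *
 * noffset[] receives the matching node-page offsets inside the inode's node
 * tree, counted in the same order the blocks above are laid out.
 */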
390e05df3b1SJaegeuk Kim 
391e05df3b1SJaegeuk Kim /*
392e05df3b1SJaegeuk Kim  * Caller should call f2fs_put_dnode(dn).
39339936837SJaegeuk Kim  * Also, it should grab and release a mutex by calling mutex_lock_op() and
39439936837SJaegeuk Kim  * mutex_unlock_op(), but only when mode is ALLOC_NODE.
39539936837SJaegeuk Kim  * For the lookup-only modes, we don't need to care about the mutex.
396e05df3b1SJaegeuk Kim  */
397266e97a8SJaegeuk Kim int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
398e05df3b1SJaegeuk Kim {
399e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
400e05df3b1SJaegeuk Kim 	struct page *npage[4];
401e05df3b1SJaegeuk Kim 	struct page *parent;
402e05df3b1SJaegeuk Kim 	int offset[4];
403e05df3b1SJaegeuk Kim 	unsigned int noffset[4];
404e05df3b1SJaegeuk Kim 	nid_t nids[4];
405e05df3b1SJaegeuk Kim 	int level, i;
406e05df3b1SJaegeuk Kim 	int err = 0;
407e05df3b1SJaegeuk Kim 
408e05df3b1SJaegeuk Kim 	level = get_node_path(index, offset, noffset);
409e05df3b1SJaegeuk Kim 
410e05df3b1SJaegeuk Kim 	nids[0] = dn->inode->i_ino;
4111646cfacSJaegeuk Kim 	npage[0] = dn->inode_page;
4121646cfacSJaegeuk Kim 
4131646cfacSJaegeuk Kim 	if (!npage[0]) {
414e05df3b1SJaegeuk Kim 		npage[0] = get_node_page(sbi, nids[0]);
415e05df3b1SJaegeuk Kim 		if (IS_ERR(npage[0]))
416e05df3b1SJaegeuk Kim 			return PTR_ERR(npage[0]);
4171646cfacSJaegeuk Kim 	}
418e05df3b1SJaegeuk Kim 	parent = npage[0];
41952c2db3fSChangman Lee 	if (level != 0)
420e05df3b1SJaegeuk Kim 		nids[1] = get_nid(parent, offset[0], true);
421e05df3b1SJaegeuk Kim 	dn->inode_page = npage[0];
422e05df3b1SJaegeuk Kim 	dn->inode_page_locked = true;
423e05df3b1SJaegeuk Kim 
424e05df3b1SJaegeuk Kim 	/* get indirect or direct nodes */
425e05df3b1SJaegeuk Kim 	for (i = 1; i <= level; i++) {
426e05df3b1SJaegeuk Kim 		bool done = false;
427e05df3b1SJaegeuk Kim 
428266e97a8SJaegeuk Kim 		if (!nids[i] && mode == ALLOC_NODE) {
429e05df3b1SJaegeuk Kim 			/* alloc new node */
430e05df3b1SJaegeuk Kim 			if (!alloc_nid(sbi, &(nids[i]))) {
431e05df3b1SJaegeuk Kim 				err = -ENOSPC;
432e05df3b1SJaegeuk Kim 				goto release_pages;
433e05df3b1SJaegeuk Kim 			}
434e05df3b1SJaegeuk Kim 
435e05df3b1SJaegeuk Kim 			dn->nid = nids[i];
4368ae8f162SJaegeuk Kim 			npage[i] = new_node_page(dn, noffset[i], NULL);
437e05df3b1SJaegeuk Kim 			if (IS_ERR(npage[i])) {
438e05df3b1SJaegeuk Kim 				alloc_nid_failed(sbi, nids[i]);
439e05df3b1SJaegeuk Kim 				err = PTR_ERR(npage[i]);
440e05df3b1SJaegeuk Kim 				goto release_pages;
441e05df3b1SJaegeuk Kim 			}
442e05df3b1SJaegeuk Kim 
443e05df3b1SJaegeuk Kim 			set_nid(parent, offset[i - 1], nids[i], i == 1);
444e05df3b1SJaegeuk Kim 			alloc_nid_done(sbi, nids[i]);
445e05df3b1SJaegeuk Kim 			done = true;
446266e97a8SJaegeuk Kim 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
447e05df3b1SJaegeuk Kim 			npage[i] = get_node_page_ra(parent, offset[i - 1]);
448e05df3b1SJaegeuk Kim 			if (IS_ERR(npage[i])) {
449e05df3b1SJaegeuk Kim 				err = PTR_ERR(npage[i]);
450e05df3b1SJaegeuk Kim 				goto release_pages;
451e05df3b1SJaegeuk Kim 			}
452e05df3b1SJaegeuk Kim 			done = true;
453e05df3b1SJaegeuk Kim 		}
454e05df3b1SJaegeuk Kim 		if (i == 1) {
455e05df3b1SJaegeuk Kim 			dn->inode_page_locked = false;
456e05df3b1SJaegeuk Kim 			unlock_page(parent);
457e05df3b1SJaegeuk Kim 		} else {
458e05df3b1SJaegeuk Kim 			f2fs_put_page(parent, 1);
459e05df3b1SJaegeuk Kim 		}
460e05df3b1SJaegeuk Kim 
461e05df3b1SJaegeuk Kim 		if (!done) {
462e05df3b1SJaegeuk Kim 			npage[i] = get_node_page(sbi, nids[i]);
463e05df3b1SJaegeuk Kim 			if (IS_ERR(npage[i])) {
464e05df3b1SJaegeuk Kim 				err = PTR_ERR(npage[i]);
465e05df3b1SJaegeuk Kim 				f2fs_put_page(npage[0], 0);
466e05df3b1SJaegeuk Kim 				goto release_out;
467e05df3b1SJaegeuk Kim 			}
468e05df3b1SJaegeuk Kim 		}
469e05df3b1SJaegeuk Kim 		if (i < level) {
470e05df3b1SJaegeuk Kim 			parent = npage[i];
471e05df3b1SJaegeuk Kim 			nids[i + 1] = get_nid(parent, offset[i], false);
472e05df3b1SJaegeuk Kim 		}
473e05df3b1SJaegeuk Kim 	}
474e05df3b1SJaegeuk Kim 	dn->nid = nids[level];
475e05df3b1SJaegeuk Kim 	dn->ofs_in_node = offset[level];
476e05df3b1SJaegeuk Kim 	dn->node_page = npage[level];
477e05df3b1SJaegeuk Kim 	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
478e05df3b1SJaegeuk Kim 	return 0;
479e05df3b1SJaegeuk Kim 
480e05df3b1SJaegeuk Kim release_pages:
481e05df3b1SJaegeuk Kim 	f2fs_put_page(parent, 1);
482e05df3b1SJaegeuk Kim 	if (i > 1)
483e05df3b1SJaegeuk Kim 		f2fs_put_page(npage[0], 0);
484e05df3b1SJaegeuk Kim release_out:
485e05df3b1SJaegeuk Kim 	dn->inode_page = NULL;
486e05df3b1SJaegeuk Kim 	dn->node_page = NULL;
487e05df3b1SJaegeuk Kim 	return err;
488e05df3b1SJaegeuk Kim }
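
/*
 * A minimal lookup-only caller sketch (LOOKUP_NODE is assumed to be the plain
 * lookup mode declared in node.h beside ALLOC_NODE and LOOKUP_NODE_RA):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 *
 * Callers that pass ALLOC_NODE must additionally hold mutex_lock_op(), as
 * noted in the comment above the function.
 */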
489e05df3b1SJaegeuk Kim 
490e05df3b1SJaegeuk Kim static void truncate_node(struct dnode_of_data *dn)
491e05df3b1SJaegeuk Kim {
492e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
493e05df3b1SJaegeuk Kim 	struct node_info ni;
494e05df3b1SJaegeuk Kim 
495e05df3b1SJaegeuk Kim 	get_node_info(sbi, dn->nid, &ni);
49671e9fec5SJaegeuk Kim 	if (dn->inode->i_blocks == 0) {
49771e9fec5SJaegeuk Kim 		BUG_ON(ni.blk_addr != NULL_ADDR);
49871e9fec5SJaegeuk Kim 		goto invalidate;
49971e9fec5SJaegeuk Kim 	}
500e05df3b1SJaegeuk Kim 	BUG_ON(ni.blk_addr == NULL_ADDR);
501e05df3b1SJaegeuk Kim 
502e05df3b1SJaegeuk Kim 	/* Deallocate node address */
50371e9fec5SJaegeuk Kim 	invalidate_blocks(sbi, ni.blk_addr);
504e05df3b1SJaegeuk Kim 	dec_valid_node_count(sbi, dn->inode, 1);
505e05df3b1SJaegeuk Kim 	set_node_addr(sbi, &ni, NULL_ADDR);
506e05df3b1SJaegeuk Kim 
507e05df3b1SJaegeuk Kim 	if (dn->nid == dn->inode->i_ino) {
508e05df3b1SJaegeuk Kim 		remove_orphan_inode(sbi, dn->nid);
509e05df3b1SJaegeuk Kim 		dec_valid_inode_count(sbi);
510e05df3b1SJaegeuk Kim 	} else {
511e05df3b1SJaegeuk Kim 		sync_inode_page(dn);
512e05df3b1SJaegeuk Kim 	}
51371e9fec5SJaegeuk Kim invalidate:
514e05df3b1SJaegeuk Kim 	clear_node_page_dirty(dn->node_page);
515e05df3b1SJaegeuk Kim 	F2FS_SET_SB_DIRT(sbi);
516e05df3b1SJaegeuk Kim 
517e05df3b1SJaegeuk Kim 	f2fs_put_page(dn->node_page, 1);
518e05df3b1SJaegeuk Kim 	dn->node_page = NULL;
51951dd6249SNamjae Jeon 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
520e05df3b1SJaegeuk Kim }
521e05df3b1SJaegeuk Kim 
522e05df3b1SJaegeuk Kim static int truncate_dnode(struct dnode_of_data *dn)
523e05df3b1SJaegeuk Kim {
524e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
525e05df3b1SJaegeuk Kim 	struct page *page;
526e05df3b1SJaegeuk Kim 
527e05df3b1SJaegeuk Kim 	if (dn->nid == 0)
528e05df3b1SJaegeuk Kim 		return 1;
529e05df3b1SJaegeuk Kim 
530e05df3b1SJaegeuk Kim 	/* get direct node */
531e05df3b1SJaegeuk Kim 	page = get_node_page(sbi, dn->nid);
532e05df3b1SJaegeuk Kim 	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
533e05df3b1SJaegeuk Kim 		return 1;
534e05df3b1SJaegeuk Kim 	else if (IS_ERR(page))
535e05df3b1SJaegeuk Kim 		return PTR_ERR(page);
536e05df3b1SJaegeuk Kim 
537e05df3b1SJaegeuk Kim 	/* Make dnode_of_data for parameter */
538e05df3b1SJaegeuk Kim 	dn->node_page = page;
539e05df3b1SJaegeuk Kim 	dn->ofs_in_node = 0;
540e05df3b1SJaegeuk Kim 	truncate_data_blocks(dn);
541e05df3b1SJaegeuk Kim 	truncate_node(dn);
542e05df3b1SJaegeuk Kim 	return 1;
543e05df3b1SJaegeuk Kim }
544e05df3b1SJaegeuk Kim 
545e05df3b1SJaegeuk Kim static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
546e05df3b1SJaegeuk Kim 						int ofs, int depth)
547e05df3b1SJaegeuk Kim {
548e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
549e05df3b1SJaegeuk Kim 	struct dnode_of_data rdn = *dn;
550e05df3b1SJaegeuk Kim 	struct page *page;
551e05df3b1SJaegeuk Kim 	struct f2fs_node *rn;
552e05df3b1SJaegeuk Kim 	nid_t child_nid;
553e05df3b1SJaegeuk Kim 	unsigned int child_nofs;
554e05df3b1SJaegeuk Kim 	int freed = 0;
555e05df3b1SJaegeuk Kim 	int i, ret;
556e05df3b1SJaegeuk Kim 
557e05df3b1SJaegeuk Kim 	if (dn->nid == 0)
558e05df3b1SJaegeuk Kim 		return NIDS_PER_BLOCK + 1;
559e05df3b1SJaegeuk Kim 
56051dd6249SNamjae Jeon 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
56151dd6249SNamjae Jeon 
562e05df3b1SJaegeuk Kim 	page = get_node_page(sbi, dn->nid);
56351dd6249SNamjae Jeon 	if (IS_ERR(page)) {
56451dd6249SNamjae Jeon 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
565e05df3b1SJaegeuk Kim 		return PTR_ERR(page);
56651dd6249SNamjae Jeon 	}
567e05df3b1SJaegeuk Kim 
56845590710SGu Zheng 	rn = F2FS_NODE(page);
569e05df3b1SJaegeuk Kim 	if (depth < 3) {
570e05df3b1SJaegeuk Kim 		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
571e05df3b1SJaegeuk Kim 			child_nid = le32_to_cpu(rn->in.nid[i]);
572e05df3b1SJaegeuk Kim 			if (child_nid == 0)
573e05df3b1SJaegeuk Kim 				continue;
574e05df3b1SJaegeuk Kim 			rdn.nid = child_nid;
575e05df3b1SJaegeuk Kim 			ret = truncate_dnode(&rdn);
576e05df3b1SJaegeuk Kim 			if (ret < 0)
577e05df3b1SJaegeuk Kim 				goto out_err;
578e05df3b1SJaegeuk Kim 			set_nid(page, i, 0, false);
579e05df3b1SJaegeuk Kim 		}
580e05df3b1SJaegeuk Kim 	} else {
581e05df3b1SJaegeuk Kim 		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
582e05df3b1SJaegeuk Kim 		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
583e05df3b1SJaegeuk Kim 			child_nid = le32_to_cpu(rn->in.nid[i]);
584e05df3b1SJaegeuk Kim 			if (child_nid == 0) {
585e05df3b1SJaegeuk Kim 				child_nofs += NIDS_PER_BLOCK + 1;
586e05df3b1SJaegeuk Kim 				continue;
587e05df3b1SJaegeuk Kim 			}
588e05df3b1SJaegeuk Kim 			rdn.nid = child_nid;
589e05df3b1SJaegeuk Kim 			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
590e05df3b1SJaegeuk Kim 			if (ret == (NIDS_PER_BLOCK + 1)) {
591e05df3b1SJaegeuk Kim 				set_nid(page, i, 0, false);
592e05df3b1SJaegeuk Kim 				child_nofs += ret;
593e05df3b1SJaegeuk Kim 			} else if (ret < 0 && ret != -ENOENT) {
594e05df3b1SJaegeuk Kim 				goto out_err;
595e05df3b1SJaegeuk Kim 			}
596e05df3b1SJaegeuk Kim 		}
597e05df3b1SJaegeuk Kim 		freed = child_nofs;
598e05df3b1SJaegeuk Kim 	}
599e05df3b1SJaegeuk Kim 
600e05df3b1SJaegeuk Kim 	if (!ofs) {
601e05df3b1SJaegeuk Kim 		/* remove current indirect node */
602e05df3b1SJaegeuk Kim 		dn->node_page = page;
603e05df3b1SJaegeuk Kim 		truncate_node(dn);
604e05df3b1SJaegeuk Kim 		freed++;
605e05df3b1SJaegeuk Kim 	} else {
606e05df3b1SJaegeuk Kim 		f2fs_put_page(page, 1);
607e05df3b1SJaegeuk Kim 	}
60851dd6249SNamjae Jeon 	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
609e05df3b1SJaegeuk Kim 	return freed;
610e05df3b1SJaegeuk Kim 
611e05df3b1SJaegeuk Kim out_err:
612e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 1);
61351dd6249SNamjae Jeon 	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
614e05df3b1SJaegeuk Kim 	return ret;
615e05df3b1SJaegeuk Kim }
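
/*
 * Return-value bookkeeping (a sketch): truncate_dnode() frees at most one
 * direct node and returns 1, while truncate_nodes() returns how many node
 * pages were freed below this point; a fully freed indirect block therefore
 * accounts for its NIDS_PER_BLOCK children plus itself (NIDS_PER_BLOCK + 1).
 * truncate_inode_blocks() adds the result to nofs so the node offsets stay
 * in step while it walks the remaining subtrees.
 */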
616e05df3b1SJaegeuk Kim 
617e05df3b1SJaegeuk Kim static int truncate_partial_nodes(struct dnode_of_data *dn,
618e05df3b1SJaegeuk Kim 			struct f2fs_inode *ri, int *offset, int depth)
619e05df3b1SJaegeuk Kim {
620e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
621e05df3b1SJaegeuk Kim 	struct page *pages[2];
622e05df3b1SJaegeuk Kim 	nid_t nid[3];
623e05df3b1SJaegeuk Kim 	nid_t child_nid;
624e05df3b1SJaegeuk Kim 	int err = 0;
625e05df3b1SJaegeuk Kim 	int i;
626e05df3b1SJaegeuk Kim 	int idx = depth - 2;
627e05df3b1SJaegeuk Kim 
628e05df3b1SJaegeuk Kim 	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
629e05df3b1SJaegeuk Kim 	if (!nid[0])
630e05df3b1SJaegeuk Kim 		return 0;
631e05df3b1SJaegeuk Kim 
632e05df3b1SJaegeuk Kim 	/* get indirect nodes in the path */
633e05df3b1SJaegeuk Kim 	for (i = 0; i < depth - 1; i++) {
634e05df3b1SJaegeuk Kim 		/* reference count will be increased */
635e05df3b1SJaegeuk Kim 		pages[i] = get_node_page(sbi, nid[i]);
636e05df3b1SJaegeuk Kim 		if (IS_ERR(pages[i])) {
637e05df3b1SJaegeuk Kim 			depth = i + 1;
638e05df3b1SJaegeuk Kim 			err = PTR_ERR(pages[i]);
639e05df3b1SJaegeuk Kim 			goto fail;
640e05df3b1SJaegeuk Kim 		}
641e05df3b1SJaegeuk Kim 		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
642e05df3b1SJaegeuk Kim 	}
643e05df3b1SJaegeuk Kim 
644e05df3b1SJaegeuk Kim 	/* free direct nodes linked to a partial indirect node */
645e05df3b1SJaegeuk Kim 	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
646e05df3b1SJaegeuk Kim 		child_nid = get_nid(pages[idx], i, false);
647e05df3b1SJaegeuk Kim 		if (!child_nid)
648e05df3b1SJaegeuk Kim 			continue;
649e05df3b1SJaegeuk Kim 		dn->nid = child_nid;
650e05df3b1SJaegeuk Kim 		err = truncate_dnode(dn);
651e05df3b1SJaegeuk Kim 		if (err < 0)
652e05df3b1SJaegeuk Kim 			goto fail;
653e05df3b1SJaegeuk Kim 		set_nid(pages[idx], i, 0, false);
654e05df3b1SJaegeuk Kim 	}
655e05df3b1SJaegeuk Kim 
656e05df3b1SJaegeuk Kim 	if (offset[depth - 1] == 0) {
657e05df3b1SJaegeuk Kim 		dn->node_page = pages[idx];
658e05df3b1SJaegeuk Kim 		dn->nid = nid[idx];
659e05df3b1SJaegeuk Kim 		truncate_node(dn);
660e05df3b1SJaegeuk Kim 	} else {
661e05df3b1SJaegeuk Kim 		f2fs_put_page(pages[idx], 1);
662e05df3b1SJaegeuk Kim 	}
663e05df3b1SJaegeuk Kim 	offset[idx]++;
664e05df3b1SJaegeuk Kim 	offset[depth - 1] = 0;
665e05df3b1SJaegeuk Kim fail:
666e05df3b1SJaegeuk Kim 	for (i = depth - 3; i >= 0; i--)
667e05df3b1SJaegeuk Kim 		f2fs_put_page(pages[i], 1);
66851dd6249SNamjae Jeon 
66951dd6249SNamjae Jeon 	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
67051dd6249SNamjae Jeon 
671e05df3b1SJaegeuk Kim 	return err;
672e05df3b1SJaegeuk Kim }
673e05df3b1SJaegeuk Kim 
6740a8165d7SJaegeuk Kim /*
675e05df3b1SJaegeuk Kim  * All the block addresses of data and nodes should be nullified.
676e05df3b1SJaegeuk Kim  */
677e05df3b1SJaegeuk Kim int truncate_inode_blocks(struct inode *inode, pgoff_t from)
678e05df3b1SJaegeuk Kim {
679e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
680afcb7ca0SJaegeuk Kim 	struct address_space *node_mapping = sbi->node_inode->i_mapping;
681e05df3b1SJaegeuk Kim 	int err = 0, cont = 1;
682e05df3b1SJaegeuk Kim 	int level, offset[4], noffset[4];
6837dd690c8SJaegeuk Kim 	unsigned int nofs = 0;
684e05df3b1SJaegeuk Kim 	struct f2fs_node *rn;
685e05df3b1SJaegeuk Kim 	struct dnode_of_data dn;
686e05df3b1SJaegeuk Kim 	struct page *page;
687e05df3b1SJaegeuk Kim 
68851dd6249SNamjae Jeon 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
68951dd6249SNamjae Jeon 
690e05df3b1SJaegeuk Kim 	level = get_node_path(from, offset, noffset);
691afcb7ca0SJaegeuk Kim restart:
692e05df3b1SJaegeuk Kim 	page = get_node_page(sbi, inode->i_ino);
69351dd6249SNamjae Jeon 	if (IS_ERR(page)) {
69451dd6249SNamjae Jeon 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
695e05df3b1SJaegeuk Kim 		return PTR_ERR(page);
69651dd6249SNamjae Jeon 	}
697e05df3b1SJaegeuk Kim 
698e05df3b1SJaegeuk Kim 	set_new_dnode(&dn, inode, page, NULL, 0);
699e05df3b1SJaegeuk Kim 	unlock_page(page);
700e05df3b1SJaegeuk Kim 
70145590710SGu Zheng 	rn = F2FS_NODE(page);
702e05df3b1SJaegeuk Kim 	switch (level) {
703e05df3b1SJaegeuk Kim 	case 0:
704e05df3b1SJaegeuk Kim 	case 1:
705e05df3b1SJaegeuk Kim 		nofs = noffset[1];
706e05df3b1SJaegeuk Kim 		break;
707e05df3b1SJaegeuk Kim 	case 2:
708e05df3b1SJaegeuk Kim 		nofs = noffset[1];
709e05df3b1SJaegeuk Kim 		if (!offset[level - 1])
710e05df3b1SJaegeuk Kim 			goto skip_partial;
711e05df3b1SJaegeuk Kim 		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
712e05df3b1SJaegeuk Kim 		if (err < 0 && err != -ENOENT)
713e05df3b1SJaegeuk Kim 			goto fail;
714e05df3b1SJaegeuk Kim 		nofs += 1 + NIDS_PER_BLOCK;
715e05df3b1SJaegeuk Kim 		break;
716e05df3b1SJaegeuk Kim 	case 3:
717e05df3b1SJaegeuk Kim 		nofs = 5 + 2 * NIDS_PER_BLOCK;
718e05df3b1SJaegeuk Kim 		if (!offset[level - 1])
719e05df3b1SJaegeuk Kim 			goto skip_partial;
720e05df3b1SJaegeuk Kim 		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
721e05df3b1SJaegeuk Kim 		if (err < 0 && err != -ENOENT)
722e05df3b1SJaegeuk Kim 			goto fail;
723e05df3b1SJaegeuk Kim 		break;
724e05df3b1SJaegeuk Kim 	default:
725e05df3b1SJaegeuk Kim 		BUG();
726e05df3b1SJaegeuk Kim 	}
727e05df3b1SJaegeuk Kim 
728e05df3b1SJaegeuk Kim skip_partial:
729e05df3b1SJaegeuk Kim 	while (cont) {
730e05df3b1SJaegeuk Kim 		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
731e05df3b1SJaegeuk Kim 		switch (offset[0]) {
732e05df3b1SJaegeuk Kim 		case NODE_DIR1_BLOCK:
733e05df3b1SJaegeuk Kim 		case NODE_DIR2_BLOCK:
734e05df3b1SJaegeuk Kim 			err = truncate_dnode(&dn);
735e05df3b1SJaegeuk Kim 			break;
736e05df3b1SJaegeuk Kim 
737e05df3b1SJaegeuk Kim 		case NODE_IND1_BLOCK:
738e05df3b1SJaegeuk Kim 		case NODE_IND2_BLOCK:
739e05df3b1SJaegeuk Kim 			err = truncate_nodes(&dn, nofs, offset[1], 2);
740e05df3b1SJaegeuk Kim 			break;
741e05df3b1SJaegeuk Kim 
742e05df3b1SJaegeuk Kim 		case NODE_DIND_BLOCK:
743e05df3b1SJaegeuk Kim 			err = truncate_nodes(&dn, nofs, offset[1], 3);
744e05df3b1SJaegeuk Kim 			cont = 0;
745e05df3b1SJaegeuk Kim 			break;
746e05df3b1SJaegeuk Kim 
747e05df3b1SJaegeuk Kim 		default:
748e05df3b1SJaegeuk Kim 			BUG();
749e05df3b1SJaegeuk Kim 		}
750e05df3b1SJaegeuk Kim 		if (err < 0 && err != -ENOENT)
751e05df3b1SJaegeuk Kim 			goto fail;
752e05df3b1SJaegeuk Kim 		if (offset[1] == 0 &&
753e05df3b1SJaegeuk Kim 				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
754e05df3b1SJaegeuk Kim 			lock_page(page);
755afcb7ca0SJaegeuk Kim 			if (page->mapping != node_mapping) {
756afcb7ca0SJaegeuk Kim 				f2fs_put_page(page, 1);
757afcb7ca0SJaegeuk Kim 				goto restart;
758afcb7ca0SJaegeuk Kim 			}
759e05df3b1SJaegeuk Kim 			wait_on_page_writeback(page);
760e05df3b1SJaegeuk Kim 			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
761e05df3b1SJaegeuk Kim 			set_page_dirty(page);
762e05df3b1SJaegeuk Kim 			unlock_page(page);
763e05df3b1SJaegeuk Kim 		}
764e05df3b1SJaegeuk Kim 		offset[1] = 0;
765e05df3b1SJaegeuk Kim 		offset[0]++;
766e05df3b1SJaegeuk Kim 		nofs += err;
767e05df3b1SJaegeuk Kim 	}
768e05df3b1SJaegeuk Kim fail:
769e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 0);
77051dd6249SNamjae Jeon 	trace_f2fs_truncate_inode_blocks_exit(inode, err);
771e05df3b1SJaegeuk Kim 	return err > 0 ? 0 : err;
772e05df3b1SJaegeuk Kim }
773e05df3b1SJaegeuk Kim 
77439936837SJaegeuk Kim /*
77539936837SJaegeuk Kim  * Caller should grab and release a mutex by calling mutex_lock_op() and
77639936837SJaegeuk Kim  * mutex_unlock_op().
77739936837SJaegeuk Kim  */
778e05df3b1SJaegeuk Kim int remove_inode_page(struct inode *inode)
779e05df3b1SJaegeuk Kim {
780e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
781e05df3b1SJaegeuk Kim 	struct page *page;
782e05df3b1SJaegeuk Kim 	nid_t ino = inode->i_ino;
783e05df3b1SJaegeuk Kim 	struct dnode_of_data dn;
784e05df3b1SJaegeuk Kim 
785e05df3b1SJaegeuk Kim 	page = get_node_page(sbi, ino);
78639936837SJaegeuk Kim 	if (IS_ERR(page))
787e05df3b1SJaegeuk Kim 		return PTR_ERR(page);
788e05df3b1SJaegeuk Kim 
789e05df3b1SJaegeuk Kim 	if (F2FS_I(inode)->i_xattr_nid) {
790e05df3b1SJaegeuk Kim 		nid_t nid = F2FS_I(inode)->i_xattr_nid;
791e05df3b1SJaegeuk Kim 		struct page *npage = get_node_page(sbi, nid);
792e05df3b1SJaegeuk Kim 
79339936837SJaegeuk Kim 		if (IS_ERR(npage))
794e05df3b1SJaegeuk Kim 			return PTR_ERR(npage);
795e05df3b1SJaegeuk Kim 
796e05df3b1SJaegeuk Kim 		F2FS_I(inode)->i_xattr_nid = 0;
797e05df3b1SJaegeuk Kim 		set_new_dnode(&dn, inode, page, npage, nid);
798e05df3b1SJaegeuk Kim 		dn.inode_page_locked = 1;
799e05df3b1SJaegeuk Kim 		truncate_node(&dn);
800e05df3b1SJaegeuk Kim 	}
80171e9fec5SJaegeuk Kim 
80271e9fec5SJaegeuk Kim 	/* 0 is possible after f2fs_new_inode() has failed */
80371e9fec5SJaegeuk Kim 	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
804e05df3b1SJaegeuk Kim 	set_new_dnode(&dn, inode, page, page, ino);
805e05df3b1SJaegeuk Kim 	truncate_node(&dn);
806e05df3b1SJaegeuk Kim 	return 0;
807e05df3b1SJaegeuk Kim }
808e05df3b1SJaegeuk Kim 
80944a83ff6SJaegeuk Kim struct page *new_inode_page(struct inode *inode, const struct qstr *name)
810e05df3b1SJaegeuk Kim {
811e05df3b1SJaegeuk Kim 	struct dnode_of_data dn;
812e05df3b1SJaegeuk Kim 
813e05df3b1SJaegeuk Kim 	/* allocate inode page for new inode */
814e05df3b1SJaegeuk Kim 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
81544a83ff6SJaegeuk Kim 
81644a83ff6SJaegeuk Kim 	/* caller should f2fs_put_page(page, 1); */
8178ae8f162SJaegeuk Kim 	return new_node_page(&dn, 0, NULL);
818e05df3b1SJaegeuk Kim }
819e05df3b1SJaegeuk Kim 
8208ae8f162SJaegeuk Kim struct page *new_node_page(struct dnode_of_data *dn,
8218ae8f162SJaegeuk Kim 				unsigned int ofs, struct page *ipage)
822e05df3b1SJaegeuk Kim {
823e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
824e05df3b1SJaegeuk Kim 	struct address_space *mapping = sbi->node_inode->i_mapping;
825e05df3b1SJaegeuk Kim 	struct node_info old_ni, new_ni;
826e05df3b1SJaegeuk Kim 	struct page *page;
827e05df3b1SJaegeuk Kim 	int err;
828e05df3b1SJaegeuk Kim 
829e05df3b1SJaegeuk Kim 	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
830e05df3b1SJaegeuk Kim 		return ERR_PTR(-EPERM);
831e05df3b1SJaegeuk Kim 
832e05df3b1SJaegeuk Kim 	page = grab_cache_page(mapping, dn->nid);
833e05df3b1SJaegeuk Kim 	if (!page)
834e05df3b1SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
835e05df3b1SJaegeuk Kim 
836e05df3b1SJaegeuk Kim 	get_node_info(sbi, dn->nid, &old_ni);
837e05df3b1SJaegeuk Kim 
838e05df3b1SJaegeuk Kim 	SetPageUptodate(page);
839e05df3b1SJaegeuk Kim 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
840e05df3b1SJaegeuk Kim 
841e05df3b1SJaegeuk Kim 	/* Reinitialize old_ni with new node page */
842e05df3b1SJaegeuk Kim 	BUG_ON(old_ni.blk_addr != NULL_ADDR);
843e05df3b1SJaegeuk Kim 	new_ni = old_ni;
844e05df3b1SJaegeuk Kim 	new_ni.ino = dn->inode->i_ino;
845e05df3b1SJaegeuk Kim 
846e05df3b1SJaegeuk Kim 	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
847e05df3b1SJaegeuk Kim 		err = -ENOSPC;
848e05df3b1SJaegeuk Kim 		goto fail;
849e05df3b1SJaegeuk Kim 	}
850e05df3b1SJaegeuk Kim 	set_node_addr(sbi, &new_ni, NEW_ADDR);
851398b1ac5SJaegeuk Kim 	set_cold_node(dn->inode, page);
852e05df3b1SJaegeuk Kim 
853e05df3b1SJaegeuk Kim 	dn->node_page = page;
8548ae8f162SJaegeuk Kim 	if (ipage)
8558ae8f162SJaegeuk Kim 		update_inode(dn->inode, ipage);
8568ae8f162SJaegeuk Kim 	else
857e05df3b1SJaegeuk Kim 		sync_inode_page(dn);
858e05df3b1SJaegeuk Kim 	set_page_dirty(page);
859e05df3b1SJaegeuk Kim 	if (ofs == 0)
860e05df3b1SJaegeuk Kim 		inc_valid_inode_count(sbi);
861e05df3b1SJaegeuk Kim 
862e05df3b1SJaegeuk Kim 	return page;
863e05df3b1SJaegeuk Kim 
864e05df3b1SJaegeuk Kim fail:
86571e9fec5SJaegeuk Kim 	clear_node_page_dirty(page);
866e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 1);
867e05df3b1SJaegeuk Kim 	return ERR_PTR(err);
868e05df3b1SJaegeuk Kim }
869e05df3b1SJaegeuk Kim 
87056ae674cSJaegeuk Kim /*
87156ae674cSJaegeuk Kim  * Caller should act on the return value as follows:
87256ae674cSJaegeuk Kim  * 0: f2fs_put_page(page, 0)
87356ae674cSJaegeuk Kim  * LOCKED_PAGE: f2fs_put_page(page, 1)
87456ae674cSJaegeuk Kim  * error: do nothing
87556ae674cSJaegeuk Kim  */
876e05df3b1SJaegeuk Kim static int read_node_page(struct page *page, int type)
877e05df3b1SJaegeuk Kim {
878e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
879e05df3b1SJaegeuk Kim 	struct node_info ni;
880e05df3b1SJaegeuk Kim 
881e05df3b1SJaegeuk Kim 	get_node_info(sbi, page->index, &ni);
882e05df3b1SJaegeuk Kim 
883393ff91fSJaegeuk Kim 	if (ni.blk_addr == NULL_ADDR) {
884393ff91fSJaegeuk Kim 		f2fs_put_page(page, 1);
885e05df3b1SJaegeuk Kim 		return -ENOENT;
886393ff91fSJaegeuk Kim 	}
887393ff91fSJaegeuk Kim 
88856ae674cSJaegeuk Kim 	if (PageUptodate(page))
88956ae674cSJaegeuk Kim 		return LOCKED_PAGE;
890393ff91fSJaegeuk Kim 
891e05df3b1SJaegeuk Kim 	return f2fs_readpage(sbi, page, ni.blk_addr, type);
892e05df3b1SJaegeuk Kim }
893e05df3b1SJaegeuk Kim 
8940a8165d7SJaegeuk Kim /*
895e05df3b1SJaegeuk Kim  * Readahead a node page
896e05df3b1SJaegeuk Kim  */
897e05df3b1SJaegeuk Kim void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
898e05df3b1SJaegeuk Kim {
899e05df3b1SJaegeuk Kim 	struct address_space *mapping = sbi->node_inode->i_mapping;
900e05df3b1SJaegeuk Kim 	struct page *apage;
90156ae674cSJaegeuk Kim 	int err;
902e05df3b1SJaegeuk Kim 
903e05df3b1SJaegeuk Kim 	apage = find_get_page(mapping, nid);
904393ff91fSJaegeuk Kim 	if (apage && PageUptodate(apage)) {
905393ff91fSJaegeuk Kim 		f2fs_put_page(apage, 0);
906393ff91fSJaegeuk Kim 		return;
907393ff91fSJaegeuk Kim 	}
908e05df3b1SJaegeuk Kim 	f2fs_put_page(apage, 0);
909e05df3b1SJaegeuk Kim 
910e05df3b1SJaegeuk Kim 	apage = grab_cache_page(mapping, nid);
911e05df3b1SJaegeuk Kim 	if (!apage)
912e05df3b1SJaegeuk Kim 		return;
913e05df3b1SJaegeuk Kim 
91456ae674cSJaegeuk Kim 	err = read_node_page(apage, READA);
91556ae674cSJaegeuk Kim 	if (err == 0)
916369a708cSJaegeuk Kim 		f2fs_put_page(apage, 0);
91756ae674cSJaegeuk Kim 	else if (err == LOCKED_PAGE)
91856ae674cSJaegeuk Kim 		f2fs_put_page(apage, 1);
919a2b52a59SNamjae Jeon 	return;
920e05df3b1SJaegeuk Kim }
921e05df3b1SJaegeuk Kim 
922e05df3b1SJaegeuk Kim struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
923e05df3b1SJaegeuk Kim {
924e05df3b1SJaegeuk Kim 	struct address_space *mapping = sbi->node_inode->i_mapping;
92556ae674cSJaegeuk Kim 	struct page *page;
92656ae674cSJaegeuk Kim 	int err;
927afcb7ca0SJaegeuk Kim repeat:
928e05df3b1SJaegeuk Kim 	page = grab_cache_page(mapping, nid);
929e05df3b1SJaegeuk Kim 	if (!page)
930e05df3b1SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
931e05df3b1SJaegeuk Kim 
932e05df3b1SJaegeuk Kim 	err = read_node_page(page, READ_SYNC);
93356ae674cSJaegeuk Kim 	if (err < 0)
934e05df3b1SJaegeuk Kim 		return ERR_PTR(err);
93556ae674cSJaegeuk Kim 	else if (err == LOCKED_PAGE)
93656ae674cSJaegeuk Kim 		goto got_it;
937e05df3b1SJaegeuk Kim 
938393ff91fSJaegeuk Kim 	lock_page(page);
939393ff91fSJaegeuk Kim 	if (!PageUptodate(page)) {
940393ff91fSJaegeuk Kim 		f2fs_put_page(page, 1);
941393ff91fSJaegeuk Kim 		return ERR_PTR(-EIO);
942393ff91fSJaegeuk Kim 	}
943afcb7ca0SJaegeuk Kim 	if (page->mapping != mapping) {
944afcb7ca0SJaegeuk Kim 		f2fs_put_page(page, 1);
945afcb7ca0SJaegeuk Kim 		goto repeat;
946afcb7ca0SJaegeuk Kim 	}
94756ae674cSJaegeuk Kim got_it:
948e05df3b1SJaegeuk Kim 	BUG_ON(nid != nid_of_node(page));
949e05df3b1SJaegeuk Kim 	mark_page_accessed(page);
950e05df3b1SJaegeuk Kim 	return page;
951e05df3b1SJaegeuk Kim }
952e05df3b1SJaegeuk Kim 
9530a8165d7SJaegeuk Kim /*
954e05df3b1SJaegeuk Kim  * Return a locked page for the desired node page.
955e05df3b1SJaegeuk Kim  * It also readaheads up to MAX_RA_NODE sibling node pages.
956e05df3b1SJaegeuk Kim  */
957e05df3b1SJaegeuk Kim struct page *get_node_page_ra(struct page *parent, int start)
958e05df3b1SJaegeuk Kim {
959e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
960e05df3b1SJaegeuk Kim 	struct address_space *mapping = sbi->node_inode->i_mapping;
961c718379bSJaegeuk Kim 	struct blk_plug plug;
962e05df3b1SJaegeuk Kim 	struct page *page;
96356ae674cSJaegeuk Kim 	int err, i, end;
96456ae674cSJaegeuk Kim 	nid_t nid;
965e05df3b1SJaegeuk Kim 
966e05df3b1SJaegeuk Kim 	/* First, try getting the desired direct node. */
967e05df3b1SJaegeuk Kim 	nid = get_nid(parent, start, false);
968e05df3b1SJaegeuk Kim 	if (!nid)
969e05df3b1SJaegeuk Kim 		return ERR_PTR(-ENOENT);
970afcb7ca0SJaegeuk Kim repeat:
971e05df3b1SJaegeuk Kim 	page = grab_cache_page(mapping, nid);
972e05df3b1SJaegeuk Kim 	if (!page)
973e05df3b1SJaegeuk Kim 		return ERR_PTR(-ENOMEM);
974e05df3b1SJaegeuk Kim 
97566d36a29SJaegeuk Kim 	err = read_node_page(page, READ_SYNC);
97656ae674cSJaegeuk Kim 	if (err < 0)
977e05df3b1SJaegeuk Kim 		return ERR_PTR(err);
97856ae674cSJaegeuk Kim 	else if (err == LOCKED_PAGE)
97956ae674cSJaegeuk Kim 		goto page_hit;
980e05df3b1SJaegeuk Kim 
981c718379bSJaegeuk Kim 	blk_start_plug(&plug);
982c718379bSJaegeuk Kim 
983e05df3b1SJaegeuk Kim 	/* Then, try readahead for siblings of the desired node */
984e05df3b1SJaegeuk Kim 	end = start + MAX_RA_NODE;
985e05df3b1SJaegeuk Kim 	end = min(end, NIDS_PER_BLOCK);
986e05df3b1SJaegeuk Kim 	for (i = start + 1; i < end; i++) {
987e05df3b1SJaegeuk Kim 		nid = get_nid(parent, i, false);
988e05df3b1SJaegeuk Kim 		if (!nid)
989e05df3b1SJaegeuk Kim 			continue;
990e05df3b1SJaegeuk Kim 		ra_node_page(sbi, nid);
991e05df3b1SJaegeuk Kim 	}
992e05df3b1SJaegeuk Kim 
993c718379bSJaegeuk Kim 	blk_finish_plug(&plug);
994c718379bSJaegeuk Kim 
995e05df3b1SJaegeuk Kim 	lock_page(page);
996afcb7ca0SJaegeuk Kim 	if (page->mapping != mapping) {
997afcb7ca0SJaegeuk Kim 		f2fs_put_page(page, 1);
998afcb7ca0SJaegeuk Kim 		goto repeat;
999afcb7ca0SJaegeuk Kim 	}
1000e0f56cb4SNamjae Jeon page_hit:
100156ae674cSJaegeuk Kim 	if (!PageUptodate(page)) {
1002e05df3b1SJaegeuk Kim 		f2fs_put_page(page, 1);
1003e05df3b1SJaegeuk Kim 		return ERR_PTR(-EIO);
1004e05df3b1SJaegeuk Kim 	}
1005393ff91fSJaegeuk Kim 	mark_page_accessed(page);
1006e05df3b1SJaegeuk Kim 	return page;
1007e05df3b1SJaegeuk Kim }
1008e05df3b1SJaegeuk Kim 
1009e05df3b1SJaegeuk Kim void sync_inode_page(struct dnode_of_data *dn)
1010e05df3b1SJaegeuk Kim {
1011e05df3b1SJaegeuk Kim 	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
1012e05df3b1SJaegeuk Kim 		update_inode(dn->inode, dn->node_page);
1013e05df3b1SJaegeuk Kim 	} else if (dn->inode_page) {
1014e05df3b1SJaegeuk Kim 		if (!dn->inode_page_locked)
1015e05df3b1SJaegeuk Kim 			lock_page(dn->inode_page);
1016e05df3b1SJaegeuk Kim 		update_inode(dn->inode, dn->inode_page);
1017e05df3b1SJaegeuk Kim 		if (!dn->inode_page_locked)
1018e05df3b1SJaegeuk Kim 			unlock_page(dn->inode_page);
1019e05df3b1SJaegeuk Kim 	} else {
102039936837SJaegeuk Kim 		update_inode_page(dn->inode);
1021e05df3b1SJaegeuk Kim 	}
1022e05df3b1SJaegeuk Kim }
1023e05df3b1SJaegeuk Kim 
1024e05df3b1SJaegeuk Kim int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1025e05df3b1SJaegeuk Kim 					struct writeback_control *wbc)
1026e05df3b1SJaegeuk Kim {
1027e05df3b1SJaegeuk Kim 	struct address_space *mapping = sbi->node_inode->i_mapping;
1028e05df3b1SJaegeuk Kim 	pgoff_t index, end;
1029e05df3b1SJaegeuk Kim 	struct pagevec pvec;
1030e05df3b1SJaegeuk Kim 	int step = ino ? 2 : 0;
1031e05df3b1SJaegeuk Kim 	int nwritten = 0, wrote = 0;
1032e05df3b1SJaegeuk Kim 
1033e05df3b1SJaegeuk Kim 	pagevec_init(&pvec, 0);
1034e05df3b1SJaegeuk Kim 
1035e05df3b1SJaegeuk Kim next_step:
1036e05df3b1SJaegeuk Kim 	index = 0;
1037e05df3b1SJaegeuk Kim 	end = LONG_MAX;
1038e05df3b1SJaegeuk Kim 
1039e05df3b1SJaegeuk Kim 	while (index <= end) {
1040e05df3b1SJaegeuk Kim 		int i, nr_pages;
1041e05df3b1SJaegeuk Kim 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1042e05df3b1SJaegeuk Kim 				PAGECACHE_TAG_DIRTY,
1043e05df3b1SJaegeuk Kim 				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1044e05df3b1SJaegeuk Kim 		if (nr_pages == 0)
1045e05df3b1SJaegeuk Kim 			break;
1046e05df3b1SJaegeuk Kim 
1047e05df3b1SJaegeuk Kim 		for (i = 0; i < nr_pages; i++) {
1048e05df3b1SJaegeuk Kim 			struct page *page = pvec.pages[i];
1049e05df3b1SJaegeuk Kim 
1050e05df3b1SJaegeuk Kim 			/*
1051e05df3b1SJaegeuk Kim 			 * flushing sequence with step:
1052e05df3b1SJaegeuk Kim 			 * 0. indirect nodes
1053e05df3b1SJaegeuk Kim 			 * 1. dentry dnodes
1054e05df3b1SJaegeuk Kim 			 * 2. file dnodes
1055e05df3b1SJaegeuk Kim 			 */
1056e05df3b1SJaegeuk Kim 			if (step == 0 && IS_DNODE(page))
1057e05df3b1SJaegeuk Kim 				continue;
1058e05df3b1SJaegeuk Kim 			if (step == 1 && (!IS_DNODE(page) ||
1059e05df3b1SJaegeuk Kim 						is_cold_node(page)))
1060e05df3b1SJaegeuk Kim 				continue;
1061e05df3b1SJaegeuk Kim 			if (step == 2 && (!IS_DNODE(page) ||
1062e05df3b1SJaegeuk Kim 						!is_cold_node(page)))
1063e05df3b1SJaegeuk Kim 				continue;
1064e05df3b1SJaegeuk Kim 
1065e05df3b1SJaegeuk Kim 			/*
1066e05df3b1SJaegeuk Kim 			 * In fsync mode,
1067e05df3b1SJaegeuk Kim 			 * we should not skip writing node pages.
1068e05df3b1SJaegeuk Kim 			 */
1069e05df3b1SJaegeuk Kim 			if (ino && ino_of_node(page) == ino)
1070e05df3b1SJaegeuk Kim 				lock_page(page);
1071e05df3b1SJaegeuk Kim 			else if (!trylock_page(page))
1072e05df3b1SJaegeuk Kim 				continue;
1073e05df3b1SJaegeuk Kim 
1074e05df3b1SJaegeuk Kim 			if (unlikely(page->mapping != mapping)) {
1075e05df3b1SJaegeuk Kim continue_unlock:
1076e05df3b1SJaegeuk Kim 				unlock_page(page);
1077e05df3b1SJaegeuk Kim 				continue;
1078e05df3b1SJaegeuk Kim 			}
1079e05df3b1SJaegeuk Kim 			if (ino && ino_of_node(page) != ino)
1080e05df3b1SJaegeuk Kim 				goto continue_unlock;
1081e05df3b1SJaegeuk Kim 
1082e05df3b1SJaegeuk Kim 			if (!PageDirty(page)) {
1083e05df3b1SJaegeuk Kim 				/* someone wrote it for us */
1084e05df3b1SJaegeuk Kim 				goto continue_unlock;
1085e05df3b1SJaegeuk Kim 			}
1086e05df3b1SJaegeuk Kim 
1087e05df3b1SJaegeuk Kim 			if (!clear_page_dirty_for_io(page))
1088e05df3b1SJaegeuk Kim 				goto continue_unlock;
1089e05df3b1SJaegeuk Kim 
1090e05df3b1SJaegeuk Kim 			/* called by fsync() */
1091e05df3b1SJaegeuk Kim 			if (ino && IS_DNODE(page)) {
1092e05df3b1SJaegeuk Kim 				int mark = !is_checkpointed_node(sbi, ino);
1093e05df3b1SJaegeuk Kim 				set_fsync_mark(page, 1);
1094e05df3b1SJaegeuk Kim 				if (IS_INODE(page))
1095e05df3b1SJaegeuk Kim 					set_dentry_mark(page, mark);
1096e05df3b1SJaegeuk Kim 				nwritten++;
1097e05df3b1SJaegeuk Kim 			} else {
1098e05df3b1SJaegeuk Kim 				set_fsync_mark(page, 0);
1099e05df3b1SJaegeuk Kim 				set_dentry_mark(page, 0);
1100e05df3b1SJaegeuk Kim 			}
1101e05df3b1SJaegeuk Kim 			mapping->a_ops->writepage(page, wbc);
1102e05df3b1SJaegeuk Kim 			wrote++;
1103e05df3b1SJaegeuk Kim 
1104e05df3b1SJaegeuk Kim 			if (--wbc->nr_to_write == 0)
1105e05df3b1SJaegeuk Kim 				break;
1106e05df3b1SJaegeuk Kim 		}
1107e05df3b1SJaegeuk Kim 		pagevec_release(&pvec);
1108e05df3b1SJaegeuk Kim 		cond_resched();
1109e05df3b1SJaegeuk Kim 
1110e05df3b1SJaegeuk Kim 		if (wbc->nr_to_write == 0) {
1111e05df3b1SJaegeuk Kim 			step = 2;
1112e05df3b1SJaegeuk Kim 			break;
1113e05df3b1SJaegeuk Kim 		}
1114e05df3b1SJaegeuk Kim 	}
1115e05df3b1SJaegeuk Kim 
1116e05df3b1SJaegeuk Kim 	if (step < 2) {
1117e05df3b1SJaegeuk Kim 		step++;
1118e05df3b1SJaegeuk Kim 		goto next_step;
1119e05df3b1SJaegeuk Kim 	}
1120e05df3b1SJaegeuk Kim 
1121e05df3b1SJaegeuk Kim 	if (wrote)
1122e05df3b1SJaegeuk Kim 		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
1123e05df3b1SJaegeuk Kim 
1124e05df3b1SJaegeuk Kim 	return nwritten;
1125e05df3b1SJaegeuk Kim }
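
/*
 * Two representative callers (a sketch of how the 'ino' argument is used):
 *
 *	sync_node_pages(sbi, 0, &wbc);              checkpoint/writeback path,
 *	                                            walks steps 0 -> 1 -> 2
 *	sync_node_pages(sbi, inode->i_ino, &wbc);   fsync path, writes only this
 *	                                            inode's dnodes and sets the
 *	                                            fsync/dentry marks
 */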
1126e05df3b1SJaegeuk Kim 
1127e05df3b1SJaegeuk Kim static int f2fs_write_node_page(struct page *page,
1128e05df3b1SJaegeuk Kim 				struct writeback_control *wbc)
1129e05df3b1SJaegeuk Kim {
1130e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
1131e05df3b1SJaegeuk Kim 	nid_t nid;
1132e05df3b1SJaegeuk Kim 	block_t new_addr;
1133e05df3b1SJaegeuk Kim 	struct node_info ni;
1134e05df3b1SJaegeuk Kim 
1135e05df3b1SJaegeuk Kim 	wait_on_page_writeback(page);
1136e05df3b1SJaegeuk Kim 
1137e05df3b1SJaegeuk Kim 	/* get old block addr of this node page */
1138e05df3b1SJaegeuk Kim 	nid = nid_of_node(page);
1139e05df3b1SJaegeuk Kim 	BUG_ON(page->index != nid);
1140e05df3b1SJaegeuk Kim 
1141e05df3b1SJaegeuk Kim 	get_node_info(sbi, nid, &ni);
1142e05df3b1SJaegeuk Kim 
1143e05df3b1SJaegeuk Kim 	/* This page is already truncated */
114439936837SJaegeuk Kim 	if (ni.blk_addr == NULL_ADDR) {
114539936837SJaegeuk Kim 		dec_page_count(sbi, F2FS_DIRTY_NODES);
114639936837SJaegeuk Kim 		unlock_page(page);
114739936837SJaegeuk Kim 		return 0;
114839936837SJaegeuk Kim 	}
1149e05df3b1SJaegeuk Kim 
115008d8058bSJaegeuk Kim 	if (wbc->for_reclaim) {
115108d8058bSJaegeuk Kim 		dec_page_count(sbi, F2FS_DIRTY_NODES);
115208d8058bSJaegeuk Kim 		wbc->pages_skipped++;
115308d8058bSJaegeuk Kim 		set_page_dirty(page);
115408d8058bSJaegeuk Kim 		return AOP_WRITEPAGE_ACTIVATE;
115508d8058bSJaegeuk Kim 	}
115608d8058bSJaegeuk Kim 
115739936837SJaegeuk Kim 	mutex_lock(&sbi->node_write);
1158e05df3b1SJaegeuk Kim 	set_page_writeback(page);
1159e05df3b1SJaegeuk Kim 	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
1160e05df3b1SJaegeuk Kim 	set_node_addr(sbi, &ni, new_addr);
1161e05df3b1SJaegeuk Kim 	dec_page_count(sbi, F2FS_DIRTY_NODES);
116239936837SJaegeuk Kim 	mutex_unlock(&sbi->node_write);
1163e05df3b1SJaegeuk Kim 	unlock_page(page);
1164e05df3b1SJaegeuk Kim 	return 0;
1165e05df3b1SJaegeuk Kim }
1166e05df3b1SJaegeuk Kim 
1167a7fdffbdSJaegeuk Kim /*
1168a7fdffbdSJaegeuk Kim  * It is very important to gather dirty pages and write them at once, so that
1169a7fdffbdSJaegeuk Kim  * we can submit a big bio without interfering with other data writes.
1170a7fdffbdSJaegeuk Kim  * By default, 512 pages (2MB), one segment's worth, is quite reasonable.
1171a7fdffbdSJaegeuk Kim  */
1172a7fdffbdSJaegeuk Kim #define COLLECT_DIRTY_NODES	512
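/* i.e. 512 dirty node pages * 4KB per page = 2MB, one full segment */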
1173e05df3b1SJaegeuk Kim static int f2fs_write_node_pages(struct address_space *mapping,
1174e05df3b1SJaegeuk Kim 			    struct writeback_control *wbc)
1175e05df3b1SJaegeuk Kim {
1176e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1177e05df3b1SJaegeuk Kim 	long nr_to_write = wbc->nr_to_write;
1178e05df3b1SJaegeuk Kim 
1179a7fdffbdSJaegeuk Kim 	/* First, try to balance the number of cached NAT entries */
1180e05df3b1SJaegeuk Kim 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
1181b7473754SJaegeuk Kim 		f2fs_sync_fs(sbi->sb, true);
1182e05df3b1SJaegeuk Kim 		return 0;
1183e05df3b1SJaegeuk Kim 	}
1184e05df3b1SJaegeuk Kim 
1185a7fdffbdSJaegeuk Kim 	/* collect a number of dirty node pages and write them together */
1186a7fdffbdSJaegeuk Kim 	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
1187a7fdffbdSJaegeuk Kim 		return 0;
1188a7fdffbdSJaegeuk Kim 
1189e05df3b1SJaegeuk Kim 	/* if mounting has failed, skip writing node pages */
1190ac5d156cSJaegeuk Kim 	wbc->nr_to_write = max_hw_blocks(sbi);
1191e05df3b1SJaegeuk Kim 	sync_node_pages(sbi, 0, wbc);
1192ac5d156cSJaegeuk Kim 	wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
1193e05df3b1SJaegeuk Kim 	return 0;
1194e05df3b1SJaegeuk Kim }
1195e05df3b1SJaegeuk Kim 
1196e05df3b1SJaegeuk Kim static int f2fs_set_node_page_dirty(struct page *page)
1197e05df3b1SJaegeuk Kim {
1198e05df3b1SJaegeuk Kim 	struct address_space *mapping = page->mapping;
1199e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1200e05df3b1SJaegeuk Kim 
1201e05df3b1SJaegeuk Kim 	SetPageUptodate(page);
1202e05df3b1SJaegeuk Kim 	if (!PageDirty(page)) {
1203e05df3b1SJaegeuk Kim 		__set_page_dirty_nobuffers(page);
1204e05df3b1SJaegeuk Kim 		inc_page_count(sbi, F2FS_DIRTY_NODES);
1205e05df3b1SJaegeuk Kim 		SetPagePrivate(page);
1206e05df3b1SJaegeuk Kim 		return 1;
1207e05df3b1SJaegeuk Kim 	}
1208e05df3b1SJaegeuk Kim 	return 0;
1209e05df3b1SJaegeuk Kim }
1210e05df3b1SJaegeuk Kim 
1211d47992f8SLukas Czerner static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
1212d47992f8SLukas Czerner 				      unsigned int length)
1213e05df3b1SJaegeuk Kim {
1214e05df3b1SJaegeuk Kim 	struct inode *inode = page->mapping->host;
1215e05df3b1SJaegeuk Kim 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
1216e05df3b1SJaegeuk Kim 	if (PageDirty(page))
1217e05df3b1SJaegeuk Kim 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1218e05df3b1SJaegeuk Kim 	ClearPagePrivate(page);
1219e05df3b1SJaegeuk Kim }
1220e05df3b1SJaegeuk Kim 
1221e05df3b1SJaegeuk Kim static int f2fs_release_node_page(struct page *page, gfp_t wait)
1222e05df3b1SJaegeuk Kim {
1223e05df3b1SJaegeuk Kim 	ClearPagePrivate(page);
1224c3850aa1SJaegeuk Kim 	return 1;
1225e05df3b1SJaegeuk Kim }
1226e05df3b1SJaegeuk Kim 
12270a8165d7SJaegeuk Kim /*
1228e05df3b1SJaegeuk Kim  * Structure of the f2fs node operations
1229e05df3b1SJaegeuk Kim  */
1230e05df3b1SJaegeuk Kim const struct address_space_operations f2fs_node_aops = {
1231e05df3b1SJaegeuk Kim 	.writepage	= f2fs_write_node_page,
1232e05df3b1SJaegeuk Kim 	.writepages	= f2fs_write_node_pages,
1233e05df3b1SJaegeuk Kim 	.set_page_dirty	= f2fs_set_node_page_dirty,
1234e05df3b1SJaegeuk Kim 	.invalidatepage	= f2fs_invalidate_node_page,
1235e05df3b1SJaegeuk Kim 	.releasepage	= f2fs_release_node_page,
1236e05df3b1SJaegeuk Kim };
1237e05df3b1SJaegeuk Kim 
1238e05df3b1SJaegeuk Kim static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
1239e05df3b1SJaegeuk Kim {
1240e05df3b1SJaegeuk Kim 	struct list_head *this;
12413aa770a9SNamjae Jeon 	struct free_nid *i;
1242e05df3b1SJaegeuk Kim 	list_for_each(this, head) {
1243e05df3b1SJaegeuk Kim 		i = list_entry(this, struct free_nid, list);
1244e05df3b1SJaegeuk Kim 		if (i->nid == n)
1245e05df3b1SJaegeuk Kim 			return i;
1246e05df3b1SJaegeuk Kim 	}
12473aa770a9SNamjae Jeon 	return NULL;
12483aa770a9SNamjae Jeon }
1249e05df3b1SJaegeuk Kim 
1250e05df3b1SJaegeuk Kim static void __del_from_free_nid_list(struct free_nid *i)
1251e05df3b1SJaegeuk Kim {
1252e05df3b1SJaegeuk Kim 	list_del(&i->list);
1253e05df3b1SJaegeuk Kim 	kmem_cache_free(free_nid_slab, i);
1254e05df3b1SJaegeuk Kim }
1255e05df3b1SJaegeuk Kim 
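/*
 * Try to add @nid to the free nid list.  When @build is set, nids that the
 * NAT cache reports as already allocated are rejected.  Returns 1 if the
 * nid was added, 0 if it cannot be used (nid 0, allocated, or already on
 * the list), and -1 if the list already holds more than enough free nids.
 */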
125659bbd474SJaegeuk Kim static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
1257e05df3b1SJaegeuk Kim {
1258e05df3b1SJaegeuk Kim 	struct free_nid *i;
125959bbd474SJaegeuk Kim 	struct nat_entry *ne;
126059bbd474SJaegeuk Kim 	bool allocated = false;
1261e05df3b1SJaegeuk Kim 
1262e05df3b1SJaegeuk Kim 	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
126323d38844SHaicheng Li 		return -1;
12649198acebSJaegeuk Kim 
12659198acebSJaegeuk Kim 	/* 0 nid should not be used */
12669198acebSJaegeuk Kim 	if (nid == 0)
12679198acebSJaegeuk Kim 		return 0;
126859bbd474SJaegeuk Kim 
126959bbd474SJaegeuk Kim 	if (!build)
127059bbd474SJaegeuk Kim 		goto retry;
127159bbd474SJaegeuk Kim 
127259bbd474SJaegeuk Kim 	/* do not add allocated nids */
127359bbd474SJaegeuk Kim 	read_lock(&nm_i->nat_tree_lock);
127459bbd474SJaegeuk Kim 	ne = __lookup_nat_cache(nm_i, nid);
127559bbd474SJaegeuk Kim 	if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
127659bbd474SJaegeuk Kim 		allocated = true;
127759bbd474SJaegeuk Kim 	read_unlock(&nm_i->nat_tree_lock);
127859bbd474SJaegeuk Kim 	if (allocated)
127959bbd474SJaegeuk Kim 		return 0;
1280e05df3b1SJaegeuk Kim retry:
1281e05df3b1SJaegeuk Kim 	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1282e05df3b1SJaegeuk Kim 	if (!i) {
1283e05df3b1SJaegeuk Kim 		cond_resched();
1284e05df3b1SJaegeuk Kim 		goto retry;
1285e05df3b1SJaegeuk Kim 	}
1286e05df3b1SJaegeuk Kim 	i->nid = nid;
1287e05df3b1SJaegeuk Kim 	i->state = NID_NEW;
1288e05df3b1SJaegeuk Kim 
1289e05df3b1SJaegeuk Kim 	spin_lock(&nm_i->free_nid_list_lock);
1290e05df3b1SJaegeuk Kim 	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
1291e05df3b1SJaegeuk Kim 		spin_unlock(&nm_i->free_nid_list_lock);
1292e05df3b1SJaegeuk Kim 		kmem_cache_free(free_nid_slab, i);
1293e05df3b1SJaegeuk Kim 		return 0;
1294e05df3b1SJaegeuk Kim 	}
1295e05df3b1SJaegeuk Kim 	list_add_tail(&i->list, &nm_i->free_nid_list);
1296e05df3b1SJaegeuk Kim 	nm_i->fcnt++;
1297e05df3b1SJaegeuk Kim 	spin_unlock(&nm_i->free_nid_list_lock);
1298e05df3b1SJaegeuk Kim 	return 1;
1299e05df3b1SJaegeuk Kim }
1300e05df3b1SJaegeuk Kim 
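/*
 * Drop @nid from the free nid list if it is still in the NID_NEW state;
 * nids that have already been handed out (NID_ALLOC) are left alone.
 */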
1301e05df3b1SJaegeuk Kim static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
1302e05df3b1SJaegeuk Kim {
1303e05df3b1SJaegeuk Kim 	struct free_nid *i;
1304e05df3b1SJaegeuk Kim 	spin_lock(&nm_i->free_nid_list_lock);
1305e05df3b1SJaegeuk Kim 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1306e05df3b1SJaegeuk Kim 	if (i && i->state == NID_NEW) {
1307e05df3b1SJaegeuk Kim 		__del_from_free_nid_list(i);
1308e05df3b1SJaegeuk Kim 		nm_i->fcnt--;
1309e05df3b1SJaegeuk Kim 	}
1310e05df3b1SJaegeuk Kim 	spin_unlock(&nm_i->free_nid_list_lock);
1311e05df3b1SJaegeuk Kim }
1312e05df3b1SJaegeuk Kim 
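/*
 * Scan one NAT block and add every nid whose block address is NULL_ADDR
 * to the free nid list, stopping early at max_nid or once the list has
 * collected enough entries.
 */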
13138760952dSHaicheng Li static void scan_nat_page(struct f2fs_nm_info *nm_i,
1314e05df3b1SJaegeuk Kim 			struct page *nat_page, nid_t start_nid)
1315e05df3b1SJaegeuk Kim {
1316e05df3b1SJaegeuk Kim 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
1317e05df3b1SJaegeuk Kim 	block_t blk_addr;
1318e05df3b1SJaegeuk Kim 	int i;
1319e05df3b1SJaegeuk Kim 
1320e05df3b1SJaegeuk Kim 	i = start_nid % NAT_ENTRY_PER_BLOCK;
1321e05df3b1SJaegeuk Kim 
1322e05df3b1SJaegeuk Kim 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
132323d38844SHaicheng Li 
132404431c44SJaegeuk Kim 		if (start_nid >= nm_i->max_nid)
132504431c44SJaegeuk Kim 			break;
132623d38844SHaicheng Li 
1327e05df3b1SJaegeuk Kim 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1328e05df3b1SJaegeuk Kim 		BUG_ON(blk_addr == NEW_ADDR);
132923d38844SHaicheng Li 		if (blk_addr == NULL_ADDR) {
133059bbd474SJaegeuk Kim 			if (add_free_nid(nm_i, start_nid, true) < 0)
133123d38844SHaicheng Li 				break;
133223d38844SHaicheng Li 		}
1333e05df3b1SJaegeuk Kim 	}
1334e05df3b1SJaegeuk Kim }
1335e05df3b1SJaegeuk Kim 
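/*
 * Refill the free nid list if it is running low: readahead and scan up to
 * FREE_NID_PAGES NAT pages starting from next_scan_nid, then pick up the
 * free nids recorded in the NAT journal of the hot data summary block.
 */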
1336e05df3b1SJaegeuk Kim static void build_free_nids(struct f2fs_sb_info *sbi)
1337e05df3b1SJaegeuk Kim {
1338e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1339e05df3b1SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1340e05df3b1SJaegeuk Kim 	struct f2fs_summary_block *sum = curseg->sum_blk;
13418760952dSHaicheng Li 	int i = 0;
134255008d84SJaegeuk Kim 	nid_t nid = nm_i->next_scan_nid;
1343e05df3b1SJaegeuk Kim 
134455008d84SJaegeuk Kim 	/* Enough entries */
134555008d84SJaegeuk Kim 	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
134655008d84SJaegeuk Kim 		return;
1347e05df3b1SJaegeuk Kim 
134855008d84SJaegeuk Kim 	/* readahead nat pages to be scanned */
1349e05df3b1SJaegeuk Kim 	ra_nat_pages(sbi, nid);
1350e05df3b1SJaegeuk Kim 
1351e05df3b1SJaegeuk Kim 	while (1) {
1352e05df3b1SJaegeuk Kim 		struct page *page = get_current_nat_page(sbi, nid);
1353e05df3b1SJaegeuk Kim 
13548760952dSHaicheng Li 		scan_nat_page(nm_i, page, nid);
1355e05df3b1SJaegeuk Kim 		f2fs_put_page(page, 1);
1356e05df3b1SJaegeuk Kim 
1357e05df3b1SJaegeuk Kim 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
135855008d84SJaegeuk Kim 		if (nid >= nm_i->max_nid)
1359e05df3b1SJaegeuk Kim 			nid = 0;
136055008d84SJaegeuk Kim 
136155008d84SJaegeuk Kim 		if (i++ == FREE_NID_PAGES)
1362e05df3b1SJaegeuk Kim 			break;
1363e05df3b1SJaegeuk Kim 	}
1364e05df3b1SJaegeuk Kim 
136555008d84SJaegeuk Kim 	/* go to the next nat page on the next scan to find free nids abundantly */
136655008d84SJaegeuk Kim 	nm_i->next_scan_nid = nid;
1367e05df3b1SJaegeuk Kim 
1368e05df3b1SJaegeuk Kim 	/* find free nids from the current summary page */
1369e05df3b1SJaegeuk Kim 	mutex_lock(&curseg->curseg_mutex);
1370e05df3b1SJaegeuk Kim 	for (i = 0; i < nats_in_cursum(sum); i++) {
1371e05df3b1SJaegeuk Kim 		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
1372e05df3b1SJaegeuk Kim 		nid = le32_to_cpu(nid_in_journal(sum, i));
1373e05df3b1SJaegeuk Kim 		if (addr == NULL_ADDR)
137459bbd474SJaegeuk Kim 			add_free_nid(nm_i, nid, true);
1375e05df3b1SJaegeuk Kim 		else
1376e05df3b1SJaegeuk Kim 			remove_free_nid(nm_i, nid);
1377e05df3b1SJaegeuk Kim 	}
1378e05df3b1SJaegeuk Kim 	mutex_unlock(&curseg->curseg_mutex);
1379e05df3b1SJaegeuk Kim }
1380e05df3b1SJaegeuk Kim 
1381e05df3b1SJaegeuk Kim /*
1382e05df3b1SJaegeuk Kim  * If this function returns success, the caller can obtain a new nid
1383e05df3b1SJaegeuk Kim  * from the second parameter of this function.
1384e05df3b1SJaegeuk Kim  * The returned nid could be used as an ino as well as a nid when an inode is created.
1385e05df3b1SJaegeuk Kim  */
1386e05df3b1SJaegeuk Kim bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1387e05df3b1SJaegeuk Kim {
1388e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1389e05df3b1SJaegeuk Kim 	struct free_nid *i = NULL;
1390e05df3b1SJaegeuk Kim 	struct list_head *this;
1391e05df3b1SJaegeuk Kim retry:
139255008d84SJaegeuk Kim 	if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
1393e05df3b1SJaegeuk Kim 		return false;
1394e05df3b1SJaegeuk Kim 
1395e05df3b1SJaegeuk Kim 	spin_lock(&nm_i->free_nid_list_lock);
1396e05df3b1SJaegeuk Kim 
139755008d84SJaegeuk Kim 	/* We should not use stale free nids created by build_free_nids */
139855008d84SJaegeuk Kim 	if (nm_i->fcnt && !sbi->on_build_free_nids) {
1399e05df3b1SJaegeuk Kim 		BUG_ON(list_empty(&nm_i->free_nid_list));
1400e05df3b1SJaegeuk Kim 		list_for_each(this, &nm_i->free_nid_list) {
1401e05df3b1SJaegeuk Kim 			i = list_entry(this, struct free_nid, list);
1402e05df3b1SJaegeuk Kim 			if (i->state == NID_NEW)
1403e05df3b1SJaegeuk Kim 				break;
1404e05df3b1SJaegeuk Kim 		}
1405e05df3b1SJaegeuk Kim 
1406e05df3b1SJaegeuk Kim 		BUG_ON(i->state != NID_NEW);
1407e05df3b1SJaegeuk Kim 		*nid = i->nid;
1408e05df3b1SJaegeuk Kim 		i->state = NID_ALLOC;
1409e05df3b1SJaegeuk Kim 		nm_i->fcnt--;
1410e05df3b1SJaegeuk Kim 		spin_unlock(&nm_i->free_nid_list_lock);
1411e05df3b1SJaegeuk Kim 		return true;
1412e05df3b1SJaegeuk Kim 	}
141355008d84SJaegeuk Kim 	spin_unlock(&nm_i->free_nid_list_lock);
141455008d84SJaegeuk Kim 
141555008d84SJaegeuk Kim 	/* Let's scan nat pages and their caches to get free nids */
141655008d84SJaegeuk Kim 	mutex_lock(&nm_i->build_lock);
141755008d84SJaegeuk Kim 	sbi->on_build_free_nids = 1;
141855008d84SJaegeuk Kim 	build_free_nids(sbi);
141955008d84SJaegeuk Kim 	sbi->on_build_free_nids = 0;
142055008d84SJaegeuk Kim 	mutex_unlock(&nm_i->build_lock);
142155008d84SJaegeuk Kim 	goto retry;
142255008d84SJaegeuk Kim }
1423e05df3b1SJaegeuk Kim 
14240a8165d7SJaegeuk Kim /*
1425e05df3b1SJaegeuk Kim  * alloc_nid() should be called prior to this function.
1426e05df3b1SJaegeuk Kim  */
1427e05df3b1SJaegeuk Kim void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1428e05df3b1SJaegeuk Kim {
1429e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1430e05df3b1SJaegeuk Kim 	struct free_nid *i;
1431e05df3b1SJaegeuk Kim 
1432e05df3b1SJaegeuk Kim 	spin_lock(&nm_i->free_nid_list_lock);
1433e05df3b1SJaegeuk Kim 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
143449952fa1SJaegeuk Kim 	BUG_ON(!i || i->state != NID_ALLOC);
1435e05df3b1SJaegeuk Kim 	__del_from_free_nid_list(i);
1436e05df3b1SJaegeuk Kim 	spin_unlock(&nm_i->free_nid_list_lock);
1437e05df3b1SJaegeuk Kim }
1438e05df3b1SJaegeuk Kim 
14390a8165d7SJaegeuk Kim /*
1440e05df3b1SJaegeuk Kim  * alloc_nid() should be called prior to this function.
1441e05df3b1SJaegeuk Kim  */
1442e05df3b1SJaegeuk Kim void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1443e05df3b1SJaegeuk Kim {
144449952fa1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
144549952fa1SJaegeuk Kim 	struct free_nid *i;
144649952fa1SJaegeuk Kim 
144749952fa1SJaegeuk Kim 	spin_lock(&nm_i->free_nid_list_lock);
144849952fa1SJaegeuk Kim 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
144949952fa1SJaegeuk Kim 	BUG_ON(!i || i->state != NID_ALLOC);
145095630cbaSHaicheng Li 	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
145195630cbaSHaicheng Li 		__del_from_free_nid_list(i);
145295630cbaSHaicheng Li 	} else {
145349952fa1SJaegeuk Kim 		i->state = NID_NEW;
145449952fa1SJaegeuk Kim 		nm_i->fcnt++;
145595630cbaSHaicheng Li 	}
145649952fa1SJaegeuk Kim 	spin_unlock(&nm_i->free_nid_list_lock);
1457e05df3b1SJaegeuk Kim }
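/*
 * Illustrative sketch only (not code in this file): a caller is expected to
 * pair alloc_nid() with either alloc_nid_done() on success or
 * alloc_nid_failed() on error, roughly as follows, where use_new_nid() is a
 * hypothetical helper standing in for the caller's own work:
 *
 *	nid_t ino;
 *
 *	if (!alloc_nid(sbi, &ino))
 *		return -ENOSPC;
 *	err = use_new_nid(sbi, ino);
 *	if (err) {
 *		alloc_nid_failed(sbi, ino);
 *		return err;
 *	}
 *	alloc_nid_done(sbi, ino);
 */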
1458e05df3b1SJaegeuk Kim 
1459e05df3b1SJaegeuk Kim void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
1460e05df3b1SJaegeuk Kim 		struct f2fs_summary *sum, struct node_info *ni,
1461e05df3b1SJaegeuk Kim 		block_t new_blkaddr)
1462e05df3b1SJaegeuk Kim {
1463e05df3b1SJaegeuk Kim 	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
1464e05df3b1SJaegeuk Kim 	set_node_addr(sbi, ni, new_blkaddr);
1465e05df3b1SJaegeuk Kim 	clear_node_page_dirty(page);
1466e05df3b1SJaegeuk Kim }
1467e05df3b1SJaegeuk Kim 
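/*
 * During recovery, rebuild a minimal inode page for the inode that owns
 * @page: copy the basic inode fields, reset size, links and xattr nid, and
 * account the node as newly allocated (NEW_ADDR).
 */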
1468e05df3b1SJaegeuk Kim int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
1469e05df3b1SJaegeuk Kim {
1470e05df3b1SJaegeuk Kim 	struct address_space *mapping = sbi->node_inode->i_mapping;
1471e05df3b1SJaegeuk Kim 	struct f2fs_node *src, *dst;
1472e05df3b1SJaegeuk Kim 	nid_t ino = ino_of_node(page);
1473e05df3b1SJaegeuk Kim 	struct node_info old_ni, new_ni;
1474e05df3b1SJaegeuk Kim 	struct page *ipage;
1475e05df3b1SJaegeuk Kim 
1476e05df3b1SJaegeuk Kim 	ipage = grab_cache_page(mapping, ino);
1477e05df3b1SJaegeuk Kim 	if (!ipage)
1478e05df3b1SJaegeuk Kim 		return -ENOMEM;
1479e05df3b1SJaegeuk Kim 
1480e05df3b1SJaegeuk Kim 	/* Should not use this inode from the free nid list */
1481e05df3b1SJaegeuk Kim 	remove_free_nid(NM_I(sbi), ino);
1482e05df3b1SJaegeuk Kim 
1483e05df3b1SJaegeuk Kim 	get_node_info(sbi, ino, &old_ni);
1484e05df3b1SJaegeuk Kim 	SetPageUptodate(ipage);
1485e05df3b1SJaegeuk Kim 	fill_node_footer(ipage, ino, ino, 0, true);
1486e05df3b1SJaegeuk Kim 
148745590710SGu Zheng 	src = F2FS_NODE(page);
148845590710SGu Zheng 	dst = F2FS_NODE(ipage);
1489e05df3b1SJaegeuk Kim 
1490e05df3b1SJaegeuk Kim 	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
1491e05df3b1SJaegeuk Kim 	dst->i.i_size = 0;
149225ca923bSJaegeuk Kim 	dst->i.i_blocks = cpu_to_le64(1);
149325ca923bSJaegeuk Kim 	dst->i.i_links = cpu_to_le32(1);
1494e05df3b1SJaegeuk Kim 	dst->i.i_xattr_nid = 0;
1495e05df3b1SJaegeuk Kim 
1496e05df3b1SJaegeuk Kim 	new_ni = old_ni;
1497e05df3b1SJaegeuk Kim 	new_ni.ino = ino;
1498e05df3b1SJaegeuk Kim 
149965e5cd0aSJaegeuk Kim 	if (!inc_valid_node_count(sbi, NULL, 1))
150065e5cd0aSJaegeuk Kim 		WARN_ON(1);
1501e05df3b1SJaegeuk Kim 	set_node_addr(sbi, &new_ni, NEW_ADDR);
1502e05df3b1SJaegeuk Kim 	inc_valid_inode_count(sbi);
1503e05df3b1SJaegeuk Kim 	f2fs_put_page(ipage, 1);
1504e05df3b1SJaegeuk Kim 	return 0;
1505e05df3b1SJaegeuk Kim }
1506e05df3b1SJaegeuk Kim 
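/*
 * Rebuild the node summary for segment @segno by reading each node block
 * in the segment and recording its nid in the matching summary entry.
 */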
1507e05df3b1SJaegeuk Kim int restore_node_summary(struct f2fs_sb_info *sbi,
1508e05df3b1SJaegeuk Kim 			unsigned int segno, struct f2fs_summary_block *sum)
1509e05df3b1SJaegeuk Kim {
1510e05df3b1SJaegeuk Kim 	struct f2fs_node *rn;
1511e05df3b1SJaegeuk Kim 	struct f2fs_summary *sum_entry;
1512e05df3b1SJaegeuk Kim 	struct page *page;
1513e05df3b1SJaegeuk Kim 	block_t addr;
1514e05df3b1SJaegeuk Kim 	int i, last_offset;
1515e05df3b1SJaegeuk Kim 
1516e05df3b1SJaegeuk Kim 	/* alloc a temporary page for reading the node blocks */
1517e05df3b1SJaegeuk Kim 	page = alloc_page(GFP_NOFS | __GFP_ZERO);
1518e05df3b1SJaegeuk Kim 	if (!page)
1519e05df3b1SJaegeuk Kim 		return -ENOMEM;
1520e05df3b1SJaegeuk Kim 	lock_page(page);
1521e05df3b1SJaegeuk Kim 
1522e05df3b1SJaegeuk Kim 	/* scan the node segment */
1523e05df3b1SJaegeuk Kim 	last_offset = sbi->blocks_per_seg;
1524e05df3b1SJaegeuk Kim 	addr = START_BLOCK(sbi, segno);
1525e05df3b1SJaegeuk Kim 	sum_entry = &sum->entries[0];
1526e05df3b1SJaegeuk Kim 
1527e05df3b1SJaegeuk Kim 	for (i = 0; i < last_offset; i++, sum_entry++) {
1528e05df3b1SJaegeuk Kim 		/*
1529e05df3b1SJaegeuk Kim 		 * In order to read the next node page,
1530e05df3b1SJaegeuk Kim 		 * we must clear the PageUptodate flag.
1531e05df3b1SJaegeuk Kim 		 */
1532e05df3b1SJaegeuk Kim 		ClearPageUptodate(page);
1533393ff91fSJaegeuk Kim 
1534393ff91fSJaegeuk Kim 		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
1535393ff91fSJaegeuk Kim 			goto out;
1536393ff91fSJaegeuk Kim 
1537393ff91fSJaegeuk Kim 		lock_page(page);
153845590710SGu Zheng 		rn = F2FS_NODE(page);
1539393ff91fSJaegeuk Kim 		sum_entry->nid = rn->footer.nid;
1540393ff91fSJaegeuk Kim 		sum_entry->version = 0;
1541393ff91fSJaegeuk Kim 		sum_entry->ofs_in_node = 0;
1542393ff91fSJaegeuk Kim 		addr++;
1543e05df3b1SJaegeuk Kim 	}
1544e05df3b1SJaegeuk Kim 	unlock_page(page);
1545393ff91fSJaegeuk Kim out:
1546e05df3b1SJaegeuk Kim 	__free_pages(page, 0);
1547e05df3b1SJaegeuk Kim 	return 0;
1548e05df3b1SJaegeuk Kim }
1549e05df3b1SJaegeuk Kim 
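/*
 * If the NAT journal in the hot data summary block is full, move all of its
 * entries into the dirty NAT cache and empty the journal.  Returns true if
 * the journal was flushed, false if it still has room.
 */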
1550e05df3b1SJaegeuk Kim static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1551e05df3b1SJaegeuk Kim {
1552e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1553e05df3b1SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1554e05df3b1SJaegeuk Kim 	struct f2fs_summary_block *sum = curseg->sum_blk;
1555e05df3b1SJaegeuk Kim 	int i;
1556e05df3b1SJaegeuk Kim 
1557e05df3b1SJaegeuk Kim 	mutex_lock(&curseg->curseg_mutex);
1558e05df3b1SJaegeuk Kim 
1559e05df3b1SJaegeuk Kim 	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1560e05df3b1SJaegeuk Kim 		mutex_unlock(&curseg->curseg_mutex);
1561e05df3b1SJaegeuk Kim 		return false;
1562e05df3b1SJaegeuk Kim 	}
1563e05df3b1SJaegeuk Kim 
1564e05df3b1SJaegeuk Kim 	for (i = 0; i < nats_in_cursum(sum); i++) {
1565e05df3b1SJaegeuk Kim 		struct nat_entry *ne;
1566e05df3b1SJaegeuk Kim 		struct f2fs_nat_entry raw_ne;
1567e05df3b1SJaegeuk Kim 		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1568e05df3b1SJaegeuk Kim 
1569e05df3b1SJaegeuk Kim 		raw_ne = nat_in_journal(sum, i);
1570e05df3b1SJaegeuk Kim retry:
1571e05df3b1SJaegeuk Kim 		write_lock(&nm_i->nat_tree_lock);
1572e05df3b1SJaegeuk Kim 		ne = __lookup_nat_cache(nm_i, nid);
1573e05df3b1SJaegeuk Kim 		if (ne) {
1574e05df3b1SJaegeuk Kim 			__set_nat_cache_dirty(nm_i, ne);
1575e05df3b1SJaegeuk Kim 			write_unlock(&nm_i->nat_tree_lock);
1576e05df3b1SJaegeuk Kim 			continue;
1577e05df3b1SJaegeuk Kim 		}
1578e05df3b1SJaegeuk Kim 		ne = grab_nat_entry(nm_i, nid);
1579e05df3b1SJaegeuk Kim 		if (!ne) {
1580e05df3b1SJaegeuk Kim 			write_unlock(&nm_i->nat_tree_lock);
1581e05df3b1SJaegeuk Kim 			goto retry;
1582e05df3b1SJaegeuk Kim 		}
1583e05df3b1SJaegeuk Kim 		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
1584e05df3b1SJaegeuk Kim 		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
1585e05df3b1SJaegeuk Kim 		nat_set_version(ne, raw_ne.version);
1586e05df3b1SJaegeuk Kim 		__set_nat_cache_dirty(nm_i, ne);
1587e05df3b1SJaegeuk Kim 		write_unlock(&nm_i->nat_tree_lock);
1588e05df3b1SJaegeuk Kim 	}
1589e05df3b1SJaegeuk Kim 	update_nats_in_cursum(sum, -i);
1590e05df3b1SJaegeuk Kim 	mutex_unlock(&curseg->curseg_mutex);
1591e05df3b1SJaegeuk Kim 	return true;
1592e05df3b1SJaegeuk Kim }
1593e05df3b1SJaegeuk Kim 
15940a8165d7SJaegeuk Kim /*
1595e05df3b1SJaegeuk Kim  * This function is called during the checkpointing process.
1596e05df3b1SJaegeuk Kim  */
1597e05df3b1SJaegeuk Kim void flush_nat_entries(struct f2fs_sb_info *sbi)
1598e05df3b1SJaegeuk Kim {
1599e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1600e05df3b1SJaegeuk Kim 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1601e05df3b1SJaegeuk Kim 	struct f2fs_summary_block *sum = curseg->sum_blk;
1602e05df3b1SJaegeuk Kim 	struct list_head *cur, *n;
1603e05df3b1SJaegeuk Kim 	struct page *page = NULL;
1604e05df3b1SJaegeuk Kim 	struct f2fs_nat_block *nat_blk = NULL;
1605e05df3b1SJaegeuk Kim 	nid_t start_nid = 0, end_nid = 0;
1606e05df3b1SJaegeuk Kim 	bool flushed;
1607e05df3b1SJaegeuk Kim 
1608e05df3b1SJaegeuk Kim 	flushed = flush_nats_in_journal(sbi);
1609e05df3b1SJaegeuk Kim 
1610e05df3b1SJaegeuk Kim 	if (!flushed)
1611e05df3b1SJaegeuk Kim 		mutex_lock(&curseg->curseg_mutex);
1612e05df3b1SJaegeuk Kim 
1613e05df3b1SJaegeuk Kim 	/* 1) flush dirty nat caches */
1614e05df3b1SJaegeuk Kim 	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
1615e05df3b1SJaegeuk Kim 		struct nat_entry *ne;
1616e05df3b1SJaegeuk Kim 		nid_t nid;
1617e05df3b1SJaegeuk Kim 		struct f2fs_nat_entry raw_ne;
1618e05df3b1SJaegeuk Kim 		int offset = -1;
16192b50638dSJaegeuk Kim 		block_t new_blkaddr;
1620e05df3b1SJaegeuk Kim 
1621e05df3b1SJaegeuk Kim 		ne = list_entry(cur, struct nat_entry, list);
1622e05df3b1SJaegeuk Kim 		nid = nat_get_nid(ne);
1623e05df3b1SJaegeuk Kim 
1624e05df3b1SJaegeuk Kim 		if (nat_get_blkaddr(ne) == NEW_ADDR)
1625e05df3b1SJaegeuk Kim 			continue;
1626e05df3b1SJaegeuk Kim 		if (flushed)
1627e05df3b1SJaegeuk Kim 			goto to_nat_page;
1628e05df3b1SJaegeuk Kim 
1629e05df3b1SJaegeuk Kim 		/* if there is room for nat entries in the curseg summary page */
1630e05df3b1SJaegeuk Kim 		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1631e05df3b1SJaegeuk Kim 		if (offset >= 0) {
1632e05df3b1SJaegeuk Kim 			raw_ne = nat_in_journal(sum, offset);
1633e05df3b1SJaegeuk Kim 			goto flush_now;
1634e05df3b1SJaegeuk Kim 		}
1635e05df3b1SJaegeuk Kim to_nat_page:
1636e05df3b1SJaegeuk Kim 		if (!page || (start_nid > nid || nid > end_nid)) {
1637e05df3b1SJaegeuk Kim 			if (page) {
1638e05df3b1SJaegeuk Kim 				f2fs_put_page(page, 1);
1639e05df3b1SJaegeuk Kim 				page = NULL;
1640e05df3b1SJaegeuk Kim 			}
1641e05df3b1SJaegeuk Kim 			start_nid = START_NID(nid);
1642e05df3b1SJaegeuk Kim 			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1643e05df3b1SJaegeuk Kim 
1644e05df3b1SJaegeuk Kim 			/*
1645e05df3b1SJaegeuk Kim 			 * get the nat block with the dirty flag set, an increased
1646e05df3b1SJaegeuk Kim 			 * reference count, mapped and locked
1647e05df3b1SJaegeuk Kim 			 */
1648e05df3b1SJaegeuk Kim 			page = get_next_nat_page(sbi, start_nid);
1649e05df3b1SJaegeuk Kim 			nat_blk = page_address(page);
1650e05df3b1SJaegeuk Kim 		}
1651e05df3b1SJaegeuk Kim 
1652e05df3b1SJaegeuk Kim 		BUG_ON(!nat_blk);
1653e05df3b1SJaegeuk Kim 		raw_ne = nat_blk->entries[nid - start_nid];
1654e05df3b1SJaegeuk Kim flush_now:
1655e05df3b1SJaegeuk Kim 		new_blkaddr = nat_get_blkaddr(ne);
1656e05df3b1SJaegeuk Kim 
1657e05df3b1SJaegeuk Kim 		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
1658e05df3b1SJaegeuk Kim 		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
1659e05df3b1SJaegeuk Kim 		raw_ne.version = nat_get_version(ne);
1660e05df3b1SJaegeuk Kim 
1661e05df3b1SJaegeuk Kim 		if (offset < 0) {
1662e05df3b1SJaegeuk Kim 			nat_blk->entries[nid - start_nid] = raw_ne;
1663e05df3b1SJaegeuk Kim 		} else {
1664e05df3b1SJaegeuk Kim 			nat_in_journal(sum, offset) = raw_ne;
1665e05df3b1SJaegeuk Kim 			nid_in_journal(sum, offset) = cpu_to_le32(nid);
1666e05df3b1SJaegeuk Kim 		}
1667e05df3b1SJaegeuk Kim 
1668fa372417SJaegeuk Kim 		if (nat_get_blkaddr(ne) == NULL_ADDR &&
166959bbd474SJaegeuk Kim 				add_free_nid(NM_I(sbi), nid, false) <= 0) {
1670e05df3b1SJaegeuk Kim 			write_lock(&nm_i->nat_tree_lock);
1671e05df3b1SJaegeuk Kim 			__del_from_nat_cache(nm_i, ne);
1672e05df3b1SJaegeuk Kim 			write_unlock(&nm_i->nat_tree_lock);
1673e05df3b1SJaegeuk Kim 		} else {
1674e05df3b1SJaegeuk Kim 			write_lock(&nm_i->nat_tree_lock);
1675e05df3b1SJaegeuk Kim 			__clear_nat_cache_dirty(nm_i, ne);
1676e05df3b1SJaegeuk Kim 			ne->checkpointed = true;
1677e05df3b1SJaegeuk Kim 			write_unlock(&nm_i->nat_tree_lock);
1678e05df3b1SJaegeuk Kim 		}
1679e05df3b1SJaegeuk Kim 	}
1680e05df3b1SJaegeuk Kim 	if (!flushed)
1681e05df3b1SJaegeuk Kim 		mutex_unlock(&curseg->curseg_mutex);
1682e05df3b1SJaegeuk Kim 	f2fs_put_page(page, 1);
1683e05df3b1SJaegeuk Kim 
1684e05df3b1SJaegeuk Kim 	/* 2) shrink nat caches if necessary */
1685e05df3b1SJaegeuk Kim 	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
1686e05df3b1SJaegeuk Kim }
1687e05df3b1SJaegeuk Kim 
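/*
 * Set up the in-memory node manager from the raw superblock and checkpoint:
 * derive max_nid from the size of the NAT area, initialize the free nid
 * list and NAT cache structures, and copy the NAT version bitmap.
 */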
1688e05df3b1SJaegeuk Kim static int init_node_manager(struct f2fs_sb_info *sbi)
1689e05df3b1SJaegeuk Kim {
1690e05df3b1SJaegeuk Kim 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1691e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1692e05df3b1SJaegeuk Kim 	unsigned char *version_bitmap;
1693e05df3b1SJaegeuk Kim 	unsigned int nat_segs, nat_blocks;
1694e05df3b1SJaegeuk Kim 
1695e05df3b1SJaegeuk Kim 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1696e05df3b1SJaegeuk Kim 
1697e05df3b1SJaegeuk Kim 	/* segment_count_nat includes a pair of segments, so divide by 2. */
1698e05df3b1SJaegeuk Kim 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1699e05df3b1SJaegeuk Kim 	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
1700e05df3b1SJaegeuk Kim 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
1701e05df3b1SJaegeuk Kim 	nm_i->fcnt = 0;
1702e05df3b1SJaegeuk Kim 	nm_i->nat_cnt = 0;
1703e05df3b1SJaegeuk Kim 
1704e05df3b1SJaegeuk Kim 	INIT_LIST_HEAD(&nm_i->free_nid_list);
1705e05df3b1SJaegeuk Kim 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1706e05df3b1SJaegeuk Kim 	INIT_LIST_HEAD(&nm_i->nat_entries);
1707e05df3b1SJaegeuk Kim 	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1708e05df3b1SJaegeuk Kim 
1709e05df3b1SJaegeuk Kim 	mutex_init(&nm_i->build_lock);
1710e05df3b1SJaegeuk Kim 	spin_lock_init(&nm_i->free_nid_list_lock);
1711e05df3b1SJaegeuk Kim 	rwlock_init(&nm_i->nat_tree_lock);
1712e05df3b1SJaegeuk Kim 
1713e05df3b1SJaegeuk Kim 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
171479b5793bSAlexandru Gheorghiu 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1715e05df3b1SJaegeuk Kim 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1716e05df3b1SJaegeuk Kim 	if (!version_bitmap)
1717e05df3b1SJaegeuk Kim 		return -EFAULT;
1718e05df3b1SJaegeuk Kim 
171979b5793bSAlexandru Gheorghiu 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
172079b5793bSAlexandru Gheorghiu 					GFP_KERNEL);
172179b5793bSAlexandru Gheorghiu 	if (!nm_i->nat_bitmap)
172279b5793bSAlexandru Gheorghiu 		return -ENOMEM;
1723e05df3b1SJaegeuk Kim 	return 0;
1724e05df3b1SJaegeuk Kim }
1725e05df3b1SJaegeuk Kim 
1726e05df3b1SJaegeuk Kim int build_node_manager(struct f2fs_sb_info *sbi)
1727e05df3b1SJaegeuk Kim {
1728e05df3b1SJaegeuk Kim 	int err;
1729e05df3b1SJaegeuk Kim 
1730e05df3b1SJaegeuk Kim 	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1731e05df3b1SJaegeuk Kim 	if (!sbi->nm_info)
1732e05df3b1SJaegeuk Kim 		return -ENOMEM;
1733e05df3b1SJaegeuk Kim 
1734e05df3b1SJaegeuk Kim 	err = init_node_manager(sbi);
1735e05df3b1SJaegeuk Kim 	if (err)
1736e05df3b1SJaegeuk Kim 		return err;
1737e05df3b1SJaegeuk Kim 
1738e05df3b1SJaegeuk Kim 	build_free_nids(sbi);
1739e05df3b1SJaegeuk Kim 	return 0;
1740e05df3b1SJaegeuk Kim }
1741e05df3b1SJaegeuk Kim 
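/*
 * Tear down the node manager: release everything left on the free nid list
 * and in the NAT cache, then free the NAT bitmap and the nm_info itself.
 */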
1742e05df3b1SJaegeuk Kim void destroy_node_manager(struct f2fs_sb_info *sbi)
1743e05df3b1SJaegeuk Kim {
1744e05df3b1SJaegeuk Kim 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1745e05df3b1SJaegeuk Kim 	struct free_nid *i, *next_i;
1746e05df3b1SJaegeuk Kim 	struct nat_entry *natvec[NATVEC_SIZE];
1747e05df3b1SJaegeuk Kim 	nid_t nid = 0;
1748e05df3b1SJaegeuk Kim 	unsigned int found;
1749e05df3b1SJaegeuk Kim 
1750e05df3b1SJaegeuk Kim 	if (!nm_i)
1751e05df3b1SJaegeuk Kim 		return;
1752e05df3b1SJaegeuk Kim 
1753e05df3b1SJaegeuk Kim 	/* destroy free nid list */
1754e05df3b1SJaegeuk Kim 	spin_lock(&nm_i->free_nid_list_lock);
1755e05df3b1SJaegeuk Kim 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1756e05df3b1SJaegeuk Kim 		BUG_ON(i->state == NID_ALLOC);
1757e05df3b1SJaegeuk Kim 		__del_from_free_nid_list(i);
1758e05df3b1SJaegeuk Kim 		nm_i->fcnt--;
1759e05df3b1SJaegeuk Kim 	}
1760e05df3b1SJaegeuk Kim 	BUG_ON(nm_i->fcnt);
1761e05df3b1SJaegeuk Kim 	spin_unlock(&nm_i->free_nid_list_lock);
1762e05df3b1SJaegeuk Kim 
1763e05df3b1SJaegeuk Kim 	/* destroy nat cache */
1764e05df3b1SJaegeuk Kim 	write_lock(&nm_i->nat_tree_lock);
1765e05df3b1SJaegeuk Kim 	while ((found = __gang_lookup_nat_cache(nm_i,
1766e05df3b1SJaegeuk Kim 					nid, NATVEC_SIZE, natvec))) {
1767e05df3b1SJaegeuk Kim 		unsigned idx;
1768e05df3b1SJaegeuk Kim 		for (idx = 0; idx < found; idx++) {
1769e05df3b1SJaegeuk Kim 			struct nat_entry *e = natvec[idx];
1770e05df3b1SJaegeuk Kim 			nid = nat_get_nid(e) + 1;
1771e05df3b1SJaegeuk Kim 			__del_from_nat_cache(nm_i, e);
1772e05df3b1SJaegeuk Kim 		}
1773e05df3b1SJaegeuk Kim 	}
1774e05df3b1SJaegeuk Kim 	BUG_ON(nm_i->nat_cnt);
1775e05df3b1SJaegeuk Kim 	write_unlock(&nm_i->nat_tree_lock);
1776e05df3b1SJaegeuk Kim 
1777e05df3b1SJaegeuk Kim 	kfree(nm_i->nat_bitmap);
1778e05df3b1SJaegeuk Kim 	sbi->nm_info = NULL;
1779e05df3b1SJaegeuk Kim 	kfree(nm_i);
1780e05df3b1SJaegeuk Kim }
1781e05df3b1SJaegeuk Kim 
17826e6093a8SNamjae Jeon int __init create_node_manager_caches(void)
1783e05df3b1SJaegeuk Kim {
1784e05df3b1SJaegeuk Kim 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1785e05df3b1SJaegeuk Kim 			sizeof(struct nat_entry), NULL);
1786e05df3b1SJaegeuk Kim 	if (!nat_entry_slab)
1787e05df3b1SJaegeuk Kim 		return -ENOMEM;
1788e05df3b1SJaegeuk Kim 
1789e05df3b1SJaegeuk Kim 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
1790e05df3b1SJaegeuk Kim 			sizeof(struct free_nid), NULL);
1791e05df3b1SJaegeuk Kim 	if (!free_nid_slab) {
1792e05df3b1SJaegeuk Kim 		kmem_cache_destroy(nat_entry_slab);
1793e05df3b1SJaegeuk Kim 		return -ENOMEM;
1794e05df3b1SJaegeuk Kim 	}
1795e05df3b1SJaegeuk Kim 	return 0;
1796e05df3b1SJaegeuk Kim }
1797e05df3b1SJaegeuk Kim 
1798e05df3b1SJaegeuk Kim void destroy_node_manager_caches(void)
1799e05df3b1SJaegeuk Kim {
1800e05df3b1SJaegeuk Kim 	kmem_cache_destroy(free_nid_slab);
1801e05df3b1SJaegeuk Kim 	kmem_cache_destroy(nat_entry_slab);
1802e05df3b1SJaegeuk Kim }
1803