xref: /openbmc/linux/fs/f2fs/node.c (revision 33ac9dba)
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);
	/* give 25%, 25%, 50% of memory to each component respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}
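
/*
 * Worked example (a sketch, assuming 4KB pages and a ram_thresh of 10,
 * which is tunable via sysfs): with totalram = 1M pages (4GB), the
 * overall budget is 1M * 10 / 100 = 100K pages. FREE_NIDS and
 * NAT_ENTRIES may each use a quarter of that (>> 2) and DIRTY_DENTS
 * half of it (>> 1); mem_size is converted to pages by the >> 12 above.
 */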

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}

void fsync_mark_clear(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		e->fsync_done = false;
	write_unlock(&nm_i->nat_tree_lock);
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&e->ni, ne);
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So reinitialize it with new
		 * information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version number as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
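
/*
 * Worked example (a sketch assuming the common 4KB-block layout, where
 * ADDRS_PER_INODE is 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018;
 * the exact constants live in f2fs.h):
 *   block  500 -> level 0: stored in the inode itself, offset[0] = 500
 *   block 1000 -> level 1: NODE_DIR1_BLOCK, offset 77 (1000 - 923)
 *   block 5000 -> level 2: NODE_IND1_BLOCK, direct node 2, offset 5
 *                 (5000 - 923 - 2*1018 = 2041 = 2*1018 + 5)
 */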

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE. For read-only
 * lookups (LOOKUP_NODE and LOOKUP_NODE_RA), we don't need to care about
 * the lock.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
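
/*
 * A minimal sketch of the caller contract (a hypothetical caller; the
 * real ones live in data.c and file.c):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	// dn.data_blkaddr now holds the block address for this index
 *	f2fs_put_dnode(&dn);
 */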

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
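
/*
 * Return-value sketch for truncate_nodes(): on success it returns how
 * many nodes were freed. A fully freed single-indirect subtree yields
 * NIDS_PER_BLOCK + 1 (all direct children plus the indirect node
 * itself), which is also what a hole (nid == 0) reports, so the
 * caller's nofs accounting stays aligned either way.
 */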

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() fails */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: do nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	return page;
}

/*
 * Return a locked page for the desired node page, and read ahead up to
 * MAX_RA_NODE of its sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	down_read(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* remember where to resume scanning for free nids next time */
	nm_i->next_scan_nid = nid;

	/* find free nids from the current summary page */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid is used as an ino as well as a nid when an inode is
 * created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and the journal to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
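
/*
 * A minimal sketch of the nid allocation lifecycle (hypothetical
 * caller; get_dnode_of_data() above is a real one):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(&dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	// back to the free list
 *	else
 *		alloc_nid_done(sbi, nid);	// drop the free_nid entry
 */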

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr, false);
	clear_node_page_dirty(page);
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
	return true;
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* We should not reuse this ino from the free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}

/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	struct page *pages[bio_blocks];
	int i, idx, last_offset, nrpages, err = 0;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		for (idx = 0; idx < nrpages; idx++) {
			if (err)
				goto skip;

			lock_page(pages[idx]);
			if (unlikely(!PageUptodate(pages[idx]))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(pages[idx]);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(pages[idx]);
skip:
			page_cache_release(pages[idx]);
		}

		invalidate_mapping_pages(inode->i_mapping, addr,
							addr + nrpages);
	}
	return err;
}

static struct nat_entry_set *grab_nat_entry_set(void)
{
	struct nat_entry_set *nes =
			f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

	nes->entry_cnt = 0;
	INIT_LIST_HEAD(&nes->set_list);
	INIT_LIST_HEAD(&nes->entry_list);
	return nes;
}

static void release_nat_entry_set(struct nat_entry_set *nes,
						struct f2fs_nm_info *nm_i)
{
	f2fs_bug_on(!list_empty(&nes->entry_list));

	nm_i->dirty_nat_cnt -= nes->entry_cnt;
	list_del(&nes->set_list);
	kmem_cache_free(nat_entry_set_slab, nes);
}

static void adjust_nat_entry_set(struct nat_entry_set *nes,
						struct list_head *head)
{
	struct nat_entry_set *next = nes;

	if (list_is_last(&nes->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (nes->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&nes->set_list, &next->set_list);
}

static void add_nat_entry(struct nat_entry *ne, struct list_head *head)
{
	struct nat_entry_set *nes;
	nid_t start_nid = START_NID(ne->ni.nid);

	list_for_each_entry(nes, head, set_list) {
		if (nes->start_nid == start_nid) {
			list_move_tail(&ne->list, &nes->entry_list);
			nes->entry_cnt++;
			adjust_nat_entry_set(nes, head);
			return;
		}
	}

	nes = grab_nat_entry_set();

	nes->start_nid = start_nid;
	list_move_tail(&ne->list, &nes->entry_list);
	nes->entry_cnt++;
	list_add(&nes->set_list, head);
}

static void merge_nats_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct list_head *dirty_list = &nm_i->dirty_nat_entries;
	struct list_head *set_list = &nm_i->nat_entry_set;
	struct nat_entry *ne, *tmp;

	write_lock(&nm_i->nat_tree_lock);
	list_for_each_entry_safe(ne, tmp, dirty_list, list) {
		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		add_nat_entry(ne, set_list);
		nm_i->dirty_nat_cnt++;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static bool __has_cursum_space(struct f2fs_summary_block *sum, int size)
{
	return nats_in_cursum(sum) + size <= NAT_JOURNAL_ENTRIES;
}
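
/*
 * Capacity note (an assumption based on the on-disk summary layout of
 * this era, where a NAT journal entry is 13 bytes): the hot data
 * summary block journals only a few dozen NAT entries, so checkpoints
 * with more dirty entries than that fall back to rewriting whole NAT
 * pages in flush_nat_entries() below.
 */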

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne)
			goto found;

		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		node_info_from_raw_nat(&ne->ni, &raw_ne);
found:
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *nes, *tmp;
	struct list_head *head = &nm_i->nat_entry_set;
	bool to_journal = true;

	/* merge nat entries of dirty list to nat entry set temporarily */
	merge_nats_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt)) {
		remove_nats_in_journal(sbi);

		/*
		 * merge nat entries of dirty list to nat entry set temporarily
		 */
		merge_nats_in_set(sbi);
	}

	if (!nm_i->dirty_nat_cnt)
		return;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to the journal in the current hot data
	 *     summary block.
	 * #2, flush nat entries to the nat page.
	 */
	list_for_each_entry_safe(nes, tmp, head, set_list) {
		struct f2fs_nat_block *nat_blk;
		struct nat_entry *ne, *cur;
		struct page *page;
		nid_t start_nid = nes->start_nid;

		if (to_journal && !__has_cursum_space(sum, nes->entry_cnt))
			to_journal = false;

		if (to_journal) {
			mutex_lock(&curseg->curseg_mutex);
		} else {
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
			f2fs_bug_on(!nat_blk);
		}

		/* flush dirty nats in nat entry set */
		list_for_each_entry_safe(ne, cur, &nes->entry_list, list) {
			struct f2fs_nat_entry *raw_ne;
			nid_t nid = nat_get_nid(ne);
			int offset;

			if (to_journal) {
				offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
				f2fs_bug_on(offset < 0);
				raw_ne = &nat_in_journal(sum, offset);
				nid_in_journal(sum, offset) = cpu_to_le32(nid);
			} else {
				raw_ne = &nat_blk->entries[nid - start_nid];
			}
			raw_nat_from_node_info(raw_ne, &ne->ni);

			if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(sbi, nid, false) <= 0) {
				write_lock(&nm_i->nat_tree_lock);
				__del_from_nat_cache(nm_i, ne);
				write_unlock(&nm_i->nat_tree_lock);
			} else {
				write_lock(&nm_i->nat_tree_lock);
				__clear_nat_cache_dirty(nm_i, ne);
				write_unlock(&nm_i->nat_tree_lock);
			}
		}

		if (to_journal)
			mutex_unlock(&curseg->curseg_mutex);
		else
			f2fs_put_page(page, 1);

		release_nat_entry_set(nes, nm_i);
	}

	f2fs_bug_on(!list_empty(head));
	f2fs_bug_on(nm_i->dirty_nat_cnt);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta (and root is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - 3;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
	INIT_LIST_HEAD(&nm_i->nat_entry_set);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
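
/*
 * NAT geometry arithmetic (a worked example with assumed values:
 * segment_count_nat = 8, log_blocks_per_seg = 9 (512 blocks per 2MB
 * segment), and NAT_ENTRY_PER_BLOCK = 455 for 4KB blocks):
 *   nat_segs   = 8 >> 1     = 4	(the other half are backup copies)
 *   nat_blocks = 4 << 9     = 2048
 *   max_nid    = 455 * 2048 = 931840
 */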

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}