xref: /openbmc/linux/fs/f2fs/node.c (revision 4f16fb0f9be3f5f9d1254ff6d7bf54b23fb65f4a)
1 /*
2  * fs/f2fs/node.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/mpage.h>
14 #include <linux/backing-dev.h>
15 #include <linux/blkdev.h>
16 #include <linux/pagevec.h>
17 #include <linux/swap.h>
18 
19 #include "f2fs.h"
20 #include "node.h"
21 #include "segment.h"
22 #include <trace/events/f2fs.h>
23 
24 static struct kmem_cache *nat_entry_slab;
25 static struct kmem_cache *free_nid_slab;
26 
27 static void clear_node_page_dirty(struct page *page)
28 {
29 	struct address_space *mapping = page->mapping;
30 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
31 	unsigned long flags;
32 
33 	if (PageDirty(page)) {
34 		spin_lock_irqsave(&mapping->tree_lock, flags);
35 		radix_tree_tag_clear(&mapping->page_tree,
36 				page_index(page),
37 				PAGECACHE_TAG_DIRTY);
38 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
39 
40 		clear_page_dirty_for_io(page);
41 		dec_page_count(sbi, F2FS_DIRTY_NODES);
42 	}
43 	ClearPageUptodate(page);
44 }
45 
46 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
47 {
48 	pgoff_t index = current_nat_addr(sbi, nid);
49 	return get_meta_page(sbi, index);
50 }
51 
52 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
53 {
54 	struct page *src_page;
55 	struct page *dst_page;
56 	pgoff_t src_off;
57 	pgoff_t dst_off;
58 	void *src_addr;
59 	void *dst_addr;
60 	struct f2fs_nm_info *nm_i = NM_I(sbi);
61 
62 	src_off = current_nat_addr(sbi, nid);
63 	dst_off = next_nat_addr(sbi, src_off);
64 
65 	/* get current nat block page with lock */
66 	src_page = get_meta_page(sbi, src_off);
67 
68 	/* Dirty src_page means that it is already the new target NAT page. */
69 	if (PageDirty(src_page))
70 		return src_page;
71 
72 	dst_page = grab_meta_page(sbi, dst_off);
73 
74 	src_addr = page_address(src_page);
75 	dst_addr = page_address(dst_page);
76 	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
77 	set_page_dirty(dst_page);
78 	f2fs_put_page(src_page, 1);
79 
80 	set_to_next_nat(nm_i, nid);
81 
82 	return dst_page;
83 }
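
/*
 * Each NAT block has two on-disk locations, one in each segment of a
 * NAT pair; the nat_bitmap selects which copy is currently valid.
 * Writing the updated block to the other location and flipping the bit
 * via set_to_next_nat() keeps the last checkpointed copy intact until
 * the next checkpoint commits.
 */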
84 
85 /*
86  * Readahead NAT pages
87  */
88 static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
89 {
90 	struct address_space *mapping = sbi->meta_inode->i_mapping;
91 	struct f2fs_nm_info *nm_i = NM_I(sbi);
92 	struct blk_plug plug;
93 	struct page *page;
94 	pgoff_t index;
95 	int i;
96 
97 	blk_start_plug(&plug);
98 
99 	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
100 		if (nid >= nm_i->max_nid)
101 			nid = 0;
102 		index = current_nat_addr(sbi, nid);
103 
104 		page = grab_cache_page(mapping, index);
105 		if (!page)
106 			continue;
107 		if (PageUptodate(page)) {
108 			f2fs_put_page(page, 1);
109 			continue;
110 		}
111 		if (f2fs_readpage(sbi, page, index, READ))
112 			continue;
113 
114 		f2fs_put_page(page, 0);
115 	}
116 	blk_finish_plug(&plug);
117 }
118 
119 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
120 {
121 	return radix_tree_lookup(&nm_i->nat_root, n);
122 }
123 
124 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
125 		nid_t start, unsigned int nr, struct nat_entry **ep)
126 {
127 	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
128 }
129 
130 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
131 {
132 	list_del(&e->list);
133 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
134 	nm_i->nat_cnt--;
135 	kmem_cache_free(nat_entry_slab, e);
136 }
137 
138 int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
139 {
140 	struct f2fs_nm_info *nm_i = NM_I(sbi);
141 	struct nat_entry *e;
142 	int is_cp = 1;
143 
144 	read_lock(&nm_i->nat_tree_lock);
145 	e = __lookup_nat_cache(nm_i, nid);
146 	if (e && !e->checkpointed)
147 		is_cp = 0;
148 	read_unlock(&nm_i->nat_tree_lock);
149 	return is_cp;
150 }
151 
152 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
153 {
154 	struct nat_entry *new;
155 
156 	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
157 	if (!new)
158 		return NULL;
159 	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
160 		kmem_cache_free(nat_entry_slab, new);
161 		return NULL;
162 	}
163 	memset(new, 0, sizeof(struct nat_entry));
164 	nat_set_nid(new, nid);
165 	list_add_tail(&new->list, &nm_i->nat_entries);
166 	nm_i->nat_cnt++;
167 	return new;
168 }
169 
170 static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
171 						struct f2fs_nat_entry *ne)
172 {
173 	struct nat_entry *e;
174 retry:
175 	write_lock(&nm_i->nat_tree_lock);
176 	e = __lookup_nat_cache(nm_i, nid);
177 	if (!e) {
178 		e = grab_nat_entry(nm_i, nid);
179 		if (!e) {
180 			write_unlock(&nm_i->nat_tree_lock);
181 			goto retry;
182 		}
183 		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
184 		nat_set_ino(e, le32_to_cpu(ne->ino));
185 		nat_set_version(e, ne->version);
186 		e->checkpointed = true;
187 	}
188 	write_unlock(&nm_i->nat_tree_lock);
189 }
190 
191 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
192 			block_t new_blkaddr)
193 {
194 	struct f2fs_nm_info *nm_i = NM_I(sbi);
195 	struct nat_entry *e;
196 retry:
197 	write_lock(&nm_i->nat_tree_lock);
198 	e = __lookup_nat_cache(nm_i, ni->nid);
199 	if (!e) {
200 		e = grab_nat_entry(nm_i, ni->nid);
201 		if (!e) {
202 			write_unlock(&nm_i->nat_tree_lock);
203 			goto retry;
204 		}
205 		e->ni = *ni;
206 		e->checkpointed = true;
207 		BUG_ON(ni->blk_addr == NEW_ADDR);
208 	} else if (new_blkaddr == NEW_ADDR) {
209 		/*
210 		 * when nid is reallocated,
211 		 * the previous nat entry can remain in the nat cache.
212 		 * So, reinitialize it with new information.
213 		 */
214 		e->ni = *ni;
215 		BUG_ON(ni->blk_addr != NULL_ADDR);
216 	}
217 
218 	if (new_blkaddr == NEW_ADDR)
219 		e->checkpointed = false;
220 
221 	/* sanity check */
222 	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
223 	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
224 			new_blkaddr == NULL_ADDR);
225 	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
226 			new_blkaddr == NEW_ADDR);
227 	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
228 			nat_get_blkaddr(e) != NULL_ADDR &&
229 			new_blkaddr == NEW_ADDR);
230 
231 	/* increment the version number as the node is removed */
232 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
233 		unsigned char version = nat_get_version(e);
234 		nat_set_version(e, inc_node_version(version));
235 	}
236 
237 	/* change address */
238 	nat_set_blkaddr(e, new_blkaddr);
239 	__set_nat_cache_dirty(nm_i, e);
240 	write_unlock(&nm_i->nat_tree_lock);
241 }
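
/*
 * The sanity checks above reflect the normal lifecycle of a node
 * address in the nat cache:
 *   NULL_ADDR -> NEW_ADDR       node allocated but not yet written
 *   NEW_ADDR  -> valid addr     node page written out
 *   valid     -> valid addr     node page rewritten at a new address
 *   NEW_ADDR/valid -> NULL_ADDR node freed (the version number is
 *                               bumped when a valid address is freed)
 */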
242 
243 static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
244 {
245 	struct f2fs_nm_info *nm_i = NM_I(sbi);
246 
247 	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
248 		return 0;
249 
250 	write_lock(&nm_i->nat_tree_lock);
251 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
252 		struct nat_entry *ne;
253 		ne = list_first_entry(&nm_i->nat_entries,
254 					struct nat_entry, list);
255 		__del_from_nat_cache(nm_i, ne);
256 		nr_shrink--;
257 	}
258 	write_unlock(&nm_i->nat_tree_lock);
259 	return nr_shrink;
260 }
261 
262 /*
263  * This function always returns success.
264  */
265 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
266 {
267 	struct f2fs_nm_info *nm_i = NM_I(sbi);
268 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
269 	struct f2fs_summary_block *sum = curseg->sum_blk;
270 	nid_t start_nid = START_NID(nid);
271 	struct f2fs_nat_block *nat_blk;
272 	struct page *page = NULL;
273 	struct f2fs_nat_entry ne;
274 	struct nat_entry *e;
275 	int i;
276 
277 	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
278 	ni->nid = nid;
279 
280 	/* Check nat cache */
281 	read_lock(&nm_i->nat_tree_lock);
282 	e = __lookup_nat_cache(nm_i, nid);
283 	if (e) {
284 		ni->ino = nat_get_ino(e);
285 		ni->blk_addr = nat_get_blkaddr(e);
286 		ni->version = nat_get_version(e);
287 	}
288 	read_unlock(&nm_i->nat_tree_lock);
289 	if (e)
290 		return;
291 
292 	/* Check current segment summary */
293 	mutex_lock(&curseg->curseg_mutex);
294 	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
295 	if (i >= 0) {
296 		ne = nat_in_journal(sum, i);
297 		node_info_from_raw_nat(ni, &ne);
298 	}
299 	mutex_unlock(&curseg->curseg_mutex);
300 	if (i >= 0)
301 		goto cache;
302 
303 	/* Fill node_info from nat page */
304 	page = get_current_nat_page(sbi, start_nid);
305 	nat_blk = (struct f2fs_nat_block *)page_address(page);
306 	ne = nat_blk->entries[nid - start_nid];
307 	node_info_from_raw_nat(ni, &ne);
308 	f2fs_put_page(page, 1);
309 cache:
310 	/* cache nat entry */
311 	cache_nat_entry(NM_I(sbi), nid, &ne);
312 }
313 
314 /*
315  * The maximum depth is four.
316  * Offset[0] will have raw inode offset.
317  */
318 static int get_node_path(struct f2fs_inode_info *fi, long block,
319 				int offset[4], unsigned int noffset[4])
320 {
321 	const long direct_index = ADDRS_PER_INODE(fi);
322 	const long direct_blks = ADDRS_PER_BLOCK;
323 	const long dptrs_per_blk = NIDS_PER_BLOCK;
324 	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
325 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
326 	int n = 0;
327 	int level = 0;
328 
329 	noffset[0] = 0;
330 
331 	if (block < direct_index) {
332 		offset[n] = block;
333 		goto got;
334 	}
335 	block -= direct_index;
336 	if (block < direct_blks) {
337 		offset[n++] = NODE_DIR1_BLOCK;
338 		noffset[n] = 1;
339 		offset[n] = block;
340 		level = 1;
341 		goto got;
342 	}
343 	block -= direct_blks;
344 	if (block < direct_blks) {
345 		offset[n++] = NODE_DIR2_BLOCK;
346 		noffset[n] = 2;
347 		offset[n] = block;
348 		level = 1;
349 		goto got;
350 	}
351 	block -= direct_blks;
352 	if (block < indirect_blks) {
353 		offset[n++] = NODE_IND1_BLOCK;
354 		noffset[n] = 3;
355 		offset[n++] = block / direct_blks;
356 		noffset[n] = 4 + offset[n - 1];
357 		offset[n] = block % direct_blks;
358 		level = 2;
359 		goto got;
360 	}
361 	block -= indirect_blks;
362 	if (block < indirect_blks) {
363 		offset[n++] = NODE_IND2_BLOCK;
364 		noffset[n] = 4 + dptrs_per_blk;
365 		offset[n++] = block / direct_blks;
366 		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
367 		offset[n] = block % direct_blks;
368 		level = 2;
369 		goto got;
370 	}
371 	block -= indirect_blks;
372 	if (block < dindirect_blks) {
373 		offset[n++] = NODE_DIND_BLOCK;
374 		noffset[n] = 5 + (dptrs_per_blk * 2);
375 		offset[n++] = block / indirect_blks;
376 		noffset[n] = 6 + (dptrs_per_blk * 2) +
377 			      offset[n - 1] * (dptrs_per_blk + 1);
378 		offset[n++] = (block / direct_blks) % dptrs_per_blk;
379 		noffset[n] = 7 + (dptrs_per_blk * 2) +
380 			      offset[n - 2] * (dptrs_per_blk + 1) +
381 			      offset[n - 1];
382 		offset[n] = block % direct_blks;
383 		level = 3;
384 		goto got;
385 	} else {
386 		BUG();
387 	}
388 got:
389 	return level;
390 }
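
/*
 * A worked example, assuming the default 4KB layout with no inline
 * xattrs (ADDRS_PER_INODE == 923, ADDRS_PER_BLOCK == NIDS_PER_BLOCK
 * == 1018):
 *   block  500 -> level 0, offset = {500}              (inside the inode)
 *   block 1000 -> level 1, offset = {NODE_DIR1_BLOCK, 77}
 *   block 5000 -> level 2, offset = {NODE_IND1_BLOCK, 2, 5},
 *                 since 5000 - 923 - 2 * 1018 = 2041 = 2 * 1018 + 5
 */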
391 
392 /*
393  * Caller should call f2fs_put_dnode(dn).
394  * Also, it should grab and release a mutex by calling mutex_lock_op() and
395  * mutex_unlock_op() only if mode is not set to RDONLY_NODE.
396  * In the case of RDONLY_NODE, we don't need to care about the mutex.
397  */
398 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
399 {
400 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
401 	struct page *npage[4];
402 	struct page *parent;
403 	int offset[4];
404 	unsigned int noffset[4];
405 	nid_t nids[4];
406 	int level, i;
407 	int err = 0;
408 
409 	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
410 
411 	nids[0] = dn->inode->i_ino;
412 	npage[0] = dn->inode_page;
413 
414 	if (!npage[0]) {
415 		npage[0] = get_node_page(sbi, nids[0]);
416 		if (IS_ERR(npage[0]))
417 			return PTR_ERR(npage[0]);
418 	}
419 	parent = npage[0];
420 	if (level != 0)
421 		nids[1] = get_nid(parent, offset[0], true);
422 	dn->inode_page = npage[0];
423 	dn->inode_page_locked = true;
424 
425 	/* get indirect or direct nodes */
426 	for (i = 1; i <= level; i++) {
427 		bool done = false;
428 
429 		if (!nids[i] && mode == ALLOC_NODE) {
430 			/* alloc new node */
431 			if (!alloc_nid(sbi, &(nids[i]))) {
432 				err = -ENOSPC;
433 				goto release_pages;
434 			}
435 
436 			dn->nid = nids[i];
437 			npage[i] = new_node_page(dn, noffset[i], NULL);
438 			if (IS_ERR(npage[i])) {
439 				alloc_nid_failed(sbi, nids[i]);
440 				err = PTR_ERR(npage[i]);
441 				goto release_pages;
442 			}
443 
444 			set_nid(parent, offset[i - 1], nids[i], i == 1);
445 			alloc_nid_done(sbi, nids[i]);
446 			done = true;
447 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
448 			npage[i] = get_node_page_ra(parent, offset[i - 1]);
449 			if (IS_ERR(npage[i])) {
450 				err = PTR_ERR(npage[i]);
451 				goto release_pages;
452 			}
453 			done = true;
454 		}
455 		if (i == 1) {
456 			dn->inode_page_locked = false;
457 			unlock_page(parent);
458 		} else {
459 			f2fs_put_page(parent, 1);
460 		}
461 
462 		if (!done) {
463 			npage[i] = get_node_page(sbi, nids[i]);
464 			if (IS_ERR(npage[i])) {
465 				err = PTR_ERR(npage[i]);
466 				f2fs_put_page(npage[0], 0);
467 				goto release_out;
468 			}
469 		}
470 		if (i < level) {
471 			parent = npage[i];
472 			nids[i + 1] = get_nid(parent, offset[i], false);
473 		}
474 	}
475 	dn->nid = nids[level];
476 	dn->ofs_in_node = offset[level];
477 	dn->node_page = npage[level];
478 	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
479 	return 0;
480 
481 release_pages:
482 	f2fs_put_page(parent, 1);
483 	if (i > 1)
484 		f2fs_put_page(npage[0], 0);
485 release_out:
486 	dn->inode_page = NULL;
487 	dn->node_page = NULL;
488 	return err;
489 }
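
/*
 * A typical caller pattern, sketched after the data lookup paths in
 * data.c:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 */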
490 
491 static void truncate_node(struct dnode_of_data *dn)
492 {
493 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
494 	struct node_info ni;
495 
496 	get_node_info(sbi, dn->nid, &ni);
497 	if (dn->inode->i_blocks == 0) {
498 		BUG_ON(ni.blk_addr != NULL_ADDR);
499 		goto invalidate;
500 	}
501 	BUG_ON(ni.blk_addr == NULL_ADDR);
502 
503 	/* Deallocate node address */
504 	invalidate_blocks(sbi, ni.blk_addr);
505 	dec_valid_node_count(sbi, dn->inode, 1);
506 	set_node_addr(sbi, &ni, NULL_ADDR);
507 
508 	if (dn->nid == dn->inode->i_ino) {
509 		remove_orphan_inode(sbi, dn->nid);
510 		dec_valid_inode_count(sbi);
511 	} else {
512 		sync_inode_page(dn);
513 	}
514 invalidate:
515 	clear_node_page_dirty(dn->node_page);
516 	F2FS_SET_SB_DIRT(sbi);
517 
518 	f2fs_put_page(dn->node_page, 1);
519 	dn->node_page = NULL;
520 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
521 }
522 
523 static int truncate_dnode(struct dnode_of_data *dn)
524 {
525 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
526 	struct page *page;
527 
528 	if (dn->nid == 0)
529 		return 1;
530 
531 	/* get direct node */
532 	page = get_node_page(sbi, dn->nid);
533 	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
534 		return 1;
535 	else if (IS_ERR(page))
536 		return PTR_ERR(page);
537 
538 	/* Make dnode_of_data for parameter */
539 	dn->node_page = page;
540 	dn->ofs_in_node = 0;
541 	truncate_data_blocks(dn);
542 	truncate_node(dn);
543 	return 1;
544 }
545 
546 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
547 						int ofs, int depth)
548 {
549 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
550 	struct dnode_of_data rdn = *dn;
551 	struct page *page;
552 	struct f2fs_node *rn;
553 	nid_t child_nid;
554 	unsigned int child_nofs;
555 	int freed = 0;
556 	int i, ret;
557 
558 	if (dn->nid == 0)
559 		return NIDS_PER_BLOCK + 1;
560 
561 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
562 
563 	page = get_node_page(sbi, dn->nid);
564 	if (IS_ERR(page)) {
565 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
566 		return PTR_ERR(page);
567 	}
568 
569 	rn = F2FS_NODE(page);
570 	if (depth < 3) {
571 		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
572 			child_nid = le32_to_cpu(rn->in.nid[i]);
573 			if (child_nid == 0)
574 				continue;
575 			rdn.nid = child_nid;
576 			ret = truncate_dnode(&rdn);
577 			if (ret < 0)
578 				goto out_err;
579 			set_nid(page, i, 0, false);
580 		}
581 	} else {
582 		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
583 		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
584 			child_nid = le32_to_cpu(rn->in.nid[i]);
585 			if (child_nid == 0) {
586 				child_nofs += NIDS_PER_BLOCK + 1;
587 				continue;
588 			}
589 			rdn.nid = child_nid;
590 			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
591 			if (ret == (NIDS_PER_BLOCK + 1)) {
592 				set_nid(page, i, 0, false);
593 				child_nofs += ret;
594 			} else if (ret < 0 && ret != -ENOENT) {
595 				goto out_err;
596 			}
597 		}
598 		freed = child_nofs;
599 	}
600 
601 	if (!ofs) {
602 		/* remove current indirect node */
603 		dn->node_page = page;
604 		truncate_node(dn);
605 		freed++;
606 	} else {
607 		f2fs_put_page(page, 1);
608 	}
609 	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
610 	return freed;
611 
612 out_err:
613 	f2fs_put_page(page, 1);
614 	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
615 	return ret;
616 }
617 
618 static int truncate_partial_nodes(struct dnode_of_data *dn,
619 			struct f2fs_inode *ri, int *offset, int depth)
620 {
621 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
622 	struct page *pages[2];
623 	nid_t nid[3];
624 	nid_t child_nid;
625 	int err = 0;
626 	int i;
627 	int idx = depth - 2;
628 
629 	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
630 	if (!nid[0])
631 		return 0;
632 
633 	/* get indirect nodes in the path */
634 	for (i = 0; i < depth - 1; i++) {
635 		/* reference count will be increased */
636 		pages[i] = get_node_page(sbi, nid[i]);
637 		if (IS_ERR(pages[i])) {
638 			depth = i + 1;
639 			err = PTR_ERR(pages[i]);
640 			goto fail;
641 		}
642 		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
643 	}
644 
645 	/* free direct nodes linked to a partial indirect node */
646 	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
647 		child_nid = get_nid(pages[idx], i, false);
648 		if (!child_nid)
649 			continue;
650 		dn->nid = child_nid;
651 		err = truncate_dnode(dn);
652 		if (err < 0)
653 			goto fail;
654 		set_nid(pages[idx], i, 0, false);
655 	}
656 
657 	if (offset[depth - 1] == 0) {
658 		dn->node_page = pages[idx];
659 		dn->nid = nid[idx];
660 		truncate_node(dn);
661 	} else {
662 		f2fs_put_page(pages[idx], 1);
663 	}
664 	offset[idx]++;
665 	offset[depth - 1] = 0;
666 fail:
667 	for (i = depth - 3; i >= 0; i--)
668 		f2fs_put_page(pages[i], 1);
669 
670 	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
671 
672 	return err;
673 }
674 
675 /*
676  * All the block addresses of data and nodes should be nullified.
677  */
678 int truncate_inode_blocks(struct inode *inode, pgoff_t from)
679 {
680 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
681 	struct address_space *node_mapping = sbi->node_inode->i_mapping;
682 	int err = 0, cont = 1;
683 	int level, offset[4], noffset[4];
684 	unsigned int nofs = 0;
685 	struct f2fs_node *rn;
686 	struct dnode_of_data dn;
687 	struct page *page;
688 
689 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
690 
691 	level = get_node_path(F2FS_I(inode), from, offset, noffset);
692 restart:
693 	page = get_node_page(sbi, inode->i_ino);
694 	if (IS_ERR(page)) {
695 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
696 		return PTR_ERR(page);
697 	}
698 
699 	set_new_dnode(&dn, inode, page, NULL, 0);
700 	unlock_page(page);
701 
702 	rn = F2FS_NODE(page);
703 	switch (level) {
704 	case 0:
705 	case 1:
706 		nofs = noffset[1];
707 		break;
708 	case 2:
709 		nofs = noffset[1];
710 		if (!offset[level - 1])
711 			goto skip_partial;
712 		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
713 		if (err < 0 && err != -ENOENT)
714 			goto fail;
715 		nofs += 1 + NIDS_PER_BLOCK;
716 		break;
717 	case 3:
718 		nofs = 5 + 2 * NIDS_PER_BLOCK;
719 		if (!offset[level - 1])
720 			goto skip_partial;
721 		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
722 		if (err < 0 && err != -ENOENT)
723 			goto fail;
724 		break;
725 	default:
726 		BUG();
727 	}
728 
729 skip_partial:
730 	while (cont) {
731 		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
732 		switch (offset[0]) {
733 		case NODE_DIR1_BLOCK:
734 		case NODE_DIR2_BLOCK:
735 			err = truncate_dnode(&dn);
736 			break;
737 
738 		case NODE_IND1_BLOCK:
739 		case NODE_IND2_BLOCK:
740 			err = truncate_nodes(&dn, nofs, offset[1], 2);
741 			break;
742 
743 		case NODE_DIND_BLOCK:
744 			err = truncate_nodes(&dn, nofs, offset[1], 3);
745 			cont = 0;
746 			break;
747 
748 		default:
749 			BUG();
750 		}
751 		if (err < 0 && err != -ENOENT)
752 			goto fail;
753 		if (offset[1] == 0 &&
754 				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
755 			lock_page(page);
756 			if (page->mapping != node_mapping) {
757 				f2fs_put_page(page, 1);
758 				goto restart;
759 			}
760 			wait_on_page_writeback(page);
761 			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
762 			set_page_dirty(page);
763 			unlock_page(page);
764 		}
765 		offset[1] = 0;
766 		offset[0]++;
767 		nofs += err;
768 	}
769 fail:
770 	f2fs_put_page(page, 0);
771 	trace_f2fs_truncate_inode_blocks_exit(inode, err);
772 	return err > 0 ? 0 : err;
773 }
774 
775 int truncate_xattr_node(struct inode *inode, struct page *page)
776 {
777 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
778 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
779 	struct dnode_of_data dn;
780 	struct page *npage;
781 
782 	if (!nid)
783 		return 0;
784 
785 	npage = get_node_page(sbi, nid);
786 	if (IS_ERR(npage))
787 		return PTR_ERR(npage);
788 
789 	F2FS_I(inode)->i_xattr_nid = 0;
790 	set_new_dnode(&dn, inode, page, npage, nid);
791 
792 	if (page)
793 		dn.inode_page_locked = 1;
794 	truncate_node(&dn);
795 	return 0;
796 }
797 
798 /*
799  * Caller should grab and release a mutex by calling mutex_lock_op() and
800  * mutex_unlock_op().
801  */
802 int remove_inode_page(struct inode *inode)
803 {
804 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
805 	struct page *page;
806 	nid_t ino = inode->i_ino;
807 	struct dnode_of_data dn;
808 	int err;
809 
810 	page = get_node_page(sbi, ino);
811 	if (IS_ERR(page))
812 		return PTR_ERR(page);
813 
814 	err = truncate_xattr_node(inode, page);
815 	if (err) {
816 		f2fs_put_page(page, 1);
817 		return err;
818 	}
819 
820 	/* 0 is possible, after f2fs_new_inode() has failed */
821 	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
822 	set_new_dnode(&dn, inode, page, page, ino);
823 	truncate_node(&dn);
824 	return 0;
825 }
826 
827 struct page *new_inode_page(struct inode *inode, const struct qstr *name)
828 {
829 	struct dnode_of_data dn;
830 
831 	/* allocate inode page for new inode */
832 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
833 
834 	/* caller should f2fs_put_page(page, 1); */
835 	return new_node_page(&dn, 0, NULL);
836 }
837 
838 struct page *new_node_page(struct dnode_of_data *dn,
839 				unsigned int ofs, struct page *ipage)
840 {
841 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
842 	struct address_space *mapping = sbi->node_inode->i_mapping;
843 	struct node_info old_ni, new_ni;
844 	struct page *page;
845 	int err;
846 
847 	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
848 		return ERR_PTR(-EPERM);
849 
850 	page = grab_cache_page(mapping, dn->nid);
851 	if (!page)
852 		return ERR_PTR(-ENOMEM);
853 
854 	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
855 		err = -ENOSPC;
856 		goto fail;
857 	}
858 
859 	get_node_info(sbi, dn->nid, &old_ni);
860 
861 	/* Reinitialize old_ni with new node page */
862 	BUG_ON(old_ni.blk_addr != NULL_ADDR);
863 	new_ni = old_ni;
864 	new_ni.ino = dn->inode->i_ino;
865 	set_node_addr(sbi, &new_ni, NEW_ADDR);
866 
867 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
868 	set_cold_node(dn->inode, page);
869 	SetPageUptodate(page);
870 	set_page_dirty(page);
871 
872 	if (ofs == XATTR_NODE_OFFSET)
873 		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
874 
875 	dn->node_page = page;
876 	if (ipage)
877 		update_inode(dn->inode, ipage);
878 	else
879 		sync_inode_page(dn);
880 	if (ofs == 0)
881 		inc_valid_inode_count(sbi);
882 
883 	return page;
884 
885 fail:
886 	clear_node_page_dirty(page);
887 	f2fs_put_page(page, 1);
888 	return ERR_PTR(err);
889 }
890 
891 /*
892  * Caller should act on the return value as follows:
893  * 0: f2fs_put_page(page, 0)
894  * LOCKED_PAGE: f2fs_put_page(page, 1)
895  * error: nothing
896  */
897 static int read_node_page(struct page *page, int type)
898 {
899 	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
900 	struct node_info ni;
901 
902 	get_node_info(sbi, page->index, &ni);
903 
904 	if (ni.blk_addr == NULL_ADDR) {
905 		f2fs_put_page(page, 1);
906 		return -ENOENT;
907 	}
908 
909 	if (PageUptodate(page))
910 		return LOCKED_PAGE;
911 
912 	return f2fs_readpage(sbi, page, ni.blk_addr, type);
913 }
914 
915 /*
916  * Readahead a node page
917  */
918 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
919 {
920 	struct address_space *mapping = sbi->node_inode->i_mapping;
921 	struct page *apage;
922 	int err;
923 
924 	apage = find_get_page(mapping, nid);
925 	if (apage && PageUptodate(apage)) {
926 		f2fs_put_page(apage, 0);
927 		return;
928 	}
929 	f2fs_put_page(apage, 0);
930 
931 	apage = grab_cache_page(mapping, nid);
932 	if (!apage)
933 		return;
934 
935 	err = read_node_page(apage, READA);
936 	if (err == 0)
937 		f2fs_put_page(apage, 0);
938 	else if (err == LOCKED_PAGE)
939 		f2fs_put_page(apage, 1);
940 }
941 
942 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
943 {
944 	struct address_space *mapping = sbi->node_inode->i_mapping;
945 	struct page *page;
946 	int err;
947 repeat:
948 	page = grab_cache_page(mapping, nid);
949 	if (!page)
950 		return ERR_PTR(-ENOMEM);
951 
952 	err = read_node_page(page, READ_SYNC);
953 	if (err < 0)
954 		return ERR_PTR(err);
955 	else if (err == LOCKED_PAGE)
956 		goto got_it;
957 
958 	lock_page(page);
959 	if (!PageUptodate(page)) {
960 		f2fs_put_page(page, 1);
961 		return ERR_PTR(-EIO);
962 	}
963 	if (page->mapping != mapping) {
964 		f2fs_put_page(page, 1);
965 		goto repeat;
966 	}
967 got_it:
968 	BUG_ON(nid != nid_of_node(page));
969 	mark_page_accessed(page);
970 	return page;
971 }
972 
973 /*
974  * Return a locked page for the desired node page.
975  * Also, read ahead MAX_RA_NODE sibling node pages.
976  */
977 struct page *get_node_page_ra(struct page *parent, int start)
978 {
979 	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
980 	struct address_space *mapping = sbi->node_inode->i_mapping;
981 	struct blk_plug plug;
982 	struct page *page;
983 	int err, i, end;
984 	nid_t nid;
985 
986 	/* First, try getting the desired direct node. */
987 	nid = get_nid(parent, start, false);
988 	if (!nid)
989 		return ERR_PTR(-ENOENT);
990 repeat:
991 	page = grab_cache_page(mapping, nid);
992 	if (!page)
993 		return ERR_PTR(-ENOMEM);
994 
995 	err = read_node_page(page, READ_SYNC);
996 	if (err < 0)
997 		return ERR_PTR(err);
998 	else if (err == LOCKED_PAGE)
999 		goto page_hit;
1000 
1001 	blk_start_plug(&plug);
1002 
1003 	/* Then, try readahead for siblings of the desired node */
1004 	end = start + MAX_RA_NODE;
1005 	end = min(end, NIDS_PER_BLOCK);
1006 	for (i = start + 1; i < end; i++) {
1007 		nid = get_nid(parent, i, false);
1008 		if (!nid)
1009 			continue;
1010 		ra_node_page(sbi, nid);
1011 	}
1012 
1013 	blk_finish_plug(&plug);
1014 
1015 	lock_page(page);
1016 	if (page->mapping != mapping) {
1017 		f2fs_put_page(page, 1);
1018 		goto repeat;
1019 	}
1020 page_hit:
1021 	if (!PageUptodate(page)) {
1022 		f2fs_put_page(page, 1);
1023 		return ERR_PTR(-EIO);
1024 	}
1025 	mark_page_accessed(page);
1026 	return page;
1027 }
1028 
1029 void sync_inode_page(struct dnode_of_data *dn)
1030 {
1031 	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
1032 		update_inode(dn->inode, dn->node_page);
1033 	} else if (dn->inode_page) {
1034 		if (!dn->inode_page_locked)
1035 			lock_page(dn->inode_page);
1036 		update_inode(dn->inode, dn->inode_page);
1037 		if (!dn->inode_page_locked)
1038 			unlock_page(dn->inode_page);
1039 	} else {
1040 		update_inode_page(dn->inode);
1041 	}
1042 }
1043 
1044 int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
1045 					struct writeback_control *wbc)
1046 {
1047 	struct address_space *mapping = sbi->node_inode->i_mapping;
1048 	pgoff_t index, end;
1049 	struct pagevec pvec;
1050 	int step = ino ? 2 : 0;
1051 	int nwritten = 0, wrote = 0;
1052 
1053 	pagevec_init(&pvec, 0);
1054 
1055 next_step:
1056 	index = 0;
1057 	end = LONG_MAX;
1058 
1059 	while (index <= end) {
1060 		int i, nr_pages;
1061 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1062 				PAGECACHE_TAG_DIRTY,
1063 				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1064 		if (nr_pages == 0)
1065 			break;
1066 
1067 		for (i = 0; i < nr_pages; i++) {
1068 			struct page *page = pvec.pages[i];
1069 
1070 			/*
1071 			 * flushing sequence with step:
1072 			 * 0. indirect nodes
1073 			 * 1. dentry dnodes
1074 			 * 2. file dnodes
1075 			 */
1076 			if (step == 0 && IS_DNODE(page))
1077 				continue;
1078 			if (step == 1 && (!IS_DNODE(page) ||
1079 						is_cold_node(page)))
1080 				continue;
1081 			if (step == 2 && (!IS_DNODE(page) ||
1082 						!is_cold_node(page)))
1083 				continue;
1084 
1085 			/*
1086 			 * In fsync mode,
1087 			 * we should not skip writing node pages.
1088 			 */
1089 			if (ino && ino_of_node(page) == ino)
1090 				lock_page(page);
1091 			else if (!trylock_page(page))
1092 				continue;
1093 
1094 			if (unlikely(page->mapping != mapping)) {
1095 continue_unlock:
1096 				unlock_page(page);
1097 				continue;
1098 			}
1099 			if (ino && ino_of_node(page) != ino)
1100 				goto continue_unlock;
1101 
1102 			if (!PageDirty(page)) {
1103 				/* someone wrote it for us */
1104 				goto continue_unlock;
1105 			}
1106 
1107 			if (!clear_page_dirty_for_io(page))
1108 				goto continue_unlock;
1109 
1110 			/* called by fsync() */
1111 			if (ino && IS_DNODE(page)) {
1112 				int mark = !is_checkpointed_node(sbi, ino);
1113 				set_fsync_mark(page, 1);
1114 				if (IS_INODE(page))
1115 					set_dentry_mark(page, mark);
1116 				nwritten++;
1117 			} else {
1118 				set_fsync_mark(page, 0);
1119 				set_dentry_mark(page, 0);
1120 			}
1121 			mapping->a_ops->writepage(page, wbc);
1122 			wrote++;
1123 
1124 			if (--wbc->nr_to_write == 0)
1125 				break;
1126 		}
1127 		pagevec_release(&pvec);
1128 		cond_resched();
1129 
1130 		if (wbc->nr_to_write == 0) {
1131 			step = 2;
1132 			break;
1133 		}
1134 	}
1135 
1136 	if (step < 2) {
1137 		step++;
1138 		goto next_step;
1139 	}
1140 
1141 	if (wrote)
1142 		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
1143 
1144 	return nwritten;
1145 }
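
/*
 * set_fsync_mark() and set_dentry_mark() tag the node footer so that
 * roll-forward recovery can find the dnodes written by fsync and, when
 * the inode was not checkpointed, recover its dentry as well (see the
 * is_fsync_dnode()/is_dent_dnode() checks in recovery.c).
 */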
1146 
1147 static int f2fs_write_node_page(struct page *page,
1148 				struct writeback_control *wbc)
1149 {
1150 	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
1151 	nid_t nid;
1152 	block_t new_addr;
1153 	struct node_info ni;
1154 
1155 	wait_on_page_writeback(page);
1156 
1157 	/* get old block addr of this node page */
1158 	nid = nid_of_node(page);
1159 	BUG_ON(page->index != nid);
1160 
1161 	get_node_info(sbi, nid, &ni);
1162 
1163 	/* This page is already truncated */
1164 	if (ni.blk_addr == NULL_ADDR) {
1165 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1166 		unlock_page(page);
1167 		return 0;
1168 	}
1169 
1170 	if (wbc->for_reclaim) {
1171 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1172 		wbc->pages_skipped++;
1173 		set_page_dirty(page);
1174 		return AOP_WRITEPAGE_ACTIVATE;
1175 	}
1176 
1177 	mutex_lock(&sbi->node_write);
1178 	set_page_writeback(page);
1179 	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
1180 	set_node_addr(sbi, &ni, new_addr);
1181 	dec_page_count(sbi, F2FS_DIRTY_NODES);
1182 	mutex_unlock(&sbi->node_write);
1183 	unlock_page(page);
1184 	return 0;
1185 }
1186 
1187 /*
1188  * It is very important to gather dirty pages and write at once, so that we can
1189  * submit a big bio without interfering with other data writes.
1190  * By default, 512 pages (2MB), a segment size, is quite reasonable.
1191  */
1192 #define COLLECT_DIRTY_NODES	512
1193 static int f2fs_write_node_pages(struct address_space *mapping,
1194 			    struct writeback_control *wbc)
1195 {
1196 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1197 	long nr_to_write = wbc->nr_to_write;
1198 
1199 	/* First check balancing cached NAT entries */
1200 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
1201 		f2fs_sync_fs(sbi->sb, true);
1202 		return 0;
1203 	}
1204 
1205 	/* collect a number of dirty node pages and write together */
1206 	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
1207 		return 0;
1208 
1209 	/* if mounting failed, skip writing node pages */
1210 	wbc->nr_to_write = max_hw_blocks(sbi);
1211 	sync_node_pages(sbi, 0, wbc);
1212 	wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
1213 	return 0;
1214 }
1215 
1216 static int f2fs_set_node_page_dirty(struct page *page)
1217 {
1218 	struct address_space *mapping = page->mapping;
1219 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
1220 
1221 	SetPageUptodate(page);
1222 	if (!PageDirty(page)) {
1223 		__set_page_dirty_nobuffers(page);
1224 		inc_page_count(sbi, F2FS_DIRTY_NODES);
1225 		SetPagePrivate(page);
1226 		return 1;
1227 	}
1228 	return 0;
1229 }
1230 
1231 static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
1232 				      unsigned int length)
1233 {
1234 	struct inode *inode = page->mapping->host;
1235 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
1236 	if (PageDirty(page))
1237 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1238 	ClearPagePrivate(page);
1239 }
1240 
1241 static int f2fs_release_node_page(struct page *page, gfp_t wait)
1242 {
1243 	ClearPagePrivate(page);
1244 	return 1;
1245 }
1246 
1247 /*
1248  * Structure of the f2fs node operations
1249  */
1250 const struct address_space_operations f2fs_node_aops = {
1251 	.writepage	= f2fs_write_node_page,
1252 	.writepages	= f2fs_write_node_pages,
1253 	.set_page_dirty	= f2fs_set_node_page_dirty,
1254 	.invalidatepage	= f2fs_invalidate_node_page,
1255 	.releasepage	= f2fs_release_node_page,
1256 };
1257 
1258 static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
1259 {
1260 	struct list_head *this;
1261 	struct free_nid *i;
1262 	list_for_each(this, head) {
1263 		i = list_entry(this, struct free_nid, list);
1264 		if (i->nid == n)
1265 			return i;
1266 	}
1267 	return NULL;
1268 }
1269 
1270 static void __del_from_free_nid_list(struct free_nid *i)
1271 {
1272 	list_del(&i->list);
1273 	kmem_cache_free(free_nid_slab, i);
1274 }
1275 
1276 static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
1277 {
1278 	struct free_nid *i;
1279 	struct nat_entry *ne;
1280 	bool allocated = false;
1281 
1282 	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
1283 		return -1;
1284 
1285 	/* 0 nid should not be used */
1286 	if (nid == 0)
1287 		return 0;
1288 
1289 	if (!build)
1290 		goto retry;
1291 
1292 	/* do not add allocated nids */
1293 	read_lock(&nm_i->nat_tree_lock);
1294 	ne = __lookup_nat_cache(nm_i, nid);
1295 	if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
1296 		allocated = true;
1297 	read_unlock(&nm_i->nat_tree_lock);
1298 	if (allocated)
1299 		return 0;
1300 retry:
1301 	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1302 	if (!i) {
1303 		cond_resched();
1304 		goto retry;
1305 	}
1306 	i->nid = nid;
1307 	i->state = NID_NEW;
1308 
1309 	spin_lock(&nm_i->free_nid_list_lock);
1310 	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
1311 		spin_unlock(&nm_i->free_nid_list_lock);
1312 		kmem_cache_free(free_nid_slab, i);
1313 		return 0;
1314 	}
1315 	list_add_tail(&i->list, &nm_i->free_nid_list);
1316 	nm_i->fcnt++;
1317 	spin_unlock(&nm_i->free_nid_list_lock);
1318 	return 1;
1319 }
1320 
1321 static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
1322 {
1323 	struct free_nid *i;
1324 	spin_lock(&nm_i->free_nid_list_lock);
1325 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1326 	if (i && i->state == NID_NEW) {
1327 		__del_from_free_nid_list(i);
1328 		nm_i->fcnt--;
1329 	}
1330 	spin_unlock(&nm_i->free_nid_list_lock);
1331 }
1332 
1333 static void scan_nat_page(struct f2fs_nm_info *nm_i,
1334 			struct page *nat_page, nid_t start_nid)
1335 {
1336 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
1337 	block_t blk_addr;
1338 	int i;
1339 
1340 	i = start_nid % NAT_ENTRY_PER_BLOCK;
1341 
1342 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
1343 
1344 		if (start_nid >= nm_i->max_nid)
1345 			break;
1346 
1347 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1348 		BUG_ON(blk_addr == NEW_ADDR);
1349 		if (blk_addr == NULL_ADDR) {
1350 			if (add_free_nid(nm_i, start_nid, true) < 0)
1351 				break;
1352 		}
1353 	}
1354 }
1355 
1356 static void build_free_nids(struct f2fs_sb_info *sbi)
1357 {
1358 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1359 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1360 	struct f2fs_summary_block *sum = curseg->sum_blk;
1361 	int i = 0;
1362 	nid_t nid = nm_i->next_scan_nid;
1363 
1364 	/* Enough entries */
1365 	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
1366 		return;
1367 
1368 	/* readahead nat pages to be scanned */
1369 	ra_nat_pages(sbi, nid);
1370 
1371 	while (1) {
1372 		struct page *page = get_current_nat_page(sbi, nid);
1373 
1374 		scan_nat_page(nm_i, page, nid);
1375 		f2fs_put_page(page, 1);
1376 
1377 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
1378 		if (nid >= nm_i->max_nid)
1379 			nid = 0;
1380 
1381 		if (i++ == FREE_NID_PAGES)
1382 			break;
1383 	}
1384 
1385 	/* go to the next nat page in order to find more free nids */
1386 	nm_i->next_scan_nid = nid;
1387 
1388 	/* find free nids from the current summary page */
1389 	mutex_lock(&curseg->curseg_mutex);
1390 	for (i = 0; i < nats_in_cursum(sum); i++) {
1391 		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
1392 		nid = le32_to_cpu(nid_in_journal(sum, i));
1393 		if (addr == NULL_ADDR)
1394 			add_free_nid(nm_i, nid, true);
1395 		else
1396 			remove_free_nid(nm_i, nid);
1397 	}
1398 	mutex_unlock(&curseg->curseg_mutex);
1399 }
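
/*
 * Rough sizing of one scan pass, assuming 4KB blocks: each NAT block
 * carries NAT_ENTRY_PER_BLOCK (455) entries and FREE_NID_PAGES blocks
 * are read ahead and scanned, so on the order of a couple of thousand
 * nids are examined per pass, plus the NAT entries sitting in the
 * summary journal.
 */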
1400 
1401 /*
1402  * If this function returns success, caller can obtain a new nid
1403  * from the second parameter of this function.
1404  * The returned nid can be used as an ino as well as a nid when an inode is created.
1405  */
1406 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1407 {
1408 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1409 	struct free_nid *i = NULL;
1410 	struct list_head *this;
1411 retry:
1412 	if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
1413 		return false;
1414 
1415 	spin_lock(&nm_i->free_nid_list_lock);
1416 
1417 	/* We should not use stale free nids created by build_free_nids */
1418 	if (nm_i->fcnt && !sbi->on_build_free_nids) {
1419 		BUG_ON(list_empty(&nm_i->free_nid_list));
1420 		list_for_each(this, &nm_i->free_nid_list) {
1421 			i = list_entry(this, struct free_nid, list);
1422 			if (i->state == NID_NEW)
1423 				break;
1424 		}
1425 
1426 		BUG_ON(i->state != NID_NEW);
1427 		*nid = i->nid;
1428 		i->state = NID_ALLOC;
1429 		nm_i->fcnt--;
1430 		spin_unlock(&nm_i->free_nid_list_lock);
1431 		return true;
1432 	}
1433 	spin_unlock(&nm_i->free_nid_list_lock);
1434 
1435 	/* Let's scan nat pages and their caches to get free nids */
1436 	mutex_lock(&nm_i->build_lock);
1437 	sbi->on_build_free_nids = 1;
1438 	build_free_nids(sbi);
1439 	sbi->on_build_free_nids = 0;
1440 	mutex_unlock(&nm_i->build_lock);
1441 	goto retry;
1442 }
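
/*
 * A nid obtained from alloc_nid() must be either committed with
 * alloc_nid_done() or rolled back with alloc_nid_failed(), mirroring
 * the pattern in get_dnode_of_data() above:
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);
 *	else
 *		alloc_nid_done(sbi, nid);
 */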
1443 
1444 /*
1445  * alloc_nid() should be called prior to this function.
1446  */
1447 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1448 {
1449 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1450 	struct free_nid *i;
1451 
1452 	spin_lock(&nm_i->free_nid_list_lock);
1453 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1454 	BUG_ON(!i || i->state != NID_ALLOC);
1455 	__del_from_free_nid_list(i);
1456 	spin_unlock(&nm_i->free_nid_list_lock);
1457 }
1458 
1459 /*
1460  * alloc_nid() should be called prior to this function.
1461  */
1462 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1463 {
1464 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1465 	struct free_nid *i;
1466 
1467 	spin_lock(&nm_i->free_nid_list_lock);
1468 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
1469 	BUG_ON(!i || i->state != NID_ALLOC);
1470 	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
1471 		__del_from_free_nid_list(i);
1472 	} else {
1473 		i->state = NID_NEW;
1474 		nm_i->fcnt++;
1475 	}
1476 	spin_unlock(&nm_i->free_nid_list_lock);
1477 }
1478 
1479 void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
1480 		struct f2fs_summary *sum, struct node_info *ni,
1481 		block_t new_blkaddr)
1482 {
1483 	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
1484 	set_node_addr(sbi, ni, new_blkaddr);
1485 	clear_node_page_dirty(page);
1486 }
1487 
1488 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
1489 {
1490 	struct address_space *mapping = sbi->node_inode->i_mapping;
1491 	struct f2fs_node *src, *dst;
1492 	nid_t ino = ino_of_node(page);
1493 	struct node_info old_ni, new_ni;
1494 	struct page *ipage;
1495 
1496 	ipage = grab_cache_page(mapping, ino);
1497 	if (!ipage)
1498 		return -ENOMEM;
1499 
1500 	/* Should not use this inode from the free nid list */
1501 	remove_free_nid(NM_I(sbi), ino);
1502 
1503 	get_node_info(sbi, ino, &old_ni);
1504 	SetPageUptodate(ipage);
1505 	fill_node_footer(ipage, ino, ino, 0, true);
1506 
1507 	src = F2FS_NODE(page);
1508 	dst = F2FS_NODE(ipage);
1509 
1510 	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
1511 	dst->i.i_size = 0;
1512 	dst->i.i_blocks = cpu_to_le64(1);
1513 	dst->i.i_links = cpu_to_le32(1);
1514 	dst->i.i_xattr_nid = 0;
1515 
1516 	new_ni = old_ni;
1517 	new_ni.ino = ino;
1518 
1519 	if (!inc_valid_node_count(sbi, NULL, 1))
1520 		WARN_ON(1);
1521 	set_node_addr(sbi, &new_ni, NEW_ADDR);
1522 	inc_valid_inode_count(sbi);
1523 	f2fs_put_page(ipage, 1);
1524 	return 0;
1525 }
1526 
1527 int restore_node_summary(struct f2fs_sb_info *sbi,
1528 			unsigned int segno, struct f2fs_summary_block *sum)
1529 {
1530 	struct f2fs_node *rn;
1531 	struct f2fs_summary *sum_entry;
1532 	struct page *page;
1533 	block_t addr;
1534 	int i, last_offset;
1535 
1536 	/* alloc a temporary page for reading the node blocks */
1537 	page = alloc_page(GFP_NOFS | __GFP_ZERO);
1538 	if (!page)
1539 		return -ENOMEM;
1540 	lock_page(page);
1541 
1542 	/* scan the node segment */
1543 	last_offset = sbi->blocks_per_seg;
1544 	addr = START_BLOCK(sbi, segno);
1545 	sum_entry = &sum->entries[0];
1546 
1547 	for (i = 0; i < last_offset; i++, sum_entry++) {
1548 		/*
1549 		 * In order to read the next node page,
1550 		 * we must clear the PageUptodate flag.
1551 		 */
1552 		ClearPageUptodate(page);
1553 
1554 		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
1555 			goto out;
1556 
1557 		lock_page(page);
1558 		rn = F2FS_NODE(page);
1559 		sum_entry->nid = rn->footer.nid;
1560 		sum_entry->version = 0;
1561 		sum_entry->ofs_in_node = 0;
1562 		addr++;
1563 	}
1564 	unlock_page(page);
1565 out:
1566 	__free_pages(page, 0);
1567 	return 0;
1568 }
1569 
1570 static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
1571 {
1572 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1573 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1574 	struct f2fs_summary_block *sum = curseg->sum_blk;
1575 	int i;
1576 
1577 	mutex_lock(&curseg->curseg_mutex);
1578 
1579 	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
1580 		mutex_unlock(&curseg->curseg_mutex);
1581 		return false;
1582 	}
1583 
1584 	for (i = 0; i < nats_in_cursum(sum); i++) {
1585 		struct nat_entry *ne;
1586 		struct f2fs_nat_entry raw_ne;
1587 		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1588 
1589 		raw_ne = nat_in_journal(sum, i);
1590 retry:
1591 		write_lock(&nm_i->nat_tree_lock);
1592 		ne = __lookup_nat_cache(nm_i, nid);
1593 		if (ne) {
1594 			__set_nat_cache_dirty(nm_i, ne);
1595 			write_unlock(&nm_i->nat_tree_lock);
1596 			continue;
1597 		}
1598 		ne = grab_nat_entry(nm_i, nid);
1599 		if (!ne) {
1600 			write_unlock(&nm_i->nat_tree_lock);
1601 			goto retry;
1602 		}
1603 		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
1604 		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
1605 		nat_set_version(ne, raw_ne.version);
1606 		__set_nat_cache_dirty(nm_i, ne);
1607 		write_unlock(&nm_i->nat_tree_lock);
1608 	}
1609 	update_nats_in_cursum(sum, -i);
1610 	mutex_unlock(&curseg->curseg_mutex);
1611 	return true;
1612 }
1613 
1614 /*
1615  * This function is called during the checkpointing process.
1616  */
1617 void flush_nat_entries(struct f2fs_sb_info *sbi)
1618 {
1619 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1620 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1621 	struct f2fs_summary_block *sum = curseg->sum_blk;
1622 	struct list_head *cur, *n;
1623 	struct page *page = NULL;
1624 	struct f2fs_nat_block *nat_blk = NULL;
1625 	nid_t start_nid = 0, end_nid = 0;
1626 	bool flushed;
1627 
1628 	flushed = flush_nats_in_journal(sbi);
1629 
1630 	if (!flushed)
1631 		mutex_lock(&curseg->curseg_mutex);
1632 
1633 	/* 1) flush dirty nat caches */
1634 	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
1635 		struct nat_entry *ne;
1636 		nid_t nid;
1637 		struct f2fs_nat_entry raw_ne;
1638 		int offset = -1;
1639 		block_t new_blkaddr;
1640 
1641 		ne = list_entry(cur, struct nat_entry, list);
1642 		nid = nat_get_nid(ne);
1643 
1644 		if (nat_get_blkaddr(ne) == NEW_ADDR)
1645 			continue;
1646 		if (flushed)
1647 			goto to_nat_page;
1648 
1649 		/* if there is room for nat entries in the current summary page */
1650 		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
1651 		if (offset >= 0) {
1652 			raw_ne = nat_in_journal(sum, offset);
1653 			goto flush_now;
1654 		}
1655 to_nat_page:
1656 		if (!page || (start_nid > nid || nid > end_nid)) {
1657 			if (page) {
1658 				f2fs_put_page(page, 1);
1659 				page = NULL;
1660 			}
1661 			start_nid = START_NID(nid);
1662 			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
1663 
1664 			/*
1665 			 * get nat block with dirty flag, increased reference
1666 			 * count, mapped and locked
1667 			 */
1668 			page = get_next_nat_page(sbi, start_nid);
1669 			nat_blk = page_address(page);
1670 		}
1671 
1672 		BUG_ON(!nat_blk);
1673 		raw_ne = nat_blk->entries[nid - start_nid];
1674 flush_now:
1675 		new_blkaddr = nat_get_blkaddr(ne);
1676 
1677 		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
1678 		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
1679 		raw_ne.version = nat_get_version(ne);
1680 
1681 		if (offset < 0) {
1682 			nat_blk->entries[nid - start_nid] = raw_ne;
1683 		} else {
1684 			nat_in_journal(sum, offset) = raw_ne;
1685 			nid_in_journal(sum, offset) = cpu_to_le32(nid);
1686 		}
1687 
1688 		if (nat_get_blkaddr(ne) == NULL_ADDR &&
1689 				add_free_nid(NM_I(sbi), nid, false) <= 0) {
1690 			write_lock(&nm_i->nat_tree_lock);
1691 			__del_from_nat_cache(nm_i, ne);
1692 			write_unlock(&nm_i->nat_tree_lock);
1693 		} else {
1694 			write_lock(&nm_i->nat_tree_lock);
1695 			__clear_nat_cache_dirty(nm_i, ne);
1696 			ne->checkpointed = true;
1697 			write_unlock(&nm_i->nat_tree_lock);
1698 		}
1699 	}
1700 	if (!flushed)
1701 		mutex_unlock(&curseg->curseg_mutex);
1702 	f2fs_put_page(page, 1);
1703 
1704 	/* 2) shrink nat caches if necessary */
1705 	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
1706 }
1707 
1708 static int init_node_manager(struct f2fs_sb_info *sbi)
1709 {
1710 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
1711 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1712 	unsigned char *version_bitmap;
1713 	unsigned int nat_segs, nat_blocks;
1714 
1715 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
1716 
1717 	/* segment_count_nat includes pair segments, so divide by 2. */
1718 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
1719 	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
1720 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
1721 	nm_i->fcnt = 0;
1722 	nm_i->nat_cnt = 0;
1723 
1724 	INIT_LIST_HEAD(&nm_i->free_nid_list);
1725 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
1726 	INIT_LIST_HEAD(&nm_i->nat_entries);
1727 	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
1728 
1729 	mutex_init(&nm_i->build_lock);
1730 	spin_lock_init(&nm_i->free_nid_list_lock);
1731 	rwlock_init(&nm_i->nat_tree_lock);
1732 
1733 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
1734 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
1735 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
1736 	if (!version_bitmap)
1737 		return -EFAULT;
1738 
1739 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
1740 					GFP_KERNEL);
1741 	if (!nm_i->nat_bitmap)
1742 		return -ENOMEM;
1743 	return 0;
1744 }
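
/*
 * A worked example of the max_nid computation: with, say,
 * segment_count_nat == 10 and 512 blocks per segment, 5 segments hold
 * live NAT blocks (the other 5 are the paired copies), so there are
 * 5 * 512 = 2560 NAT blocks; at 455 entries per 4KB block this gives
 * max_nid = 455 * 2560 = 1164800.
 */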
1745 
1746 int build_node_manager(struct f2fs_sb_info *sbi)
1747 {
1748 	int err;
1749 
1750 	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
1751 	if (!sbi->nm_info)
1752 		return -ENOMEM;
1753 
1754 	err = init_node_manager(sbi);
1755 	if (err)
1756 		return err;
1757 
1758 	build_free_nids(sbi);
1759 	return 0;
1760 }
1761 
1762 void destroy_node_manager(struct f2fs_sb_info *sbi)
1763 {
1764 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1765 	struct free_nid *i, *next_i;
1766 	struct nat_entry *natvec[NATVEC_SIZE];
1767 	nid_t nid = 0;
1768 	unsigned int found;
1769 
1770 	if (!nm_i)
1771 		return;
1772 
1773 	/* destroy free nid list */
1774 	spin_lock(&nm_i->free_nid_list_lock);
1775 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
1776 		BUG_ON(i->state == NID_ALLOC);
1777 		__del_from_free_nid_list(i);
1778 		nm_i->fcnt--;
1779 	}
1780 	BUG_ON(nm_i->fcnt);
1781 	spin_unlock(&nm_i->free_nid_list_lock);
1782 
1783 	/* destroy nat cache */
1784 	write_lock(&nm_i->nat_tree_lock);
1785 	while ((found = __gang_lookup_nat_cache(nm_i,
1786 					nid, NATVEC_SIZE, natvec))) {
1787 		unsigned idx;
1788 		for (idx = 0; idx < found; idx++) {
1789 			struct nat_entry *e = natvec[idx];
1790 			nid = nat_get_nid(e) + 1;
1791 			__del_from_nat_cache(nm_i, e);
1792 		}
1793 	}
1794 	BUG_ON(nm_i->nat_cnt);
1795 	write_unlock(&nm_i->nat_tree_lock);
1796 
1797 	kfree(nm_i->nat_bitmap);
1798 	sbi->nm_info = NULL;
1799 	kfree(nm_i);
1800 }
1801 
1802 int __init create_node_manager_caches(void)
1803 {
1804 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1805 			sizeof(struct nat_entry), NULL);
1806 	if (!nat_entry_slab)
1807 		return -ENOMEM;
1808 
1809 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
1810 			sizeof(struct free_nid), NULL);
1811 	if (!free_nid_slab) {
1812 		kmem_cache_destroy(nat_entry_slab);
1813 		return -ENOMEM;
1814 	}
1815 	return 0;
1816 }
1817 
1818 void destroy_node_manager_caches(void)
1819 {
1820 	kmem_cache_destroy(free_nid_slab);
1821 	kmem_cache_destroy(nat_entry_slab);
1822 }
1823