/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

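/*
 * NAT blocks are kept in two sets on disk, and a bitmap selects which copy
 * of each block is live.  current_nat_addr() returns the live copy for a
 * nid, while next_nat_addr() returns its shadow; get_next_nat_page() below
 * copies the live block into the shadow and flips the bitmap bit via
 * set_to_next_nat(), so a checkpoint never overwrites the last stable copy
 * in place.
 */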
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct blk_plug plug;
	struct page *page;
	pgoff_t index;
	int i;

	blk_start_plug(&plug);

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
	blk_finish_plug(&plug);
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

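/*
 * grab_nat_entry() allocates with GFP_ATOMIC because it runs under
 * nat_tree_lock.  Callers that can sleep, such as cache_nat_entry() and
 * set_node_addr() below, therefore drop the write lock and retry until
 * the allocation succeeds rather than failing the operation.
 */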
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

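	/*
	 * A nat entry normally moves NULL_ADDR (free) -> NEW_ADDR (allocated
	 * but not yet written) -> a real block address (written), and back
	 * to NULL_ADDR on truncation.  The BUG_ON checks below reject any
	 * other transition.
	 */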
	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no. as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

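/*
 * A worked example of the index math below, assuming the common 4KB-block
 * geometry where ADDRS_PER_INODE is 923 and ADDRS_PER_BLOCK and
 * NIDS_PER_BLOCK are both 1018 (these constants depend on the on-disk
 * layout): file block 5000 lies past the 923 inode slots and the two
 * 1018-entry direct nodes, leaving 2041 inside the first indirect range,
 * so the path becomes {NODE_IND1_BLOCK, 2041 / 1018 = 2, 2041 % 1018 = 5}
 * with level 2.
 */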
/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

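/*
 * A sketch of the typical lookup pattern built on get_dnode_of_data(),
 * modelled on its callers in data.c (error handling elided):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */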
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op() only if mode is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

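/*
 * truncate_nodes() recursively frees a whole (double) indirect subtree.
 * On success it returns the number of node-offset slots released,
 * counting the subtree root itself; a missing child (nid == 0) still
 * counts as NIDS_PER_BLOCK + 1 entries so that the caller's node offset
 * bookkeeping stays consistent.
 */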
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

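/*
 * truncate_partial_nodes() handles the node that the truncation point
 * falls inside of: it walks the indirect path recorded in offset[],
 * frees the direct nodes from offset[depth - 1] onward, and drops the
 * deepest indirect node itself only when it becomes completely empty.
 */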
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

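/*
 * Truncation proceeds in two phases: truncate_partial_nodes() first trims
 * the subtree that contains the truncation point, then the loop below
 * walks the remaining i_nid[] slots and releases each direct, indirect,
 * and double-indirect subtree wholesale.
 */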
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage))
			return PTR_ERR(npage);

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible, after f2fs_new_inode() fails */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page))
		return PTR_ERR(page);
	init_dent_inode(name, page);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * The caller should act on the return value as follows:
 * 0: call f2fs_put_page(page, 0)
 * LOCKED_PAGE: call f2fs_put_page(page, 1)
 * error: nothing to release
 */
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
	return;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
got_it:
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page, and read ahead up to
 * MAX_RA_NODE sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);

page_hit:
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

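/*
 * Dirty node pages are flushed in up to three passes, keyed by "step":
 * pass 0 picks non-dnode (indirect) pages, pass 1 picks the hot dnodes
 * that belong to dentry blocks, and pass 2 picks the cold dnodes of
 * regular files.  When called on behalf of fsync (ino != 0), only the
 * last pass runs.
 */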
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), one segment size, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		f2fs_sync_fs(sbi->sb, true);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* write out as many node pages as a single bio can carry */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;

	/* 0 nid should not be used */
	if (nid == 0)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (start_nid >= nm_i->max_nid)
			break;
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

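/*
 * build_free_nids() scans the NAT from next_scan_nid, wrapping around at
 * max_nid, until more than MAX_FREE_NIDS candidates are collected or the
 * scan cycles back to its starting point.  It then reconciles the result
 * against the NAT journal in the hot data summary and against the nat
 * cache, since either may hold newer information than the NAT blocks.
 */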
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from currently allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again since the previous check is racy, as
	 * we didn't hold free_nid_list_lock, so another thread
	 * could have consumed all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	i->state = NID_NEW;
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

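/*
 * recover_inode_page() is used during roll-forward recovery: it rebuilds
 * an in-memory inode page from a node page found in the log, copying the
 * inode fields up to i_ext and resetting the size, block count, and link
 * count to the values a freshly created inode would have.
 */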
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not reuse this nid from the free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page for reading the node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}

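/*
 * When the NAT journal in the current summary block is already full,
 * flush_nats_in_journal() moves the journaled entries into the nat cache
 * (marking them dirty) so that this checkpoint writes everything through
 * full NAT blocks instead of the journal.
 */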
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sum page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block with the dirty flag set, an
			 * increased reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
					!add_free_nid(NM_I(sbi), nid)) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

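/*
 * A sizing example for the limits computed below, assuming 4KB blocks
 * where a NAT block packs 455 of the 9-byte struct f2fs_nat_entry
 * records: a hypothetical filesystem formatted with segment_count_nat = 4
 * and 512 blocks per segment has nat_segs = 2 and nat_blocks = 1024,
 * giving max_nid = 455 * 1024 = 465920 addressable nodes.
 */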
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}