xref: /openbmc/linux/fs/f2fs/node.c (revision 3aa770a9c9d077283b1aa07e8549a4fdc41fc5ed)
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (f2fs_readpage(sbi, page, index, READ)) {
			f2fs_put_page(page, 1);
			continue;
		}
		f2fs_put_page(page, 0);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry
		 * may remain in the nat cache, so reinitialize it
		 * with the new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no. as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
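
/*
 * Illustrative sketch (not part of the driver): how a caller resolves a
 * nid to its on-disk state via the three-level lookup above (nat cache,
 * then the journal in the current summary block, then the NAT block
 * itself). The helper name is hypothetical; NULL_ADDR means the node is
 * unallocated and NEW_ADDR means it is dirty but not yet written.
 */
#if 0
static block_t example_nid_to_blkaddr(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct node_info ni;

	get_node_info(sbi, nid, &ni);	/* never fails by design */
	return ni.blk_addr;		/* NULL_ADDR, NEW_ADDR, or a real block */
}
#endif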

/*
 * The maximum depth is four.
 * Offset[0] holds the raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n++] = block;
		level = 0;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n++] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n++] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n++] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
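
/*
 * Worked example (illustrative, not part of the driver): with the usual
 * 4KB-block constants (ADDRS_PER_INODE = 923, ADDRS_PER_BLOCK = 1018,
 * NIDS_PER_BLOCK = 1018 -- check f2fs.h for the real values), a file
 * block index of 1500 resolves as follows:
 *
 *   1500 >= 923   -> not in the inode; block -= 923 -> 577
 *   577 < 1018    -> falls in the first direct node
 *
 * so level = 1, offset[] = { NODE_DIR1_BLOCK, 577 } and noffset[1] = 1,
 * i.e. the address lives in slot 577 of the inode's first direct node.
 */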

/*
 * Caller should call f2fs_put_dnode(dn).
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			mutex_lock_op(sbi, NODE_NEW);

			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				mutex_unlock_op(sbi, NODE_NEW);
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				mutex_unlock_op(sbi, NODE_NEW);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			mutex_unlock_op(sbi, NODE_NEW);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
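
/*
 * Illustrative sketch (not part of the driver): a typical read-side
 * caller, modeled on how the data path uses this function. It assumes
 * the LOOKUP_NODE mode and the set_new_dnode()/f2fs_put_dnode() helpers
 * declared in f2fs.h; the function name itself is hypothetical.
 */
#if 0
static block_t example_lookup_data_blkaddr(struct inode *inode, pgoff_t index)
{
	struct dnode_of_data dn;
	block_t blkaddr;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, index, LOOKUP_NODE))
		return NULL_ADDR;	/* hole or error */

	blkaddr = dn.data_blkaddr;	/* filled in on success */
	f2fs_put_dnode(&dn);		/* drop the node page references */
	return blkaddr;
}
#endif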

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4];
	unsigned int noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}

int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	mutex_lock_op(sbi, NODE_TRUNC);
	page = get_node_page(sbi, ino);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_TRUNC);
		return PTR_ERR(page);
	}

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			mutex_unlock_op(sbi, NODE_TRUNC);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible, after f2fs_new_inode() has failed */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);

	mutex_unlock_op(sbi, NODE_TRUNC);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	mutex_lock_op(sbi, NODE_NEW);
	page = new_node_page(&dn, 0);
	/* check for failure before touching the new page */
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_NEW);
		return PTR_ERR(page);
	}
	init_dent_inode(name, page);
	mutex_unlock_op(sbi, NODE_NEW);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR)
		return -ENOENT;
	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage))
		goto release_out;
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	if (read_node_page(apage, READA))
		unlock_page(apage);

release_out:
	f2fs_put_page(apage, 0);
	return;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	int err;
	struct page *page;
	struct address_space *mapping = sbi->node_inode->i_mapping;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}
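
/*
 * Illustrative sketch (not part of the driver): get_node_page() returns
 * the page locked with an extra reference, so every successful call must
 * be paired with f2fs_put_page(page, 1) to unlock and release it.
 */
#if 0
	struct page *npage = get_node_page(sbi, nid);

	if (IS_ERR(npage))
		return PTR_ERR(npage);
	/* ... read or modify page_address(npage) ... */
	f2fs_put_page(npage, 1);	/* 1 == also unlock the page */
#endif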

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	int i, end;
	int err = 0;
	nid_t nid;
	struct page *page;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);
	else if (PageUptodate(page))
		goto page_hit;

	err = read_node_page(page, READ_SYNC);
	unlock_page(page);
	if (err) {
		f2fs_put_page(page, 0);
		return ERR_PTR(err);
	}

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (PageError(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}

	/* Has the page been truncated? */
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		f2fs_write_inode(dn->inode, NULL);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}
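
/*
 * Illustrative sketch (not part of the driver): the two ways this writer
 * is driven, inferred from the ino parameter above. A zero ino flushes
 * everything in the indirect -> dentry-dnode -> file-dnode order; a real
 * ino (the fsync path) writes only that inode's dnodes and tags them
 * with fsync/dentry marks for recovery.
 */
#if 0
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	sync_node_pages(sbi, 0, &wbc);			/* checkpoint-style flush */
	sync_node_pages(sbi, inode->i_ino, &wbc);	/* fsync-style flush */
#endif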

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	wait_on_page_writeback(page);

	mutex_lock_op(sbi, NODE_WRITE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR)
		goto out;

	set_page_writeback(page);

	/* insert node offset */
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock_op(sbi, NODE_WRITE);
	unlock_page(page);
	return 0;
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), one segment in size, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		write_checkpoint(sbi, false);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting has failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* 0 nid should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again since the previous check is racy, as
	 * we didn't hold free_nid_list_lock. So other threads
	 * could consume all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}
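
/*
 * Illustrative sketch (not part of the driver): the nid allocation
 * protocol implied by the functions around here. Every successful
 * alloc_nid() must be resolved with either alloc_nid_done() (the nid
 * was consumed) or alloc_nid_failed() (return it to the free list).
 */
#if 0
	nid_t nid;

	if (!alloc_nid(sbi, &nid))
		return -ENOSPC;			/* no free nids left */

	if (example_consume_nid(sbi, nid))	/* hypothetical user */
		alloc_nid_failed(sbi, nid);	/* give the nid back */
	else
		alloc_nid_done(sbi, nid);	/* nid is now in use */
#endif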

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i) {
		BUG_ON(i->state != NID_ALLOC);
		__del_from_free_nid_list(i);
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	alloc_nid_done(sbi, nid);
	add_free_nid(NM_I(sbi), nid);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* This ino should not be reused from the free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/*
	 * alloc temporary page for reading node blocks; note that
	 * alloc_page() returns NULL on failure, not an ERR_PTR
	 */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;

		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
		 */
		ClearPageUptodate(page);
	}
out:
	unlock_page(page);
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the curseg summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block with dirty flag, increased
			 * reference count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);

			/* We can reuse this freed nid at this point */
			add_free_nid(NM_I(sbi), nid);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}
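
/*
 * Worked example (illustrative): for a default-format volume with 4KB
 * blocks and 2MB segments (log_blocks_per_seg = 9, i.e. 512 blocks per
 * segment), a hypothetical segment_count_nat = 8 gives:
 *
 *   nat_segs   = 8 >> 1     = 4      (NAT segments come in pairs)
 *   nat_blocks = 4 << 9     = 2048
 *   max_nid    = 455 * 2048 = 931840
 *
 * where 455 is NAT_ENTRY_PER_BLOCK (a 4KB block holds 455 nine-byte
 * f2fs_nat_entry records); the figures assume mkfs defaults.
 */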

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}