/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

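/*
 * A sketch of the block-address transitions that the sanity checks in
 * set_node_addr() allow (NULL_ADDR: unallocated, NEW_ADDR: allocated
 * but not yet written out):
 *
 *	NULL_ADDR -> NEW_ADDR		a new node is allocated
 *	NEW_ADDR  -> valid blkaddr	the node page is written out
 *	valid     -> valid blkaddr	the node page is rewritten
 *	NEW/valid -> NULL_ADDR		the node is truncated
 */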
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous NAT entry can
		 * remain in the NAT cache, so reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version number, as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}

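/*
 * Clean NAT entries are kept on nm_i->nat_entries in LRU order (new
 * entries are added at the tail by grab_nat_entry()), so shrinking
 * below evicts the oldest entries from the head of the list.
 */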
static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always succeeds.
 */
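/*
 * Lookup order: the in-memory NAT cache first, then the NAT journal in
 * the current hot-data summary block, and finally the on-disk NAT page.
 */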
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * offset[0] will hold the offset within the raw inode.
 */
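/*
 * For illustration, assuming the usual 4KB-block constants of this
 * layout (ADDRS_PER_INODE = 923, ADDRS_PER_BLOCK = NIDS_PER_BLOCK =
 * 1018), a file block number falls into:
 *
 *	[0, 923)		level 0: addressed by the inode itself
 *	next 2 * 1018		level 1: two direct node blocks
 *	next 2 * 1018 * 1018	level 2: two indirect node blocks
 *	next 1018 * 1018 * 1018	level 3: one double-indirect node block
 */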
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 */
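/*
 * A minimal usage sketch (LOOKUP_NODE mode; error handling elided):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	... dn.node_page and dn.data_blkaddr are now valid ...
 *	f2fs_put_dnode(&dn);
 */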
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			mutex_lock_op(sbi, NODE_NEW);

			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				mutex_unlock_op(sbi, NODE_NEW);
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				mutex_unlock_op(sbi, NODE_NEW);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			mutex_unlock_op(sbi, NODE_NEW);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

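/*
 * Returns the number of node blocks freed (or already absent) below and
 * including this node: a hole (nid == 0) counts as a fully freed
 * subtree, i.e. NIDS_PER_BLOCK + 1 blocks per indirect-node slot.
 */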
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page))
		return PTR_ERR(page);

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	return freed;

out_err:
	f2fs_put_page(page, 1);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	level = get_node_path(from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	return err > 0 ? 0 : err;
}

int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	mutex_lock_op(sbi, NODE_TRUNC);
	page = get_node_page(sbi, ino);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_TRUNC);
		return PTR_ERR(page);
	}

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage)) {
			mutex_unlock_op(sbi, NODE_TRUNC);
			return PTR_ERR(npage);
		}

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible after f2fs_new_inode() fails */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);

	mutex_unlock_op(sbi, NODE_TRUNC);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	mutex_lock_op(sbi, NODE_NEW);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page)) {
		mutex_unlock_op(sbi, NODE_NEW);
		return PTR_ERR(page);
	}
	init_dent_inode(name, page);
	mutex_unlock_op(sbi, NODE_NEW);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page)) {
		unlock_page(page);
		return 0;
	}

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	if (read_node_page(apage, READA) == 0)
		f2fs_put_page(apage, 0);
	return;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	int err;
	struct page *page;
	struct address_space *mapping = sbi->node_inode->i_mapping;

	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page, and read ahead up to
 * MAX_RA_NODE of its sibling node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	int i, end;
	int err = 0;
	nid_t nid;
	struct page *page;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);

repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);
	else if (PageUptodate(page))
		goto page_hit;

	err = read_node_page(page, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	lock_page(page);

page_hit:
	if (PageError(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}

	/* Has the page been truncated? */
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		f2fs_write_inode(dn->inode, NULL);
	}
}

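/*
 * Flush dirty node pages. When ino is given (the fsync path), only the
 * dnodes belonging to that inode are written, so the walk starts at
 * step 2; otherwise all dirty node pages are flushed in three passes:
 * indirect nodes first, then dentry dnodes, then file dnodes.
 */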
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	mutex_lock_op(sbi, NODE_WRITE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR)
		goto out;

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		mutex_unlock_op(sbi, NODE_WRITE);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	set_page_writeback(page);

	/* insert node offset */
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock_op(sbi, NODE_WRITE);
	unlock_page(page);
	return 0;
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data
 * writes. By default, 512 pages (2MB), one segment size, is quite
 * reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	struct block_device *bdev = sbi->sb->s_bdev;
	long nr_to_write = wbc->nr_to_write;

	/* First, check whether the cached NAT entries need balancing */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		write_checkpoint(sbi, false);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting failed, skip writing node pages */
	wbc->nr_to_write = bio_get_nr_vecs(bdev);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write -
		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i) {
		cond_resched();
		goto retry;
	}
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

static int scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int fcnt = 0;
	int i;

	/* nid 0 should not be used */
	if (start_nid == 0)
		++start_nid;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			fcnt += add_free_nid(nm_i, start_nid);
	}
	return fcnt;
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t nid = 0;
	bool is_cycled = false;
	int fcnt = 0;
	int i;

	nid = nm_i->next_scan_nid;
	nm_i->init_scan_nid = nid;

	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		fcnt += scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));

		if (nid >= nm_i->max_nid) {
			nid = 0;
			is_cycled = true;
		}
		if (fcnt > MAX_FREE_NIDS)
			break;
		if (is_cycled && nm_i->init_scan_nid <= nid)
			break;
	}

	/* go to the next nat page in order to reuse free nids first */
	nm_i->next_scan_nid = nm_i->init_scan_nid + NAT_ENTRY_PER_BLOCK;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an
 * inode is created.
 */
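/*
 * A typical allocation sequence (a sketch of how get_dnode_of_data()
 * uses this API):
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);
 *	else
 *		alloc_nid_done(sbi, nid);
 */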
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	mutex_lock(&nm_i->build_lock);
	if (!nm_i->fcnt) {
		/* scan NAT in order to build free nid list */
		build_free_nids(sbi);
		if (!nm_i->fcnt) {
			mutex_unlock(&nm_i->build_lock);
			return false;
		}
	}
	mutex_unlock(&nm_i->build_lock);

	/*
	 * We check fcnt again, since the previous check is racy: we did
	 * not hold free_nid_list_lock, so another thread could have
	 * consumed all of the free nids.
	 */
	spin_lock(&nm_i->free_nid_list_lock);
	if (!nm_i->fcnt) {
		spin_unlock(&nm_i->free_nid_list_lock);
		goto retry;
	}

	BUG_ON(list_empty(&nm_i->free_nid_list));
	list_for_each(this, &nm_i->free_nid_list) {
		i = list_entry(this, struct free_nid, list);
		if (i->state == NID_NEW)
			break;
	}

	BUG_ON(i->state != NID_NEW);
	*nid = i->nid;
	i->state = NID_ALLOC;
	nm_i->fcnt--;
	spin_unlock(&nm_i->free_nid_list_lock);
	return true;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i) {
		BUG_ON(i->state != NID_ALLOC);
		__del_from_free_nid_list(i);
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	alloc_nid_done(sbi, nid);
	add_free_nid(NM_I(sbi), nid);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* This ino should not be used by the free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* allocate a temporary page for reading node blocks */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read the next node page, we must clear the
		 * PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}

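/*
 * When the NAT journal in the current hot-data summary block is full,
 * move its entries back into the dirty NAT cache, so that they are
 * written to their NAT blocks by flush_nat_entries() below instead.
 */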
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in the curseg summary page */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get the nat block page: dirty, with an increased
			 * reference count, mapped, and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);

			/* We can reuse this freed nid at this point */
			add_free_nid(NM_I(sbi), nid);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
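	/*
	 * Illustrative numbers (not read from this superblock): with 4KB
	 * blocks, NAT_ENTRY_PER_BLOCK is 455 (4096 / 9-byte entries), so
	 * 2 NAT segment pairs of 512 blocks each give nat_blocks = 1024
	 * and max_nid = 455 * 1024 = 465920.
	 */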
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

	nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}