xref: /openbmc/linux/fs/f2fs/node.c (revision 6b66a6f2)
1 /*
2  * fs/f2fs/node.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/fs.h>
12 #include <linux/f2fs_fs.h>
13 #include <linux/mpage.h>
14 #include <linux/backing-dev.h>
15 #include <linux/blkdev.h>
16 #include <linux/pagevec.h>
17 #include <linux/swap.h>
18 
19 #include "f2fs.h"
20 #include "node.h"
21 #include "segment.h"
22 #include "trace.h"
23 #include <trace/events/f2fs.h>
24 
25 #define on_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
26 
27 static struct kmem_cache *nat_entry_slab;
28 static struct kmem_cache *free_nid_slab;
29 static struct kmem_cache *nat_entry_set_slab;
30 
31 bool available_free_memory(struct f2fs_sb_info *sbi, int type)
32 {
33 	struct f2fs_nm_info *nm_i = NM_I(sbi);
34 	struct sysinfo val;
35 	unsigned long avail_ram;
36 	unsigned long mem_size = 0;
37 	bool res = false;
38 
39 	si_meminfo(&val);
40 
41 	/* only uses low memory */
42 	avail_ram = val.totalram - val.totalhigh;
43 
44 	/*
45 	 * give 25%, 25%, 50%, 50%, 50% memory to each component, respectively
46 	 */
47 	if (type == FREE_NIDS) {
48 		mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
49 				sizeof(struct free_nid)) >> PAGE_SHIFT;
50 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
51 	} else if (type == NAT_ENTRIES) {
52 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
53 							PAGE_SHIFT;
54 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
55 		if (excess_cached_nats(sbi))
56 			res = false;
57 	} else if (type == DIRTY_DENTS) {
58 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
59 			return false;
60 		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
61 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
62 	} else if (type == INO_ENTRIES) {
63 		int i;
64 
65 		for (i = 0; i <= UPDATE_INO; i++)
66 			mem_size += (sbi->im[i].ino_num *
67 				sizeof(struct ino_entry)) >> PAGE_SHIFT;
68 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
69 	} else if (type == EXTENT_CACHE) {
70 		mem_size = (atomic_read(&sbi->total_ext_tree) *
71 				sizeof(struct extent_tree) +
72 				atomic_read(&sbi->total_ext_node) *
73 				sizeof(struct extent_node)) >> PAGE_SHIFT;
74 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
75 	} else {
76 		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
77 			return true;
78 	}
79 	return res;
80 }
81 
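/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * cache-growth path could gate itself on this helper and fall back to
 * shrinking when the threshold is exceeded, e.g.:
 *
 *	if (!available_free_memory(sbi, NAT_ENTRIES))
 *		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
 */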
82 static void clear_node_page_dirty(struct page *page)
83 {
84 	struct address_space *mapping = page->mapping;
85 	unsigned long flags;
86 
87 	if (PageDirty(page)) {
88 		spin_lock_irqsave(&mapping->tree_lock, flags);
89 		radix_tree_tag_clear(&mapping->page_tree,
90 				page_index(page),
91 				PAGECACHE_TAG_DIRTY);
92 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
93 
94 		clear_page_dirty_for_io(page);
95 		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
96 	}
97 	ClearPageUptodate(page);
98 }
99 
100 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
101 {
102 	pgoff_t index = current_nat_addr(sbi, nid);
103 	return get_meta_page(sbi, index);
104 }
105 
106 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
107 {
108 	struct page *src_page;
109 	struct page *dst_page;
110 	pgoff_t src_off;
111 	pgoff_t dst_off;
112 	void *src_addr;
113 	void *dst_addr;
114 	struct f2fs_nm_info *nm_i = NM_I(sbi);
115 
116 	src_off = current_nat_addr(sbi, nid);
117 	dst_off = next_nat_addr(sbi, src_off);
118 
119 	/* get current nat block page with lock */
120 	src_page = get_meta_page(sbi, src_off);
121 	dst_page = grab_meta_page(sbi, dst_off);
122 	f2fs_bug_on(sbi, PageDirty(src_page));
123 
124 	src_addr = page_address(src_page);
125 	dst_addr = page_address(dst_page);
126 	memcpy(dst_addr, src_addr, PAGE_SIZE);
127 	set_page_dirty(dst_page);
128 	f2fs_put_page(src_page, 1);
129 
130 	set_to_next_nat(nm_i, nid);
131 
132 	return dst_page;
133 }
134 
135 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
136 {
137 	return radix_tree_lookup(&nm_i->nat_root, n);
138 }
139 
140 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
141 		nid_t start, unsigned int nr, struct nat_entry **ep)
142 {
143 	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
144 }
145 
146 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
147 {
148 	list_del(&e->list);
149 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
150 	nm_i->nat_cnt--;
151 	kmem_cache_free(nat_entry_slab, e);
152 }
153 
154 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
155 						struct nat_entry *ne)
156 {
157 	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
158 	struct nat_entry_set *head;
159 
160 	if (get_nat_flag(ne, IS_DIRTY))
161 		return;
162 
163 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
164 	if (!head) {
165 		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
166 
167 		INIT_LIST_HEAD(&head->entry_list);
168 		INIT_LIST_HEAD(&head->set_list);
169 		head->set = set;
170 		head->entry_cnt = 0;
171 		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
172 	}
173 	list_move_tail(&ne->list, &head->entry_list);
174 	nm_i->dirty_nat_cnt++;
175 	head->entry_cnt++;
176 	set_nat_flag(ne, IS_DIRTY, true);
177 }
178 
179 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
180 						struct nat_entry *ne)
181 {
182 	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
183 	struct nat_entry_set *head;
184 
185 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
186 	if (head) {
187 		list_move_tail(&ne->list, &nm_i->nat_entries);
188 		set_nat_flag(ne, IS_DIRTY, false);
189 		head->entry_cnt--;
190 		nm_i->dirty_nat_cnt--;
191 	}
192 }
193 
194 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
195 		nid_t start, unsigned int nr, struct nat_entry_set **ep)
196 {
197 	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
198 							start, nr);
199 }
200 
201 int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
202 {
203 	struct f2fs_nm_info *nm_i = NM_I(sbi);
204 	struct nat_entry *e;
205 	bool need = false;
206 
207 	down_read(&nm_i->nat_tree_lock);
208 	e = __lookup_nat_cache(nm_i, nid);
209 	if (e) {
210 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
211 				!get_nat_flag(e, HAS_FSYNCED_INODE))
212 			need = true;
213 	}
214 	up_read(&nm_i->nat_tree_lock);
215 	return need;
216 }
217 
218 bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
219 {
220 	struct f2fs_nm_info *nm_i = NM_I(sbi);
221 	struct nat_entry *e;
222 	bool is_cp = true;
223 
224 	down_read(&nm_i->nat_tree_lock);
225 	e = __lookup_nat_cache(nm_i, nid);
226 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
227 		is_cp = false;
228 	up_read(&nm_i->nat_tree_lock);
229 	return is_cp;
230 }
231 
232 bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
233 {
234 	struct f2fs_nm_info *nm_i = NM_I(sbi);
235 	struct nat_entry *e;
236 	bool need_update = true;
237 
238 	down_read(&nm_i->nat_tree_lock);
239 	e = __lookup_nat_cache(nm_i, ino);
240 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
241 			(get_nat_flag(e, IS_CHECKPOINTED) ||
242 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
243 		need_update = false;
244 	up_read(&nm_i->nat_tree_lock);
245 	return need_update;
246 }
247 
248 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
249 {
250 	struct nat_entry *new;
251 
252 	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
253 	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
254 	memset(new, 0, sizeof(struct nat_entry));
255 	nat_set_nid(new, nid);
256 	nat_reset_flag(new);
257 	list_add_tail(&new->list, &nm_i->nat_entries);
258 	nm_i->nat_cnt++;
259 	return new;
260 }
261 
262 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
263 						struct f2fs_nat_entry *ne)
264 {
265 	struct f2fs_nm_info *nm_i = NM_I(sbi);
266 	struct nat_entry *e;
267 
268 	e = __lookup_nat_cache(nm_i, nid);
269 	if (!e) {
270 		e = grab_nat_entry(nm_i, nid);
271 		node_info_from_raw_nat(&e->ni, ne);
272 	} else {
273 		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
274 				nat_get_blkaddr(e) !=
275 					le32_to_cpu(ne->block_addr) ||
276 				nat_get_version(e) != ne->version);
277 	}
278 }
279 
280 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
281 			block_t new_blkaddr, bool fsync_done)
282 {
283 	struct f2fs_nm_info *nm_i = NM_I(sbi);
284 	struct nat_entry *e;
285 
286 	down_write(&nm_i->nat_tree_lock);
287 	e = __lookup_nat_cache(nm_i, ni->nid);
288 	if (!e) {
289 		e = grab_nat_entry(nm_i, ni->nid);
290 		copy_node_info(&e->ni, ni);
291 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
292 	} else if (new_blkaddr == NEW_ADDR) {
293 		/*
294 		 * when a nid is reallocated, the previous nat entry
295 		 * may remain in the nat cache, so reinitialize it
296 		 * with the new information.
297 		 */
298 		copy_node_info(&e->ni, ni);
299 		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
300 	}
301 
302 	/* sanity check */
303 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
304 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
305 			new_blkaddr == NULL_ADDR);
306 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
307 			new_blkaddr == NEW_ADDR);
308 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
309 			nat_get_blkaddr(e) != NULL_ADDR &&
310 			new_blkaddr == NEW_ADDR);
311 
312 	/* increment the version no. as the node is removed */
313 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
314 		unsigned char version = nat_get_version(e);
315 		nat_set_version(e, inc_node_version(version));
316 
317 		/* in order to reuse the nid */
318 		if (nm_i->next_scan_nid > ni->nid)
319 			nm_i->next_scan_nid = ni->nid;
320 	}
321 
322 	/* change address */
323 	nat_set_blkaddr(e, new_blkaddr);
324 	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
325 		set_nat_flag(e, IS_CHECKPOINTED, false);
326 	__set_nat_cache_dirty(nm_i, e);
327 
328 	/* update fsync_mark if its inode nat entry is still alive */
329 	if (ni->nid != ni->ino)
330 		e = __lookup_nat_cache(nm_i, ni->ino);
331 	if (e) {
332 		if (fsync_done && ni->nid == ni->ino)
333 			set_nat_flag(e, HAS_FSYNCED_INODE, true);
334 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
335 	}
336 	up_write(&nm_i->nat_tree_lock);
337 }
338 
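/*
 * Illustrative lifecycle of a cached block address, as enforced by the
 * sanity checks in set_node_addr() above:
 *
 *	NULL_ADDR -> NEW_ADDR      node allocated, not yet written back
 *	NEW_ADDR  -> blkaddr       first writeback
 *	blkaddr   -> blkaddr'      node rewritten out of place
 *	any       -> NULL_ADDR     node freed; the version is bumped only
 *	                           when the node had reached the disk
 */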
339 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
340 {
341 	struct f2fs_nm_info *nm_i = NM_I(sbi);
342 	int nr = nr_shrink;
343 
344 	if (!down_write_trylock(&nm_i->nat_tree_lock))
345 		return 0;
346 
347 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
348 		struct nat_entry *ne;
349 		ne = list_first_entry(&nm_i->nat_entries,
350 					struct nat_entry, list);
351 		__del_from_nat_cache(nm_i, ne);
352 		nr_shrink--;
353 	}
354 	up_write(&nm_i->nat_tree_lock);
355 	return nr - nr_shrink;
356 }
357 
358 /*
359  * This function always succeeds: it fills @ni from cache, journal, or disk.
360  */
361 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
362 {
363 	struct f2fs_nm_info *nm_i = NM_I(sbi);
364 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
365 	struct f2fs_journal *journal = curseg->journal;
366 	nid_t start_nid = START_NID(nid);
367 	struct f2fs_nat_block *nat_blk;
368 	struct page *page = NULL;
369 	struct f2fs_nat_entry ne;
370 	struct nat_entry *e;
371 	int i;
372 
373 	ni->nid = nid;
374 
375 	/* Check nat cache */
376 	down_read(&nm_i->nat_tree_lock);
377 	e = __lookup_nat_cache(nm_i, nid);
378 	if (e) {
379 		ni->ino = nat_get_ino(e);
380 		ni->blk_addr = nat_get_blkaddr(e);
381 		ni->version = nat_get_version(e);
382 		up_read(&nm_i->nat_tree_lock);
383 		return;
384 	}
385 
386 	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
387 
388 	/* Check current segment summary */
389 	down_read(&curseg->journal_rwsem);
390 	i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
391 	if (i >= 0) {
392 		ne = nat_in_journal(journal, i);
393 		node_info_from_raw_nat(ni, &ne);
394 	}
395 	up_read(&curseg->journal_rwsem);
396 	if (i >= 0)
397 		goto cache;
398 
399 	/* Fill node_info from nat page */
400 	page = get_current_nat_page(sbi, start_nid);
401 	nat_blk = (struct f2fs_nat_block *)page_address(page);
402 	ne = nat_blk->entries[nid - start_nid];
403 	node_info_from_raw_nat(ni, &ne);
404 	f2fs_put_page(page, 1);
405 cache:
406 	up_read(&nm_i->nat_tree_lock);
407 	/* cache nat entry */
408 	down_write(&nm_i->nat_tree_lock);
409 	cache_nat_entry(sbi, nid, &ne);
410 	up_write(&nm_i->nat_tree_lock);
411 }
412 
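/*
 * Minimal usage sketch (illustrative): callers resolve a nid into a
 * node_info on the stack and then act on the block address, e.g.:
 *
 *	struct node_info ni;
 *
 *	get_node_info(sbi, nid, &ni);
 *	if (ni.blk_addr == NULL_ADDR)
 *		return -ENOENT;
 */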
413 /*
414  * readahead up to @n sibling node pages of @parent, starting at @start.
415  */
416 static void ra_node_pages(struct page *parent, int start, int n)
417 {
418 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
419 	struct blk_plug plug;
420 	int i, end;
421 	nid_t nid;
422 
423 	blk_start_plug(&plug);
424 
425 	/* try readahead for siblings of the desired node */
426 	end = start + n;
427 	end = min(end, NIDS_PER_BLOCK);
428 	for (i = start; i < end; i++) {
429 		nid = get_nid(parent, i, false);
430 		ra_node_page(sbi, nid);
431 	}
432 
433 	blk_finish_plug(&plug);
434 }
435 
436 pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
437 {
438 	const long direct_index = ADDRS_PER_INODE(dn->inode);
439 	const long direct_blks = ADDRS_PER_BLOCK;
440 	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
441 	unsigned int skipped_unit = ADDRS_PER_BLOCK;
442 	int cur_level = dn->cur_level;
443 	int max_level = dn->max_level;
444 	pgoff_t base = 0;
445 
446 	if (!dn->max_level)
447 		return pgofs + 1;
448 
449 	while (max_level-- > cur_level)
450 		skipped_unit *= NIDS_PER_BLOCK;
451 
452 	switch (dn->max_level) {
453 	case 3:
454 		base += 2 * indirect_blks;	/* fall through */
455 	case 2:
456 		base += 2 * direct_blks;	/* fall through */
457 	case 1:
458 		base += direct_index;
459 		break;
460 	default:
461 		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
462 	}
463 
464 	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
465 }
466 
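/*
 * Illustrative example: when a lookup fails at cur_level 1 of a
 * max_level 2 path, skipped_unit grows to ADDRS_PER_BLOCK * NIDS_PER_BLOCK,
 * the whole span covered by the missing indirect node, so the returned
 * offset is the first file block served by the next indirect node at
 * the same depth.
 */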
467 /*
468  * The maximum depth is four.
469  * Offset[0] will have raw inode offset.
470  */
471 static int get_node_path(struct inode *inode, long block,
472 				int offset[4], unsigned int noffset[4])
473 {
474 	const long direct_index = ADDRS_PER_INODE(inode);
475 	const long direct_blks = ADDRS_PER_BLOCK;
476 	const long dptrs_per_blk = NIDS_PER_BLOCK;
477 	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
478 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
479 	int n = 0;
480 	int level = 0;
481 
482 	noffset[0] = 0;
483 
484 	if (block < direct_index) {
485 		offset[n] = block;
486 		goto got;
487 	}
488 	block -= direct_index;
489 	if (block < direct_blks) {
490 		offset[n++] = NODE_DIR1_BLOCK;
491 		noffset[n] = 1;
492 		offset[n] = block;
493 		level = 1;
494 		goto got;
495 	}
496 	block -= direct_blks;
497 	if (block < direct_blks) {
498 		offset[n++] = NODE_DIR2_BLOCK;
499 		noffset[n] = 2;
500 		offset[n] = block;
501 		level = 1;
502 		goto got;
503 	}
504 	block -= direct_blks;
505 	if (block < indirect_blks) {
506 		offset[n++] = NODE_IND1_BLOCK;
507 		noffset[n] = 3;
508 		offset[n++] = block / direct_blks;
509 		noffset[n] = 4 + offset[n - 1];
510 		offset[n] = block % direct_blks;
511 		level = 2;
512 		goto got;
513 	}
514 	block -= indirect_blks;
515 	if (block < indirect_blks) {
516 		offset[n++] = NODE_IND2_BLOCK;
517 		noffset[n] = 4 + dptrs_per_blk;
518 		offset[n++] = block / direct_blks;
519 		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
520 		offset[n] = block % direct_blks;
521 		level = 2;
522 		goto got;
523 	}
524 	block -= indirect_blks;
525 	if (block < dindirect_blks) {
526 		offset[n++] = NODE_DIND_BLOCK;
527 		noffset[n] = 5 + (dptrs_per_blk * 2);
528 		offset[n++] = block / indirect_blks;
529 		noffset[n] = 6 + (dptrs_per_blk * 2) +
530 			      offset[n - 1] * (dptrs_per_blk + 1);
531 		offset[n++] = (block / direct_blks) % dptrs_per_blk;
532 		noffset[n] = 7 + (dptrs_per_blk * 2) +
533 			      offset[n - 2] * (dptrs_per_blk + 1) +
534 			      offset[n - 1];
535 		offset[n] = block % direct_blks;
536 		level = 3;
537 		goto got;
538 	} else {
539 		BUG();
540 	}
541 got:
542 	return level;
543 }
544 
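/*
 * Worked example (illustrative, assuming no inline xattr space so that
 * ADDRS_PER_INODE(inode) == 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 * for block 2000, 2000 - 923 = 1077 >= 1018, so it is not in DIR1;
 * 1077 - 1018 = 59 < 1018, so the path is level 1 with
 * offset[0] = NODE_DIR2_BLOCK and offset[1] = 59, i.e. the 60th slot
 * of the second direct node.
 */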
545 /*
546  * Caller should call f2fs_put_dnode(dn).
547  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
548  * f2fs_unlock_op() only if mode is set with ALLOC_NODE, since new node
549  * pages may be allocated. In the LOOKUP_NODE cases, no locking is needed.
550  */
551 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
552 {
553 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
554 	struct page *npage[4];
555 	struct page *parent = NULL;
556 	int offset[4];
557 	unsigned int noffset[4];
558 	nid_t nids[4];
559 	int level, i = 0;
560 	int err = 0;
561 
562 	level = get_node_path(dn->inode, index, offset, noffset);
563 
564 	nids[0] = dn->inode->i_ino;
565 	npage[0] = dn->inode_page;
566 
567 	if (!npage[0]) {
568 		npage[0] = get_node_page(sbi, nids[0]);
569 		if (IS_ERR(npage[0]))
570 			return PTR_ERR(npage[0]);
571 	}
572 
573 	/* if inline_data is set, should not report any block indices */
574 	if (f2fs_has_inline_data(dn->inode) && index) {
575 		err = -ENOENT;
576 		f2fs_put_page(npage[0], 1);
577 		goto release_out;
578 	}
579 
580 	parent = npage[0];
581 	if (level != 0)
582 		nids[1] = get_nid(parent, offset[0], true);
583 	dn->inode_page = npage[0];
584 	dn->inode_page_locked = true;
585 
586 	/* get indirect or direct nodes */
587 	for (i = 1; i <= level; i++) {
588 		bool done = false;
589 
590 		if (!nids[i] && mode == ALLOC_NODE) {
591 			/* alloc new node */
592 			if (!alloc_nid(sbi, &(nids[i]))) {
593 				err = -ENOSPC;
594 				goto release_pages;
595 			}
596 
597 			dn->nid = nids[i];
598 			npage[i] = new_node_page(dn, noffset[i], NULL);
599 			if (IS_ERR(npage[i])) {
600 				alloc_nid_failed(sbi, nids[i]);
601 				err = PTR_ERR(npage[i]);
602 				goto release_pages;
603 			}
604 
605 			set_nid(parent, offset[i - 1], nids[i], i == 1);
606 			alloc_nid_done(sbi, nids[i]);
607 			done = true;
608 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
609 			npage[i] = get_node_page_ra(parent, offset[i - 1]);
610 			if (IS_ERR(npage[i])) {
611 				err = PTR_ERR(npage[i]);
612 				goto release_pages;
613 			}
614 			done = true;
615 		}
616 		if (i == 1) {
617 			dn->inode_page_locked = false;
618 			unlock_page(parent);
619 		} else {
620 			f2fs_put_page(parent, 1);
621 		}
622 
623 		if (!done) {
624 			npage[i] = get_node_page(sbi, nids[i]);
625 			if (IS_ERR(npage[i])) {
626 				err = PTR_ERR(npage[i]);
627 				f2fs_put_page(npage[0], 0);
628 				goto release_out;
629 			}
630 		}
631 		if (i < level) {
632 			parent = npage[i];
633 			nids[i + 1] = get_nid(parent, offset[i], false);
634 		}
635 	}
636 	dn->nid = nids[level];
637 	dn->ofs_in_node = offset[level];
638 	dn->node_page = npage[level];
639 	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
640 	return 0;
641 
642 release_pages:
643 	f2fs_put_page(parent, 1);
644 	if (i > 1)
645 		f2fs_put_page(npage[0], 0);
646 release_out:
647 	dn->inode_page = NULL;
648 	dn->node_page = NULL;
649 	if (err == -ENOENT) {
650 		dn->cur_level = i;
651 		dn->max_level = level;
652 		dn->ofs_in_node = offset[level];
653 	}
654 	return err;
655 }
656 
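/*
 * Typical lookup pattern (illustrative; data paths such as
 * f2fs_map_blocks() follow this shape):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = dn.data_blkaddr;
 *	f2fs_put_dnode(&dn);
 */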
657 static void truncate_node(struct dnode_of_data *dn)
658 {
659 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
660 	struct node_info ni;
661 
662 	get_node_info(sbi, dn->nid, &ni);
663 	if (dn->inode->i_blocks == 0) {
664 		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
665 		goto invalidate;
666 	}
667 	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
668 
669 	/* Deallocate node address */
670 	invalidate_blocks(sbi, ni.blk_addr);
671 	dec_valid_node_count(sbi, dn->inode);
672 	set_node_addr(sbi, &ni, NULL_ADDR, false);
673 
674 	if (dn->nid == dn->inode->i_ino) {
675 		remove_orphan_inode(sbi, dn->nid);
676 		dec_valid_inode_count(sbi);
677 		f2fs_inode_synced(dn->inode);
678 	}
679 invalidate:
680 	clear_node_page_dirty(dn->node_page);
681 	set_sbi_flag(sbi, SBI_IS_DIRTY);
682 
683 	f2fs_put_page(dn->node_page, 1);
684 
685 	invalidate_mapping_pages(NODE_MAPPING(sbi),
686 			dn->node_page->index, dn->node_page->index);
687 
688 	dn->node_page = NULL;
689 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
690 }
691 
692 static int truncate_dnode(struct dnode_of_data *dn)
693 {
694 	struct page *page;
695 
696 	if (dn->nid == 0)
697 		return 1;
698 
699 	/* get direct node */
700 	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
701 	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
702 		return 1;
703 	else if (IS_ERR(page))
704 		return PTR_ERR(page);
705 
706 	/* Make dnode_of_data for parameter */
707 	dn->node_page = page;
708 	dn->ofs_in_node = 0;
709 	truncate_data_blocks(dn);
710 	truncate_node(dn);
711 	return 1;
712 }
713 
714 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
715 						int ofs, int depth)
716 {
717 	struct dnode_of_data rdn = *dn;
718 	struct page *page;
719 	struct f2fs_node *rn;
720 	nid_t child_nid;
721 	unsigned int child_nofs;
722 	int freed = 0;
723 	int i, ret;
724 
725 	if (dn->nid == 0)
726 		return NIDS_PER_BLOCK + 1;
727 
728 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
729 
730 	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
731 	if (IS_ERR(page)) {
732 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
733 		return PTR_ERR(page);
734 	}
735 
736 	ra_node_pages(page, ofs, NIDS_PER_BLOCK);
737 
738 	rn = F2FS_NODE(page);
739 	if (depth < 3) {
740 		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
741 			child_nid = le32_to_cpu(rn->in.nid[i]);
742 			if (child_nid == 0)
743 				continue;
744 			rdn.nid = child_nid;
745 			ret = truncate_dnode(&rdn);
746 			if (ret < 0)
747 				goto out_err;
748 			if (set_nid(page, i, 0, false))
749 				dn->node_changed = true;
750 		}
751 	} else {
752 		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
753 		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
754 			child_nid = le32_to_cpu(rn->in.nid[i]);
755 			if (child_nid == 0) {
756 				child_nofs += NIDS_PER_BLOCK + 1;
757 				continue;
758 			}
759 			rdn.nid = child_nid;
760 			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
761 			if (ret == (NIDS_PER_BLOCK + 1)) {
762 				if (set_nid(page, i, 0, false))
763 					dn->node_changed = true;
764 				child_nofs += ret;
765 			} else if (ret < 0 && ret != -ENOENT) {
766 				goto out_err;
767 			}
768 		}
769 		freed = child_nofs;
770 	}
771 
772 	if (!ofs) {
773 		/* remove current indirect node */
774 		dn->node_page = page;
775 		truncate_node(dn);
776 		freed++;
777 	} else {
778 		f2fs_put_page(page, 1);
779 	}
780 	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
781 	return freed;
782 
783 out_err:
784 	f2fs_put_page(page, 1);
785 	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
786 	return ret;
787 }
788 
789 static int truncate_partial_nodes(struct dnode_of_data *dn,
790 			struct f2fs_inode *ri, int *offset, int depth)
791 {
792 	struct page *pages[2];
793 	nid_t nid[3];
794 	nid_t child_nid;
795 	int err = 0;
796 	int i;
797 	int idx = depth - 2;
798 
799 	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
800 	if (!nid[0])
801 		return 0;
802 
803 	/* get indirect nodes in the path */
804 	for (i = 0; i < idx + 1; i++) {
805 		/* the reference count will be increased */
806 		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
807 		if (IS_ERR(pages[i])) {
808 			err = PTR_ERR(pages[i]);
809 			idx = i - 1;
810 			goto fail;
811 		}
812 		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
813 	}
814 
815 	ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
816 
817 	/* free direct nodes linked to a partial indirect node */
818 	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
819 		child_nid = get_nid(pages[idx], i, false);
820 		if (!child_nid)
821 			continue;
822 		dn->nid = child_nid;
823 		err = truncate_dnode(dn);
824 		if (err < 0)
825 			goto fail;
826 		if (set_nid(pages[idx], i, 0, false))
827 			dn->node_changed = true;
828 	}
829 
830 	if (offset[idx + 1] == 0) {
831 		dn->node_page = pages[idx];
832 		dn->nid = nid[idx];
833 		truncate_node(dn);
834 	} else {
835 		f2fs_put_page(pages[idx], 1);
836 	}
837 	offset[idx]++;
838 	offset[idx + 1] = 0;
839 	idx--;
840 fail:
841 	for (i = idx; i >= 0; i--)
842 		f2fs_put_page(pages[i], 1);
843 
844 	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
845 
846 	return err;
847 }
848 
849 /*
850  * All the block addresses of data and nodes should be nullified.
851  */
852 int truncate_inode_blocks(struct inode *inode, pgoff_t from)
853 {
854 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
855 	int err = 0, cont = 1;
856 	int level, offset[4], noffset[4];
857 	unsigned int nofs = 0;
858 	struct f2fs_inode *ri;
859 	struct dnode_of_data dn;
860 	struct page *page;
861 
862 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
863 
864 	level = get_node_path(inode, from, offset, noffset);
865 
866 	page = get_node_page(sbi, inode->i_ino);
867 	if (IS_ERR(page)) {
868 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
869 		return PTR_ERR(page);
870 	}
871 
872 	set_new_dnode(&dn, inode, page, NULL, 0);
873 	unlock_page(page);
874 
875 	ri = F2FS_INODE(page);
876 	switch (level) {
877 	case 0:
878 	case 1:
879 		nofs = noffset[1];
880 		break;
881 	case 2:
882 		nofs = noffset[1];
883 		if (!offset[level - 1])
884 			goto skip_partial;
885 		err = truncate_partial_nodes(&dn, ri, offset, level);
886 		if (err < 0 && err != -ENOENT)
887 			goto fail;
888 		nofs += 1 + NIDS_PER_BLOCK;
889 		break;
890 	case 3:
891 		nofs = 5 + 2 * NIDS_PER_BLOCK;
892 		if (!offset[level - 1])
893 			goto skip_partial;
894 		err = truncate_partial_nodes(&dn, ri, offset, level);
895 		if (err < 0 && err != -ENOENT)
896 			goto fail;
897 		break;
898 	default:
899 		BUG();
900 	}
901 
902 skip_partial:
903 	while (cont) {
904 		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
905 		switch (offset[0]) {
906 		case NODE_DIR1_BLOCK:
907 		case NODE_DIR2_BLOCK:
908 			err = truncate_dnode(&dn);
909 			break;
910 
911 		case NODE_IND1_BLOCK:
912 		case NODE_IND2_BLOCK:
913 			err = truncate_nodes(&dn, nofs, offset[1], 2);
914 			break;
915 
916 		case NODE_DIND_BLOCK:
917 			err = truncate_nodes(&dn, nofs, offset[1], 3);
918 			cont = 0;
919 			break;
920 
921 		default:
922 			BUG();
923 		}
924 		if (err < 0 && err != -ENOENT)
925 			goto fail;
926 		if (offset[1] == 0 &&
927 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
928 			lock_page(page);
929 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
930 			f2fs_wait_on_page_writeback(page, NODE, true);
931 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
932 			set_page_dirty(page);
933 			unlock_page(page);
934 		}
935 		offset[1] = 0;
936 		offset[0]++;
937 		nofs += err;
938 	}
939 fail:
940 	f2fs_put_page(page, 0);
941 	trace_f2fs_truncate_inode_blocks_exit(inode, err);
942 	return err > 0 ? 0 : err;
943 }
944 
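/*
 * Illustrative example: truncating a file to one 4KB block makes
 * f2fs_truncate_blocks() call truncate_inode_blocks(inode, 1), which
 * nullifies every block address from file block 1 upward and frees any
 * direct or indirect node page that becomes fully empty along the way.
 */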
945 int truncate_xattr_node(struct inode *inode, struct page *page)
946 {
947 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
948 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
949 	struct dnode_of_data dn;
950 	struct page *npage;
951 
952 	if (!nid)
953 		return 0;
954 
955 	npage = get_node_page(sbi, nid);
956 	if (IS_ERR(npage))
957 		return PTR_ERR(npage);
958 
959 	f2fs_i_xnid_write(inode, 0);
960 
961 	/* need to do checkpoint during fsync */
962 	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
963 
964 	set_new_dnode(&dn, inode, page, npage, nid);
965 
966 	if (page)
967 		dn.inode_page_locked = true;
968 	truncate_node(&dn);
969 	return 0;
970 }
971 
972 /*
973  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
974  * f2fs_unlock_op().
975  */
976 int remove_inode_page(struct inode *inode)
977 {
978 	struct dnode_of_data dn;
979 	int err;
980 
981 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
982 	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
983 	if (err)
984 		return err;
985 
986 	err = truncate_xattr_node(inode, dn.inode_page);
987 	if (err) {
988 		f2fs_put_dnode(&dn);
989 		return err;
990 	}
991 
992 	/* remove potential inline_data blocks */
993 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
994 				S_ISLNK(inode->i_mode))
995 		truncate_data_blocks_range(&dn, 1);
996 
997 	/* i_blocks can be 0 here, after f2fs_new_inode() has failed */
998 	f2fs_bug_on(F2FS_I_SB(inode),
999 			inode->i_blocks != 0 && inode->i_blocks != 1);
1000 
1001 	/* will put inode & node pages */
1002 	truncate_node(&dn);
1003 	return 0;
1004 }
1005 
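/*
 * Illustrative call site: inode eviction drops the on-disk inode under
 * the cp rwsem, roughly:
 *
 *	f2fs_lock_op(sbi);
 *	remove_inode_page(inode);
 *	f2fs_unlock_op(sbi);
 */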
1006 struct page *new_inode_page(struct inode *inode)
1007 {
1008 	struct dnode_of_data dn;
1009 
1010 	/* allocate inode page for new inode */
1011 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1012 
1013 	/* caller should f2fs_put_page(page, 1); */
1014 	return new_node_page(&dn, 0, NULL);
1015 }
1016 
1017 struct page *new_node_page(struct dnode_of_data *dn,
1018 				unsigned int ofs, struct page *ipage)
1019 {
1020 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1021 	struct node_info old_ni, new_ni;
1022 	struct page *page;
1023 	int err;
1024 
1025 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1026 		return ERR_PTR(-EPERM);
1027 
1028 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1029 	if (!page)
1030 		return ERR_PTR(-ENOMEM);
1031 
1032 	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
1033 		err = -ENOSPC;
1034 		goto fail;
1035 	}
1036 
1037 	get_node_info(sbi, dn->nid, &old_ni);
1038 
1039 	/* Reinitialize old_ni with new node page */
1040 	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
1041 	new_ni = old_ni;
1042 	new_ni.ino = dn->inode->i_ino;
1043 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1044 
1045 	f2fs_wait_on_page_writeback(page, NODE, true);
1046 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1047 	set_cold_node(dn->inode, page);
1048 	if (!PageUptodate(page))
1049 		SetPageUptodate(page);
1050 	if (set_page_dirty(page))
1051 		dn->node_changed = true;
1052 
1053 	if (f2fs_has_xattr_block(ofs))
1054 		f2fs_i_xnid_write(dn->inode, dn->nid);
1055 
1056 	if (ofs == 0)
1057 		inc_valid_inode_count(sbi);
1058 	return page;
1059 
1060 fail:
1061 	clear_node_page_dirty(page);
1062 	f2fs_put_page(page, 1);
1063 	return ERR_PTR(err);
1064 }
1065 
1066 /*
1067  * Caller must clean up the page according to the return value:
1068  * 0: f2fs_put_page(page, 0)
1069  * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1070  */
1071 static int read_node_page(struct page *page, int op_flags)
1072 {
1073 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1074 	struct node_info ni;
1075 	struct f2fs_io_info fio = {
1076 		.sbi = sbi,
1077 		.type = NODE,
1078 		.op = REQ_OP_READ,
1079 		.op_flags = op_flags,
1080 		.page = page,
1081 		.encrypted_page = NULL,
1082 	};
1083 
1084 	if (PageUptodate(page))
1085 		return LOCKED_PAGE;
1086 
1087 	get_node_info(sbi, page->index, &ni);
1088 
1089 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
1090 		ClearPageUptodate(page);
1091 		return -ENOENT;
1092 	}
1093 
1094 	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1095 	return f2fs_submit_page_bio(&fio);
1096 }
1097 
1098 /*
1099  * Readahead a node page
1100  */
1101 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1102 {
1103 	struct page *apage;
1104 	int err;
1105 
1106 	if (!nid)
1107 		return;
1108 	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1109 
1110 	rcu_read_lock();
1111 	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
1112 	rcu_read_unlock();
1113 	if (apage)
1114 		return;
1115 
1116 	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1117 	if (!apage)
1118 		return;
1119 
1120 	err = read_node_page(apage, REQ_RAHEAD);
1121 	f2fs_put_page(apage, err ? 1 : 0);
1122 }
1123 
1124 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1125 					struct page *parent, int start)
1126 {
1127 	struct page *page;
1128 	int err;
1129 
1130 	if (!nid)
1131 		return ERR_PTR(-ENOENT);
1132 	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
1133 repeat:
1134 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1135 	if (!page)
1136 		return ERR_PTR(-ENOMEM);
1137 
1138 	err = read_node_page(page, 0);
1139 	if (err < 0) {
1140 		f2fs_put_page(page, 1);
1141 		return ERR_PTR(err);
1142 	} else if (err == LOCKED_PAGE) {
1143 		goto page_hit;
1144 	}
1145 
1146 	if (parent)
1147 		ra_node_pages(parent, start + 1, MAX_RA_NODE);
1148 
1149 	lock_page(page);
1150 
1151 	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1152 		f2fs_put_page(page, 1);
1153 		goto repeat;
1154 	}
1155 
1156 	if (unlikely(!PageUptodate(page)))
1157 		goto out_err;
1158 page_hit:
1159 	if (unlikely(nid != nid_of_node(page))) {
1160 		f2fs_bug_on(sbi, 1);
1161 		ClearPageUptodate(page);
1162 out_err:
1163 		f2fs_put_page(page, 1);
1164 		return ERR_PTR(-EIO);
1165 	}
1166 	return page;
1167 }
1168 
1169 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1170 {
1171 	return __get_node_page(sbi, nid, NULL, 0);
1172 }
1173 
1174 struct page *get_node_page_ra(struct page *parent, int start)
1175 {
1176 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1177 	nid_t nid = get_nid(parent, start, false);
1178 
1179 	return __get_node_page(sbi, nid, parent, start);
1180 }
1181 
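/*
 * On success, callers of get_node_page() own one page reference plus
 * the page lock and must release both (illustrative):
 *
 *	page = get_node_page(sbi, nid);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... read or modify the node ...
 *	f2fs_put_page(page, 1);
 */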
1182 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1183 {
1184 	struct inode *inode;
1185 	struct page *page;
1186 	int ret;
1187 
1188 	/* should flush inline_data before evict_inode */
1189 	inode = ilookup(sbi->sb, ino);
1190 	if (!inode)
1191 		return;
1192 
1193 	page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
1194 	if (!page)
1195 		goto iput_out;
1196 
1197 	if (!PageUptodate(page))
1198 		goto page_out;
1199 
1200 	if (!PageDirty(page))
1201 		goto page_out;
1202 
1203 	if (!clear_page_dirty_for_io(page))
1204 		goto page_out;
1205 
1206 	ret = f2fs_write_inline_data(inode, page);
1207 	inode_dec_dirty_pages(inode);
1208 	remove_dirty_inode(inode);
1209 	if (ret)
1210 		set_page_dirty(page);
1211 page_out:
1212 	f2fs_put_page(page, 1);
1213 iput_out:
1214 	iput(inode);
1215 }
1216 
1217 void move_node_page(struct page *node_page, int gc_type)
1218 {
1219 	if (gc_type == FG_GC) {
1220 		struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
1221 		struct writeback_control wbc = {
1222 			.sync_mode = WB_SYNC_ALL,
1223 			.nr_to_write = 1,
1224 			.for_reclaim = 0,
1225 		};
1226 
1227 		set_page_dirty(node_page);
1228 		f2fs_wait_on_page_writeback(node_page, NODE, true);
1229 
1230 		f2fs_bug_on(sbi, PageWriteback(node_page));
1231 		if (!clear_page_dirty_for_io(node_page))
1232 			goto out_page;
1233 
1234 		if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
1235 			unlock_page(node_page);
1236 		goto release_page;
1237 	} else {
1238 		/* set page dirty and write it */
1239 		if (!PageWriteback(node_page))
1240 			set_page_dirty(node_page);
1241 	}
1242 out_page:
1243 	unlock_page(node_page);
1244 release_page:
1245 	f2fs_put_page(node_page, 0);
1246 }
1247 
1248 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1249 {
1250 	pgoff_t index, end;
1251 	struct pagevec pvec;
1252 	struct page *last_page = NULL;
1253 
1254 	pagevec_init(&pvec, 0);
1255 	index = 0;
1256 	end = ULONG_MAX;
1257 
1258 	while (index <= end) {
1259 		int i, nr_pages;
1260 		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1261 				PAGECACHE_TAG_DIRTY,
1262 				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1263 		if (nr_pages == 0)
1264 			break;
1265 
1266 		for (i = 0; i < nr_pages; i++) {
1267 			struct page *page = pvec.pages[i];
1268 
1269 			if (unlikely(f2fs_cp_error(sbi))) {
1270 				f2fs_put_page(last_page, 0);
1271 				pagevec_release(&pvec);
1272 				return ERR_PTR(-EIO);
1273 			}
1274 
1275 			if (!IS_DNODE(page) || !is_cold_node(page))
1276 				continue;
1277 			if (ino_of_node(page) != ino)
1278 				continue;
1279 
1280 			lock_page(page);
1281 
1282 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1283 continue_unlock:
1284 				unlock_page(page);
1285 				continue;
1286 			}
1287 			if (ino_of_node(page) != ino)
1288 				goto continue_unlock;
1289 
1290 			if (!PageDirty(page)) {
1291 				/* someone wrote it for us */
1292 				goto continue_unlock;
1293 			}
1294 
1295 			if (last_page)
1296 				f2fs_put_page(last_page, 0);
1297 
1298 			get_page(page);
1299 			last_page = page;
1300 			unlock_page(page);
1301 		}
1302 		pagevec_release(&pvec);
1303 		cond_resched();
1304 	}
1305 	return last_page;
1306 }
1307 
1308 int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1309 			struct writeback_control *wbc, bool atomic)
1310 {
1311 	pgoff_t index, end;
1312 	struct pagevec pvec;
1313 	int ret = 0;
1314 	struct page *last_page = NULL;
1315 	bool marked = false;
1316 	nid_t ino = inode->i_ino;
1317 	int nwritten = 0;
1318 
1319 	if (atomic) {
1320 		last_page = last_fsync_dnode(sbi, ino);
1321 		if (IS_ERR_OR_NULL(last_page))
1322 			return PTR_ERR_OR_ZERO(last_page);
1323 	}
1324 retry:
1325 	pagevec_init(&pvec, 0);
1326 	index = 0;
1327 	end = ULONG_MAX;
1328 
1329 	while (index <= end) {
1330 		int i, nr_pages;
1331 		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1332 				PAGECACHE_TAG_DIRTY,
1333 				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1334 		if (nr_pages == 0)
1335 			break;
1336 
1337 		for (i = 0; i < nr_pages; i++) {
1338 			struct page *page = pvec.pages[i];
1339 
1340 			if (unlikely(f2fs_cp_error(sbi))) {
1341 				f2fs_put_page(last_page, 0);
1342 				pagevec_release(&pvec);
1343 				ret = -EIO;
1344 				goto out;
1345 			}
1346 
1347 			if (!IS_DNODE(page) || !is_cold_node(page))
1348 				continue;
1349 			if (ino_of_node(page) != ino)
1350 				continue;
1351 
1352 			lock_page(page);
1353 
1354 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1355 continue_unlock:
1356 				unlock_page(page);
1357 				continue;
1358 			}
1359 			if (ino_of_node(page) != ino)
1360 				goto continue_unlock;
1361 
1362 			if (!PageDirty(page) && page != last_page) {
1363 				/* someone wrote it for us */
1364 				goto continue_unlock;
1365 			}
1366 
1367 			f2fs_wait_on_page_writeback(page, NODE, true);
1368 			BUG_ON(PageWriteback(page));
1369 
1370 			if (!atomic || page == last_page) {
1371 				set_fsync_mark(page, 1);
1372 				if (IS_INODE(page)) {
1373 					if (is_inode_flag_set(inode,
1374 								FI_DIRTY_INODE))
1375 						update_inode(inode, page);
1376 					set_dentry_mark(page,
1377 						need_dentry_mark(sbi, ino));
1378 				}
1379 				/* may be written by another thread */
1380 				if (!PageDirty(page))
1381 					set_page_dirty(page);
1382 			}
1383 
1384 			if (!clear_page_dirty_for_io(page))
1385 				goto continue_unlock;
1386 
1387 			ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
1388 			if (ret) {
1389 				unlock_page(page);
1390 				f2fs_put_page(last_page, 0);
1391 				break;
1392 			} else {
1393 				nwritten++;
1394 			}
1395 
1396 			if (page == last_page) {
1397 				f2fs_put_page(page, 0);
1398 				marked = true;
1399 				break;
1400 			}
1401 		}
1402 		pagevec_release(&pvec);
1403 		cond_resched();
1404 
1405 		if (ret || marked)
1406 			break;
1407 	}
1408 	if (!ret && atomic && !marked) {
1409 		f2fs_msg(sbi->sb, KERN_DEBUG,
1410 			"Retry to write fsync mark: ino=%u, idx=%lx",
1411 					ino, last_page->index);
1412 		lock_page(last_page);
1413 		f2fs_wait_on_page_writeback(last_page, NODE, true);
1414 		set_page_dirty(last_page);
1415 		unlock_page(last_page);
1416 		goto retry;
1417 	}
1418 out:
1419 	if (nwritten)
1420 		f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
1421 	return ret ? -EIO : 0;
1422 }
1423 
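/*
 * Illustrative fsync path: f2fs_do_sync_file() builds a WB_SYNC_ALL
 * writeback_control and calls this function to flush the inode's
 * dnodes, marking the last one so roll-forward recovery can find the
 * fsync boundary:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode = WB_SYNC_ALL,
 *		.nr_to_write = LONG_MAX,
 *		.for_reclaim = 0,
 *	};
 *
 *	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
 */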
1424 int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
1425 {
1426 	pgoff_t index, end;
1427 	struct pagevec pvec;
1428 	int step = 0;
1429 	int nwritten = 0;
1430 	int ret = 0;
1431 
1432 	pagevec_init(&pvec, 0);
1433 
1434 next_step:
1435 	index = 0;
1436 	end = ULONG_MAX;
1437 
1438 	while (index <= end) {
1439 		int i, nr_pages;
1440 		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1441 				PAGECACHE_TAG_DIRTY,
1442 				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1443 		if (nr_pages == 0)
1444 			break;
1445 
1446 		for (i = 0; i < nr_pages; i++) {
1447 			struct page *page = pvec.pages[i];
1448 
1449 			if (unlikely(f2fs_cp_error(sbi))) {
1450 				pagevec_release(&pvec);
1451 				ret = -EIO;
1452 				goto out;
1453 			}
1454 
1455 			/*
1456 			 * flushing sequence with step:
1457 			 * 0. indirect nodes
1458 			 * 1. dentry dnodes
1459 			 * 2. file dnodes
1460 			 */
1461 			if (step == 0 && IS_DNODE(page))
1462 				continue;
1463 			if (step == 1 && (!IS_DNODE(page) ||
1464 						is_cold_node(page)))
1465 				continue;
1466 			if (step == 2 && (!IS_DNODE(page) ||
1467 						!is_cold_node(page)))
1468 				continue;
1469 lock_node:
1470 			if (!trylock_page(page))
1471 				continue;
1472 
1473 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1474 continue_unlock:
1475 				unlock_page(page);
1476 				continue;
1477 			}
1478 
1479 			if (!PageDirty(page)) {
1480 				/* someone wrote it for us */
1481 				goto continue_unlock;
1482 			}
1483 
1484 			/* flush inline_data */
1485 			if (is_inline_node(page)) {
1486 				clear_inline_node(page);
1487 				unlock_page(page);
1488 				flush_inline_data(sbi, ino_of_node(page));
1489 				goto lock_node;
1490 			}
1491 
1492 			f2fs_wait_on_page_writeback(page, NODE, true);
1493 
1494 			BUG_ON(PageWriteback(page));
1495 			if (!clear_page_dirty_for_io(page))
1496 				goto continue_unlock;
1497 
1498 			set_fsync_mark(page, 0);
1499 			set_dentry_mark(page, 0);
1500 
1501 			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
1502 				unlock_page(page);
1503 			else
1504 				nwritten++;
1505 
1506 			if (--wbc->nr_to_write == 0)
1507 				break;
1508 		}
1509 		pagevec_release(&pvec);
1510 		cond_resched();
1511 
1512 		if (wbc->nr_to_write == 0) {
1513 			step = 2;
1514 			break;
1515 		}
1516 	}
1517 
1518 	if (step < 2) {
1519 		step++;
1520 		goto next_step;
1521 	}
1522 out:
1523 	if (nwritten)
1524 		f2fs_submit_merged_bio(sbi, NODE, WRITE);
1525 	return ret;
1526 }
1527 
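/*
 * Illustrative checkpoint usage (a sketch of block_operations() in
 * checkpoint.c): keep flushing until no dirty node pages remain, so the
 * NAT state written by the checkpoint matches the on-disk nodes:
 *
 *	while (get_pages(sbi, F2FS_DIRTY_NODES)) {
 *		ret = sync_node_pages(sbi, &wbc);
 *		if (ret)
 *			break;
 *	}
 */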
1528 int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
1529 {
1530 	pgoff_t index = 0, end = ULONG_MAX;
1531 	struct pagevec pvec;
1532 	int ret2, ret = 0;
1533 
1534 	pagevec_init(&pvec, 0);
1535 
1536 	while (index <= end) {
1537 		int i, nr_pages;
1538 		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1539 				PAGECACHE_TAG_WRITEBACK,
1540 				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1541 		if (nr_pages == 0)
1542 			break;
1543 
1544 		for (i = 0; i < nr_pages; i++) {
1545 			struct page *page = pvec.pages[i];
1546 
1547 			/* until radix tree lookup accepts end_index */
1548 			if (unlikely(page->index > end))
1549 				continue;
1550 
1551 			if (ino && ino_of_node(page) == ino) {
1552 				f2fs_wait_on_page_writeback(page, NODE, true);
1553 				if (TestClearPageError(page))
1554 					ret = -EIO;
1555 			}
1556 		}
1557 		pagevec_release(&pvec);
1558 		cond_resched();
1559 	}
1560 
1561 	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
1562 	if (!ret)
1563 		ret = ret2;
1564 	return ret;
1565 }
1566 
1567 static int f2fs_write_node_page(struct page *page,
1568 				struct writeback_control *wbc)
1569 {
1570 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1571 	nid_t nid;
1572 	struct node_info ni;
1573 	struct f2fs_io_info fio = {
1574 		.sbi = sbi,
1575 		.type = NODE,
1576 		.op = REQ_OP_WRITE,
1577 		.op_flags = wbc_to_write_flags(wbc),
1578 		.page = page,
1579 		.encrypted_page = NULL,
1580 	};
1581 
1582 	trace_f2fs_writepage(page, NODE);
1583 
1584 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1585 		goto redirty_out;
1586 	if (unlikely(f2fs_cp_error(sbi)))
1587 		goto redirty_out;
1588 
1589 	/* get old block addr of this node page */
1590 	nid = nid_of_node(page);
1591 	f2fs_bug_on(sbi, page->index != nid);
1592 
1593 	if (wbc->for_reclaim) {
1594 		if (!down_read_trylock(&sbi->node_write))
1595 			goto redirty_out;
1596 	} else {
1597 		down_read(&sbi->node_write);
1598 	}
1599 
1600 	get_node_info(sbi, nid, &ni);
1601 
1602 	/* This page is already truncated */
1603 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
1604 		ClearPageUptodate(page);
1605 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1606 		up_read(&sbi->node_write);
1607 		unlock_page(page);
1608 		return 0;
1609 	}
1610 
1611 	set_page_writeback(page);
1612 	fio.old_blkaddr = ni.blk_addr;
1613 	write_node_page(nid, &fio);
1614 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1615 	dec_page_count(sbi, F2FS_DIRTY_NODES);
1616 	up_read(&sbi->node_write);
1617 
1618 	if (wbc->for_reclaim)
1619 		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);
1620 
1621 	unlock_page(page);
1622 
1623 	if (unlikely(f2fs_cp_error(sbi)))
1624 		f2fs_submit_merged_bio(sbi, NODE, WRITE);
1625 
1626 	return 0;
1627 
1628 redirty_out:
1629 	redirty_page_for_writepage(wbc, page);
1630 	return AOP_WRITEPAGE_ACTIVATE;
1631 }
1632 
1633 static int f2fs_write_node_pages(struct address_space *mapping,
1634 			    struct writeback_control *wbc)
1635 {
1636 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1637 	struct blk_plug plug;
1638 	long diff;
1639 
1640 	/* balancing f2fs's metadata in background */
1641 	f2fs_balance_fs_bg(sbi);
1642 
1643 	/* collect a number of dirty node pages and write together */
1644 	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1645 		goto skip_write;
1646 
1647 	trace_f2fs_writepages(mapping->host, wbc, NODE);
1648 
1649 	diff = nr_pages_to_write(sbi, NODE, wbc);
1650 	wbc->sync_mode = WB_SYNC_NONE;
1651 	blk_start_plug(&plug);
1652 	sync_node_pages(sbi, wbc);
1653 	blk_finish_plug(&plug);
1654 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1655 	return 0;
1656 
1657 skip_write:
1658 	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
1659 	trace_f2fs_writepages(mapping->host, wbc, NODE);
1660 	return 0;
1661 }
1662 
1663 static int f2fs_set_node_page_dirty(struct page *page)
1664 {
1665 	trace_f2fs_set_page_dirty(page, NODE);
1666 
1667 	if (!PageUptodate(page))
1668 		SetPageUptodate(page);
1669 	if (!PageDirty(page)) {
1670 		f2fs_set_page_dirty_nobuffers(page);
1671 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
1672 		SetPagePrivate(page);
1673 		f2fs_trace_pid(page);
1674 		return 1;
1675 	}
1676 	return 0;
1677 }
1678 
1679 /*
1680  * Structure of the f2fs node operations
1681  */
1682 const struct address_space_operations f2fs_node_aops = {
1683 	.writepage	= f2fs_write_node_page,
1684 	.writepages	= f2fs_write_node_pages,
1685 	.set_page_dirty	= f2fs_set_node_page_dirty,
1686 	.invalidatepage	= f2fs_invalidate_page,
1687 	.releasepage	= f2fs_release_page,
1688 #ifdef CONFIG_MIGRATION
1689 	.migratepage    = f2fs_migrate_page,
1690 #endif
1691 };
1692 
1693 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
1694 						nid_t n)
1695 {
1696 	return radix_tree_lookup(&nm_i->free_nid_root, n);
1697 }
1698 
1699 static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
1700 			struct free_nid *i, enum nid_list list, bool new)
1701 {
1702 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1703 
1704 	if (new) {
1705 		int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
1706 		if (err)
1707 			return err;
1708 	}
1709 
1710 	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
1711 						i->state != NID_ALLOC);
1712 	nm_i->nid_cnt[list]++;
1713 	list_add_tail(&i->list, &nm_i->nid_list[list]);
1714 	return 0;
1715 }
1716 
1717 static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
1718 			struct free_nid *i, enum nid_list list, bool reuse)
1719 {
1720 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1721 
1722 	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
1723 						i->state != NID_ALLOC);
1724 	nm_i->nid_cnt[list]--;
1725 	list_del(&i->list);
1726 	if (!reuse)
1727 		radix_tree_delete(&nm_i->free_nid_root, i->nid);
1728 }
1729 
1730 static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
1731 {
1732 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1733 	struct free_nid *i;
1734 	struct nat_entry *ne;
1735 	int err;
1736 
1737 	/* 0 nid should not be used */
1738 	if (unlikely(nid == 0))
1739 		return 0;
1740 
1741 	if (build) {
1742 		/* do not add allocated nids */
1743 		ne = __lookup_nat_cache(nm_i, nid);
1744 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
1745 				nat_get_blkaddr(ne) != NULL_ADDR))
1746 			return 0;
1747 	}
1748 
1749 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1750 	i->nid = nid;
1751 	i->state = NID_NEW;
1752 
1753 	if (radix_tree_preload(GFP_NOFS)) {
1754 		kmem_cache_free(free_nid_slab, i);
1755 		return 0;
1756 	}
1757 
1758 	spin_lock(&nm_i->nid_list_lock);
1759 	err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
1760 	spin_unlock(&nm_i->nid_list_lock);
1761 	radix_tree_preload_end();
1762 	if (err) {
1763 		kmem_cache_free(free_nid_slab, i);
1764 		return 0;
1765 	}
1766 	return 1;
1767 }
1768 
1769 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
1770 {
1771 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1772 	struct free_nid *i;
1773 	bool need_free = false;
1774 
1775 	spin_lock(&nm_i->nid_list_lock);
1776 	i = __lookup_free_nid_list(nm_i, nid);
1777 	if (i && i->state == NID_NEW) {
1778 		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
1779 		need_free = true;
1780 	}
1781 	spin_unlock(&nm_i->nid_list_lock);
1782 
1783 	if (need_free)
1784 		kmem_cache_free(free_nid_slab, i);
1785 }
1786 
1787 static void scan_nat_page(struct f2fs_sb_info *sbi,
1788 			struct page *nat_page, nid_t start_nid)
1789 {
1790 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1791 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
1792 	block_t blk_addr;
1793 	int i;
1794 
1795 	i = start_nid % NAT_ENTRY_PER_BLOCK;
1796 
1797 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
1798 
1799 		if (unlikely(start_nid >= nm_i->max_nid))
1800 			break;
1801 
1802 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1803 		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
1804 		if (blk_addr == NULL_ADDR)
1805 			add_free_nid(sbi, start_nid, true);
1806 	}
1807 }
1808 
1809 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
1810 {
1811 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1812 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1813 	struct f2fs_journal *journal = curseg->journal;
1814 	int i = 0;
1815 	nid_t nid = nm_i->next_scan_nid;
1816 
1817 	/* Enough entries */
1818 	if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
1819 		return;
1820 
1821 	if (!sync && !available_free_memory(sbi, FREE_NIDS))
1822 		return;
1823 
1824 	/* readahead nat pages to be scanned */
1825 	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
1826 							META_NAT, true);
1827 
1828 	down_read(&nm_i->nat_tree_lock);
1829 
1830 	while (1) {
1831 		struct page *page = get_current_nat_page(sbi, nid);
1832 
1833 		scan_nat_page(sbi, page, nid);
1834 		f2fs_put_page(page, 1);
1835 
1836 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
1837 		if (unlikely(nid >= nm_i->max_nid))
1838 			nid = 0;
1839 
1840 		if (++i >= FREE_NID_PAGES)
1841 			break;
1842 	}
1843 
1844 	/* remember where to resume the free nid scan next time */
1845 	nm_i->next_scan_nid = nid;
1846 
1847 	/* find free nids from current sum_pages */
1848 	down_read(&curseg->journal_rwsem);
1849 	for (i = 0; i < nats_in_cursum(journal); i++) {
1850 		block_t addr;
1851 
1852 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
1853 		nid = le32_to_cpu(nid_in_journal(journal, i));
1854 		if (addr == NULL_ADDR)
1855 			add_free_nid(sbi, nid, true);
1856 		else
1857 			remove_free_nid(sbi, nid);
1858 	}
1859 	up_read(&curseg->journal_rwsem);
1860 	up_read(&nm_i->nat_tree_lock);
1861 
1862 	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
1863 					nm_i->ra_nid_pages, META_NAT, false);
1864 }
1865 
1866 void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
1867 {
1868 	mutex_lock(&NM_I(sbi)->build_lock);
1869 	__build_free_nids(sbi, sync);
1870 	mutex_unlock(&NM_I(sbi)->build_lock);
1871 }
1872 
1873 /*
1874  * If this function returns success, the caller can obtain a new nid
1875  * from the second parameter of this function.
1876  * The returned nid can be used as an ino as well as a nid when creating an inode.
1877  */
1878 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
1879 {
1880 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1881 	struct free_nid *i = NULL;
1882 retry:
1883 #ifdef CONFIG_F2FS_FAULT_INJECTION
1884 	if (time_to_inject(sbi, FAULT_ALLOC_NID))
1885 		return false;
1886 #endif
1887 	spin_lock(&nm_i->nid_list_lock);
1888 
1889 	if (unlikely(nm_i->available_nids == 0)) {
1890 		spin_unlock(&nm_i->nid_list_lock);
1891 		return false;
1892 	}
1893 
1894 	/* We should not use stale free nids created by build_free_nids */
1895 	if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
1896 		f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
1897 		i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
1898 					struct free_nid, list);
1899 		*nid = i->nid;
1900 
1901 		__remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
1902 		i->state = NID_ALLOC;
1903 		__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
1904 		nm_i->available_nids--;
1905 		spin_unlock(&nm_i->nid_list_lock);
1906 		return true;
1907 	}
1908 	spin_unlock(&nm_i->nid_list_lock);
1909 
1910 	/* Let's scan nat pages and the curseg journal to get free nids */
1911 	build_free_nids(sbi, true);
1912 	goto retry;
1913 }
1914 
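/*
 * Allocation protocol (illustrative; get_dnode_of_data() above follows
 * it): a successful alloc_nid() must be paired with alloc_nid_done()
 * once the node page exists, or alloc_nid_failed() on error:
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	dn->nid = nid;
 *	page = new_node_page(dn, ofs, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);
 *	else
 *		alloc_nid_done(sbi, nid);
 */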
1915 /*
1916  * alloc_nid() should be called prior to this function.
1917  */
1918 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1919 {
1920 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1921 	struct free_nid *i;
1922 
1923 	spin_lock(&nm_i->nid_list_lock);
1924 	i = __lookup_free_nid_list(nm_i, nid);
1925 	f2fs_bug_on(sbi, !i);
1926 	__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
1927 	spin_unlock(&nm_i->nid_list_lock);
1928 
1929 	kmem_cache_free(free_nid_slab, i);
1930 }
1931 
1932 /*
1933  * alloc_nid() should be called prior to this function.
1934  */
1935 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1936 {
1937 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1938 	struct free_nid *i;
1939 	bool need_free = false;
1940 
1941 	if (!nid)
1942 		return;
1943 
1944 	spin_lock(&nm_i->nid_list_lock);
1945 	i = __lookup_free_nid_list(nm_i, nid);
1946 	f2fs_bug_on(sbi, !i);
1947 
1948 	if (!available_free_memory(sbi, FREE_NIDS)) {
1949 		__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
1950 		need_free = true;
1951 	} else {
1952 		__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
1953 		i->state = NID_NEW;
1954 		__insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
1955 	}
1956 
1957 	nm_i->available_nids++;
1958 
1959 	spin_unlock(&nm_i->nid_list_lock);
1960 
1961 	if (need_free)
1962 		kmem_cache_free(free_nid_slab, i);
1963 }
1964 
1965 int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
1966 {
1967 	struct f2fs_nm_info *nm_i = NM_I(sbi);
1968 	struct free_nid *i, *next;
1969 	int nr = nr_shrink;
1970 
1971 	if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
1972 		return 0;
1973 
1974 	if (!mutex_trylock(&nm_i->build_lock))
1975 		return 0;
1976 
1977 	spin_lock(&nm_i->nid_list_lock);
1978 	list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
1979 									list) {
1980 		if (nr_shrink <= 0 ||
1981 				nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
1982 			break;
1983 
1984 		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
1985 		kmem_cache_free(free_nid_slab, i);
1986 		nr_shrink--;
1987 	}
1988 	spin_unlock(&nm_i->nid_list_lock);
1989 	mutex_unlock(&nm_i->build_lock);
1990 
1991 	return nr - nr_shrink;
1992 }
1993 
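/*
 * Illustrative shrinker hookup (a sketch of f2fs_shrink_scan() in
 * shrinker.c): when the VM asks f2fs to drop cached metadata, the nat
 * and free-nid caches are trimmed in turn:
 *
 *	freed += try_to_free_nats(sbi, nr - freed);
 *	freed += try_to_free_nids(sbi, nr - freed);
 */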
1994 void recover_inline_xattr(struct inode *inode, struct page *page)
1995 {
1996 	void *src_addr, *dst_addr;
1997 	size_t inline_size;
1998 	struct page *ipage;
1999 	struct f2fs_inode *ri;
2000 
2001 	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
2002 	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
2003 
2004 	ri = F2FS_INODE(page);
2005 	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
2006 		clear_inode_flag(inode, FI_INLINE_XATTR);
2007 		goto update_inode;
2008 	}
2009 
2010 	dst_addr = inline_xattr_addr(ipage);
2011 	src_addr = inline_xattr_addr(page);
2012 	inline_size = inline_xattr_size(inode);
2013 
2014 	f2fs_wait_on_page_writeback(ipage, NODE, true);
2015 	memcpy(dst_addr, src_addr, inline_size);
2016 update_inode:
2017 	update_inode(inode, ipage);
2018 	f2fs_put_page(ipage, 1);
2019 }
2020 
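/*
 * Roll-forward recovery of an xattr node: invalidate the stale xattr
 * nid, bind the recovered nid to this inode, and point it at @blkaddr.
 */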
2021 void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
2022 {
2023 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2024 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2025 	nid_t new_xnid = nid_of_node(page);
2026 	struct node_info ni;
2027 
2028 	/* 1: invalidate the previous xattr nid */
2029 	if (!prev_xnid)
2030 		goto recover_xnid;
2031 
2032 	/* Deallocate node address */
2033 	get_node_info(sbi, prev_xnid, &ni);
2034 	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
2035 	invalidate_blocks(sbi, ni.blk_addr);
2036 	dec_valid_node_count(sbi, inode);
2037 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2038 
2039 recover_xnid:
2040 	/* 2: allocate new xattr nid */
2041 	if (unlikely(!inc_valid_node_count(sbi, inode)))
2042 		f2fs_bug_on(sbi, 1);
2043 
2044 	remove_free_nid(sbi, new_xnid);
2045 	get_node_info(sbi, new_xnid, &ni);
2046 	ni.ino = inode->i_ino;
2047 	set_node_addr(sbi, &ni, NEW_ADDR, false);
2048 	f2fs_i_xnid_write(inode, new_xnid);
2049 
2050 	/* 3: update xattr blkaddr */
2051 	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
2052 	set_node_addr(sbi, &ni, blkaddr, false);
2053 }
2054 
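/*
 * Rebuild an inode page from a recovered node page. Only the fields up
 * to i_ext are copied; size, links, xattr and inline state are reset.
 */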
2055 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2056 {
2057 	struct f2fs_inode *src, *dst;
2058 	nid_t ino = ino_of_node(page);
2059 	struct node_info old_ni, new_ni;
2060 	struct page *ipage;
2061 
2062 	get_node_info(sbi, ino, &old_ni);
2063 
2064 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2065 		return -EINVAL;
2066 retry:
2067 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2068 	if (!ipage) {
2069 		congestion_wait(BLK_RW_ASYNC, HZ/50);
2070 		goto retry;
2071 	}
2072 
2073 	/* this ino must not be handed out again; drop it from the free nid list */
2074 	remove_free_nid(sbi, ino);
2075 
2076 	if (!PageUptodate(ipage))
2077 		SetPageUptodate(ipage);
2078 	fill_node_footer(ipage, ino, ino, 0, true);
2079 
2080 	src = F2FS_INODE(page);
2081 	dst = F2FS_INODE(ipage);
2082 
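	/* copy on-disk inode fields up to, but not including, i_ext */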
2083 	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2084 	dst->i_size = 0;
2085 	dst->i_blocks = cpu_to_le64(1);
2086 	dst->i_links = cpu_to_le32(1);
2087 	dst->i_xattr_nid = 0;
2088 	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
2089 
2090 	new_ni = old_ni;
2091 	new_ni.ino = ino;
2092 
2093 	if (unlikely(!inc_valid_node_count(sbi, NULL)))
2094 		WARN_ON(1);
2095 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2096 	inc_valid_inode_count(sbi);
2097 	set_page_dirty(ipage);
2098 	f2fs_put_page(ipage, 1);
2099 	return 0;
2100 }
2101 
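/*
 * Rebuild the node summary block for @segno by reading every node
 * block in the segment and recording the nid found in its footer.
 */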
2102 int restore_node_summary(struct f2fs_sb_info *sbi,
2103 			unsigned int segno, struct f2fs_summary_block *sum)
2104 {
2105 	struct f2fs_node *rn;
2106 	struct f2fs_summary *sum_entry;
2107 	block_t addr;
2108 	int i, idx, last_offset, nrpages;
2109 
2110 	/* scan the node segment */
2111 	last_offset = sbi->blocks_per_seg;
2112 	addr = START_BLOCK(sbi, segno);
2113 	sum_entry = &sum->entries[0];
2114 
2115 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2116 		nrpages = min(last_offset - i, BIO_MAX_PAGES);
2117 
2118 		/* readahead node pages */
2119 		ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2120 
2121 		for (idx = addr; idx < addr + nrpages; idx++) {
2122 			struct page *page = get_tmp_page(sbi, idx);
2123 
2124 			rn = F2FS_NODE(page);
2125 			sum_entry->nid = rn->footer.nid;
2126 			sum_entry->version = 0;
2127 			sum_entry->ofs_in_node = 0;
2128 			sum_entry++;
2129 			f2fs_put_page(page, 1);
2130 		}
2131 
2132 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2133 							addr + nrpages);
2134 	}
2135 	return 0;
2136 }
2137 
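/*
 * Move all nat entries out of the journal into the dirty nat cache so
 * they can be flushed to nat pages at checkpoint time.
 */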
2138 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2139 {
2140 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2141 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2142 	struct f2fs_journal *journal = curseg->journal;
2143 	int i;
2144 
2145 	down_write(&curseg->journal_rwsem);
2146 	for (i = 0; i < nats_in_cursum(journal); i++) {
2147 		struct nat_entry *ne;
2148 		struct f2fs_nat_entry raw_ne;
2149 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2150 
2151 		raw_ne = nat_in_journal(journal, i);
2152 
2153 		ne = __lookup_nat_cache(nm_i, nid);
2154 		if (!ne) {
2155 			ne = grab_nat_entry(nm_i, nid);
2156 			node_info_from_raw_nat(&ne->ni, &raw_ne);
2157 		}
2158 
2159 		/*
2160 		 * If a free nat entry in the journal has not been reused since
2161 		 * the last checkpoint, drop it from available_nids here; the
2162 		 * count is restored when the dirty entry is flushed.
2163 		 */
2164 		if (!get_nat_flag(ne, IS_DIRTY) &&
2165 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2166 			spin_lock(&nm_i->nid_list_lock);
2167 			nm_i->available_nids--;
2168 			spin_unlock(&nm_i->nid_list_lock);
2169 		}
2170 
2171 		__set_nat_cache_dirty(nm_i, ne);
2172 	}
2173 	update_nats_in_cursum(journal, -i);
2174 	up_write(&curseg->journal_rwsem);
2175 }
2176 
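/*
 * Keep @head sorted by ascending entry_cnt so that small sets, which
 * can fit in the journal, are flushed first; sets with at least @max
 * entries go straight to the tail.
 */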
2177 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2178 						struct list_head *head, int max)
2179 {
2180 	struct nat_entry_set *cur;
2181 
2182 	if (nes->entry_cnt >= max)
2183 		goto add_out;
2184 
2185 	list_for_each_entry(cur, head, set_list) {
2186 		if (cur->entry_cnt >= nes->entry_cnt) {
2187 			list_add(&nes->set_list, cur->set_list.prev);
2188 			return;
2189 		}
2190 	}
2191 add_out:
2192 	list_add_tail(&nes->set_list, head);
2193 }
2194 
2195 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2196 					struct nat_entry_set *set)
2197 {
2198 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2199 	struct f2fs_journal *journal = curseg->journal;
2200 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2201 	bool to_journal = true;
2202 	struct f2fs_nat_block *nat_blk;
2203 	struct nat_entry *ne, *cur;
2204 	struct page *page = NULL;
2205 
2206 	/*
2207 	 * nat entries are flushed in one of two ways:
2208 	 * #1, to the journal in the current hot data summary block, if it has room;
2209 	 * #2, otherwise, to the on-disk nat page.
2210 	 */
2211 	if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2212 		to_journal = false;
2213 
2214 	if (to_journal) {
2215 		down_write(&curseg->journal_rwsem);
2216 	} else {
2217 		page = get_next_nat_page(sbi, start_nid);
2218 		nat_blk = page_address(page);
2219 		f2fs_bug_on(sbi, !nat_blk);
2220 	}
2221 
2222 	/* flush dirty nats in nat entry set */
2223 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2224 		struct f2fs_nat_entry *raw_ne;
2225 		nid_t nid = nat_get_nid(ne);
2226 		int offset;
2227 
2228 		if (nat_get_blkaddr(ne) == NEW_ADDR)
2229 			continue;
2230 
2231 		if (to_journal) {
2232 			offset = lookup_journal_in_cursum(journal,
2233 							NAT_JOURNAL, nid, 1);
2234 			f2fs_bug_on(sbi, offset < 0);
2235 			raw_ne = &nat_in_journal(journal, offset);
2236 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
2237 		} else {
2238 			raw_ne = &nat_blk->entries[nid - start_nid];
2239 		}
2240 		raw_nat_from_node_info(raw_ne, &ne->ni);
2241 		nat_reset_flag(ne);
2242 		__clear_nat_cache_dirty(NM_I(sbi), ne);
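		/* the nid is free again: cache it and make it available for reuse */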
2243 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
2244 			add_free_nid(sbi, nid, false);
2245 			spin_lock(&NM_I(sbi)->nid_list_lock);
2246 			NM_I(sbi)->available_nids++;
2247 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2248 		}
2249 	}
2250 
2251 	if (to_journal)
2252 		up_write(&curseg->journal_rwsem);
2253 	else
2254 		f2fs_put_page(page, 1);
2255 
2256 	f2fs_bug_on(sbi, set->entry_cnt);
2257 
2258 	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2259 	kmem_cache_free(nat_entry_set_slab, set);
2260 }
2261 
2262 /*
2263  * Flush all dirty nat entries during the checkpointing process.
2264  */
2265 void flush_nat_entries(struct f2fs_sb_info *sbi)
2266 {
2267 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2268 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2269 	struct f2fs_journal *journal = curseg->journal;
2270 	struct nat_entry_set *setvec[SETVEC_SIZE];
2271 	struct nat_entry_set *set, *tmp;
2272 	unsigned int found;
2273 	nid_t set_idx = 0;
2274 	LIST_HEAD(sets);
2275 
2276 	if (!nm_i->dirty_nat_cnt)
2277 		return;
2278 
2279 	down_write(&nm_i->nat_tree_lock);
2280 
2281 	/*
2282 	 * if there is not enough space in the journal to store all dirty
2283 	 * nat entries, remove them from the journal and merge them into
2284 	 * the nat entry sets.
2285 	 */
2286 	if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2287 		remove_nats_in_journal(sbi);
2288 
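	/* collect dirty sets, ordered so journal-sized sets flush first */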
2289 	while ((found = __gang_lookup_nat_set(nm_i,
2290 					set_idx, SETVEC_SIZE, setvec))) {
2291 		unsigned idx;
2292 		set_idx = setvec[found - 1]->set + 1;
2293 		for (idx = 0; idx < found; idx++)
2294 			__adjust_nat_entry_set(setvec[idx], &sets,
2295 						MAX_NAT_JENTRIES(journal));
2296 	}
2297 
2298 	/* flush each dirty nat entry set */
2299 	list_for_each_entry_safe(set, tmp, &sets, set_list)
2300 		__flush_nat_entry_set(sbi, set);
2301 
2302 	up_write(&nm_i->nat_tree_lock);
2303 
2304 	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
2305 }
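/*
 * Initialize the in-memory node manager from the raw superblock:
 * sizing, counters, radix trees, lists, locks and the NAT bitmap.
 */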
2306 
2307 static int init_node_manager(struct f2fs_sb_info *sbi)
2308 {
2309 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2310 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2311 	unsigned char *version_bitmap;
2312 	unsigned int nat_segs, nat_blocks;
2313 
2314 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2315 
2316 	/* segment_count_nat includes the paired segments, so divide by 2. */
2317 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
2318 	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
2319 
2320 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
2321 
2322 	/* unusable nids: 0, node, meta (root is already counted as a valid node) */
2323 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
2324 							F2FS_RESERVED_NODE_NUM;
2325 	nm_i->nid_cnt[FREE_NID_LIST] = 0;
2326 	nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
2327 	nm_i->nat_cnt = 0;
2328 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2329 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
2330 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2331 
2332 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
2333 	INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
2334 	INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
2335 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2336 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2337 	INIT_LIST_HEAD(&nm_i->nat_entries);
2338 
2339 	mutex_init(&nm_i->build_lock);
2340 	spin_lock_init(&nm_i->nid_list_lock);
2341 	init_rwsem(&nm_i->nat_tree_lock);
2342 
2343 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
2344 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2345 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2346 	if (!version_bitmap)
2347 		return -EFAULT;
2348 
2349 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
2350 					GFP_KERNEL);
2351 	if (!nm_i->nat_bitmap)
2352 		return -ENOMEM;
2353 	return 0;
2354 }
2355 
2356 int build_node_manager(struct f2fs_sb_info *sbi)
2357 {
2358 	int err;
2359 
2360 	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
2361 	if (!sbi->nm_info)
2362 		return -ENOMEM;
2363 
2364 	err = init_node_manager(sbi);
2365 	if (err)
2366 		return err;
2367 
2368 	build_free_nids(sbi, true);
2369 	return 0;
2370 }
2371 
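/*
 * Tear down the node manager: free cached nids, nat entries and nat
 * entry sets, then release the NAT bitmap.
 */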
2372 void destroy_node_manager(struct f2fs_sb_info *sbi)
2373 {
2374 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2375 	struct free_nid *i, *next_i;
2376 	struct nat_entry *natvec[NATVEC_SIZE];
2377 	struct nat_entry_set *setvec[SETVEC_SIZE];
2378 	nid_t nid = 0;
2379 	unsigned int found;
2380 
2381 	if (!nm_i)
2382 		return;
2383 
2384 	/* destroy free nid list */
2385 	spin_lock(&nm_i->nid_list_lock);
2386 	list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
2387 									list) {
2388 		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
2389 		spin_unlock(&nm_i->nid_list_lock);
2390 		kmem_cache_free(free_nid_slab, i);
2391 		spin_lock(&nm_i->nid_list_lock);
2392 	}
2393 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
2394 	f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
2395 	f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
2396 	spin_unlock(&nm_i->nid_list_lock);
2397 
2398 	/* destroy nat cache */
2399 	down_write(&nm_i->nat_tree_lock);
2400 	while ((found = __gang_lookup_nat_cache(nm_i,
2401 					nid, NATVEC_SIZE, natvec))) {
2402 		unsigned idx;
2403 
2404 		nid = nat_get_nid(natvec[found - 1]) + 1;
2405 		for (idx = 0; idx < found; idx++)
2406 			__del_from_nat_cache(nm_i, natvec[idx]);
2407 	}
2408 	f2fs_bug_on(sbi, nm_i->nat_cnt);
2409 
2410 	/* destroy nat set cache */
2411 	nid = 0;
2412 	while ((found = __gang_lookup_nat_set(nm_i,
2413 					nid, SETVEC_SIZE, setvec))) {
2414 		unsigned idx;
2415 
2416 		nid = setvec[found - 1]->set + 1;
2417 		for (idx = 0; idx < found; idx++) {
2418 			/* entry_cnt may be non-zero if a checkpoint error occurred */
2419 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
2420 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
2421 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
2422 		}
2423 	}
2424 	up_write(&nm_i->nat_tree_lock);
2425 
2426 	kfree(nm_i->nat_bitmap);
2427 	sbi->nm_info = NULL;
2428 	kfree(nm_i);
2429 }
2430 
2431 int __init create_node_manager_caches(void)
2432 {
2433 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
2434 			sizeof(struct nat_entry));
2435 	if (!nat_entry_slab)
2436 		goto fail;
2437 
2438 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
2439 			sizeof(struct free_nid));
2440 	if (!free_nid_slab)
2441 		goto destroy_nat_entry;
2442 
2443 	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
2444 			sizeof(struct nat_entry_set));
2445 	if (!nat_entry_set_slab)
2446 		goto destroy_free_nid;
2447 	return 0;
2448 
2449 destroy_free_nid:
2450 	kmem_cache_destroy(free_nid_slab);
2451 destroy_nat_entry:
2452 	kmem_cache_destroy(nat_entry_slab);
2453 fail:
2454 	return -ENOMEM;
2455 }
2456 
2457 void destroy_node_manager_caches(void)
2458 {
2459 	kmem_cache_destroy(nat_entry_set_slab);
2460 	kmem_cache_destroy(free_nid_slab);
2461 	kmem_cache_destroy(nat_entry_slab);
2462 }
2463