/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: out-of-range nid=%x, run fsck to fix.",
				__func__, nid);
		return -EINVAL;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component, respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* allow 20% of total ram for in-memory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
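
/*
 * Illustrative math, not part of the original source: with 4KB pages,
 * ~1GiB of low memory (avail_ram = 262144 pages) and ram_thresh = 10
 * (assumed default; see DEF_RAM_THRESHOLD), the 25% classes above may
 * grow to (262144 * 10 / 100) >> 2 = 6553 pages (~25MiB) and the 50%
 * classes to 13107 pages, where mem_size is each cache's current
 * footprint in pages.
 */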

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_radix_tree_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return f2fs_get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = f2fs_get_meta_page(sbi, src_off);
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the following conditions:
	 * 1. updating NEW_ADDR to a valid block address;
	 * 2. updating an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->dirty_nat_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
}
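
/*
 * Sizing sketch (assumes the common 4KB geometry where
 * NAT_ENTRY_PER_BLOCK == 455): dirty nat entries are grouped per on-disk
 * NAT block, so nids 0..454 share set 0, nids 455..909 share set 1, and
 * so on; this lets the checkpoint flush whole NAT blocks rather than
 * scattered single entries.
 */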

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	list_move_tail(&ne->list, &nm_i->nat_entries);
	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, is_valid_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!is_valid_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}
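
/*
 * State sketch derived from the checks above: a node address moves
 * NULL_ADDR -> NEW_ADDR on allocation, NEW_ADDR -> valid blkaddr on the
 * first writeback, valid -> valid on rewrite, and valid -> NULL_ADDR on
 * truncation, which is the only transition that bumps the version.
 */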

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function always returns success: it fills *ni from the nat cache,
 * the current segment's NAT journal, or the on-disk NAT block, in that order.
 */
void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		/* fall through */
	case 2:
		base += 2 * direct_blks;
		/* fall through */
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
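
/*
 * Worked example (assuming the default 4KB geometry where
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): a lookup that stopped at
 * cur_level 1 under max_level 2 hit a missing indirect node, so
 * skipped_unit becomes 1018 * 1018 blocks and the returned offset is the
 * first page of the next indirect subtree past pgofs, letting the caller
 * skip the whole hole at once.
 */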

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
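
/*
 * Worked example (assuming the default 4KB geometry: direct_index == 923
 * addresses in the inode, direct_blks == NIDS_PER_BLOCK == 1018): block
 * 5000 lies past the 923 + 2 * 1018 = 2959 directly addressed slots, so
 * it falls under NODE_IND1_BLOCK with 2041 blocks remaining; offset[1] =
 * 2041 / 1018 = 2 picks the third direct node, offset[2] = 2041 % 1018 =
 * 5 is the slot inside it, and the function returns level 2.
 */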

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
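
/*
 * Typical usage sketch (mirrors callers elsewhere in f2fs, not a
 * prescriptive contract; "index" and "blkaddr" are illustrative names):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
 *	f2fs_put_dnode(&dn);
 *
 * ALLOC_NODE callers must additionally hold f2fs_lock_op(), per the
 * comment above.
 */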

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	pgoff_t index;

	f2fs_get_node_info(sbi, dn->nid, &ni);

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	/* cache the index before our page reference is dropped */
	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi), index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0)
		return level;

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	f2fs_i_xnid_write(inode, 0);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 8);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_get_node_info(sbi, dn->nid, &new_ni);
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should do the following after checking the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};

	if (PageUptodate(page))
		return LOCKED_PAGE;

	f2fs_get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
	rcu_read_unlock();
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EBADMSG;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
			"nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			nid, nid_of_node(page), ino_of_node(page),
			ofs_of_node(page), cpver_of_node(page),
			next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	f2fs_get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	set_page_writeback(page);
	ClearPageError(page);
	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
						page->index, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

void f2fs_move_node_page(struct page *node_page, int gc_type)
{
	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		set_page_dirty(node_page);
		f2fs_wait_on_page_writeback(node_page, NODE, true);

		f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
		if (!clear_page_dirty_for_io(node_page))
			goto out_page;

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO))
			unlock_page(node_page);
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
}

int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic)
{
	pgoff_t index;
	pgoff_t last_idx = ULONG_MAX;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);
			BUG_ON(PageWriteback(page));

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by another thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_msg(sbi->sb, KERN_DEBUG,
			"Retry to write fsync mark: ino=%u, idx=%lx",
					ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
	return ret ? -EIO : 0;
}

1591 				struct writeback_control *wbc,
1592 				bool do_balance, enum iostat_type io_type)
1593 {
1594 	pgoff_t index;
1595 	struct pagevec pvec;
1596 	int step = 0;
1597 	int nwritten = 0;
1598 	int ret = 0;
1599 	int nr_pages, done = 0;
1600 
1601 	pagevec_init(&pvec);
1602 
1603 next_step:
1604 	index = 0;
1605 
1606 	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1607 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1608 		int i;
1609 
1610 		for (i = 0; i < nr_pages; i++) {
1611 			struct page *page = pvec.pages[i];
1612 			bool submitted = false;
1613 
			/* give priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	int ret2, ret = 0;
	int nr_pages;

	pagevec_init(&pvec);

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE, true);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage    = f2fs_migrate_page,
#endif
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
	if (err)
		return err;

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]++;
	if (state == FREE_NID)
		list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}

static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}
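
/*
 * Coordinate sketch (assuming NAT_ENTRY_PER_BLOCK == 455 with 4KB
 * blocks): nid 5000 maps to nat_ofs = NAT_BLOCK_OFFSET(5000) = 10 and
 * nid_ofs = 5000 - START_NID(5000) = 5000 - 4550 = 450, i.e. bit 450 of
 * the free-nid bitmap for NAT block 10.
 */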

/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *                         - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i, FREE_NID);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}
}

2000 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2001 {
2002 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2003 	struct f2fs_journal *journal = curseg->journal;
2004 	int i;
2005 
2006 	down_read(&curseg->journal_rwsem);
2007 	for (i = 0; i < nats_in_cursum(journal); i++) {
2008 		block_t addr;
2009 		nid_t nid;
2010 
2011 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2012 		nid = le32_to_cpu(nid_in_journal(journal, i));
2013 		if (addr == NULL_ADDR)
2014 			add_free_nid(sbi, nid, true, false);
2015 		else
2016 			remove_free_nid(sbi, nid);
2017 	}
2018 	up_read(&curseg->journal_rwsem);
2019 }
2020 
2021 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2022 {
2023 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2024 	unsigned int i, idx;
2025 	nid_t nid;
2026 
2027 	down_read(&nm_i->nat_tree_lock);
2028 
2029 	for (i = 0; i < nm_i->nat_blocks; i++) {
2030 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2031 			continue;
2032 		if (!nm_i->free_nid_count[i])
2033 			continue;
2034 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2035 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2036 						NAT_ENTRY_PER_BLOCK, idx);
2037 			if (idx >= NAT_ENTRY_PER_BLOCK)
2038 				break;
2039 
2040 			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2041 			add_free_nid(sbi, nid, true, false);
2042 
2043 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2044 				goto out;
2045 		}
2046 	}
2047 out:
2048 	scan_curseg_cache(sbi);
2049 
2050 	up_read(&nm_i->nat_tree_lock);
2051 }
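
/*
 * Editor's note on scan_free_nid_bits() above: the nid is rebuilt from its
 * two-level bitmap position as nid = i * NAT_ENTRY_PER_BLOCK + idx.  For
 * example (assuming NAT_ENTRY_PER_BLOCK = 455), a set bit at idx 7 of
 * free_nid_bitmap[3] yields nid = 3 * 455 + 7 = 1372, without reading the
 * on-disk NAT block at all.
 */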
2052 
2053 static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2054 						bool sync, bool mount)
2055 {
2056 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2057 	int i = 0;
2058 	nid_t nid = nm_i->next_scan_nid;
2059 
2060 	if (unlikely(nid >= nm_i->max_nid))
2061 		nid = 0;
2062 
2063 	/* Enough entries */
2064 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2065 		return;
2066 
2067 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2068 		return;
2069 
2070 	if (!mount) {
2071 		/* try to find free nids in free_nid_bitmap */
2072 		scan_free_nid_bits(sbi);
2073 
2074 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2075 			return;
2076 	}
2077 
2078 	/* readahead nat pages to be scanned */
2079 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2080 							META_NAT, true);
2081 
2082 	down_read(&nm_i->nat_tree_lock);
2083 
2084 	while (1) {
2085 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2086 						nm_i->nat_block_bitmap)) {
2087 			struct page *page = get_current_nat_page(sbi, nid);
2088 
2089 			scan_nat_page(sbi, page, nid);
2090 			f2fs_put_page(page, 1);
2091 		}
2092 
2093 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2094 		if (unlikely(nid >= nm_i->max_nid))
2095 			nid = 0;
2096 
2097 		if (++i >= FREE_NID_PAGES)
2098 			break;
2099 	}
2100 
2101 	/* remember where to resume so the next scan finds free nids from new nat pages */
2102 	nm_i->next_scan_nid = nid;
2103 
2104 	/* find free nids from current sum_pages */
2105 	scan_curseg_cache(sbi);
2106 
2107 	up_read(&nm_i->nat_tree_lock);
2108 
2109 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2110 					nm_i->ra_nid_pages, META_NAT, false);
2111 }
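
/*
 * Editor's note on the scan loop above: after each NAT block,
 *
 *	nid += NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK);
 *
 * rounds nid up to the next block boundary; e.g. (with
 * NAT_ENTRY_PER_BLOCK = 455) nid 1000 advances by 455 - 90 = 365 to
 * nid 1365, the first entry of NAT block 3.  At most FREE_NID_PAGES
 * NAT blocks are scanned per call.
 */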
2112 
2113 void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2114 {
2115 	mutex_lock(&NM_I(sbi)->build_lock);
2116 	__f2fs_build_free_nids(sbi, sync, mount);
2117 	mutex_unlock(&NM_I(sbi)->build_lock);
2118 }
2119 
2120 /*
2121  * If this function returns success, the caller can obtain a new nid
2122  * from the second parameter of this function.  The returned nid can be
2123  * used as an ino as well as a nid when an inode is created.
2124  */
2125 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2126 {
2127 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2128 	struct free_nid *i = NULL;
2129 retry:
2130 #ifdef CONFIG_F2FS_FAULT_INJECTION
2131 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2132 		f2fs_show_injection_info(FAULT_ALLOC_NID);
2133 		return false;
2134 	}
2135 #endif
2136 	spin_lock(&nm_i->nid_list_lock);
2137 
2138 	if (unlikely(nm_i->available_nids == 0)) {
2139 		spin_unlock(&nm_i->nid_list_lock);
2140 		return false;
2141 	}
2142 
2143 	/* We should not use stale free nids created by f2fs_build_free_nids */
2144 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2145 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2146 		i = list_first_entry(&nm_i->free_nid_list,
2147 					struct free_nid, list);
2148 		*nid = i->nid;
2149 
2150 		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2151 		nm_i->available_nids--;
2152 
2153 		update_free_nid_bitmap(sbi, *nid, false, false);
2154 
2155 		spin_unlock(&nm_i->nid_list_lock);
2156 		return true;
2157 	}
2158 	spin_unlock(&nm_i->nid_list_lock);
2159 
2160 	/* Let's scan nat pages and their caches to get free nids */
2161 	f2fs_build_free_nids(sbi, true, false);
2162 	goto retry;
2163 }
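
/*
 * Editor's sketch of the nid lifecycle implied by the three functions in
 * this block (hypothetical caller; f2fs_recover_xattr_data() below follows
 * exactly this pattern):
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))		   // nid -> PREALLOC_NID
 *		return -ENOSPC;
 *	if (consumer_failed)			   // hypothetical condition
 *		f2fs_alloc_nid_failed(sbi, nid);   // back to FREE_NID, or freed
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);	   // PREALLOC_NID entry dropped
 */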
2164 
2165 /*
2166  * f2fs_alloc_nid() should be called prior to this function.
2167  */
2168 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2169 {
2170 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2171 	struct free_nid *i;
2172 
2173 	spin_lock(&nm_i->nid_list_lock);
2174 	i = __lookup_free_nid_list(nm_i, nid);
2175 	f2fs_bug_on(sbi, !i);
2176 	__remove_free_nid(sbi, i, PREALLOC_NID);
2177 	spin_unlock(&nm_i->nid_list_lock);
2178 
2179 	kmem_cache_free(free_nid_slab, i);
2180 }
2181 
2182 /*
2183  * f2fs_alloc_nid() should be called prior to this function.
2184  */
2185 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2186 {
2187 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2188 	struct free_nid *i;
2189 	bool need_free = false;
2190 
2191 	if (!nid)
2192 		return;
2193 
2194 	spin_lock(&nm_i->nid_list_lock);
2195 	i = __lookup_free_nid_list(nm_i, nid);
2196 	f2fs_bug_on(sbi, !i);
2197 
2198 	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2199 		__remove_free_nid(sbi, i, PREALLOC_NID);
2200 		need_free = true;
2201 	} else {
2202 		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2203 	}
2204 
2205 	nm_i->available_nids++;
2206 
2207 	update_free_nid_bitmap(sbi, nid, true, false);
2208 
2209 	spin_unlock(&nm_i->nid_list_lock);
2210 
2211 	if (need_free)
2212 		kmem_cache_free(free_nid_slab, i);
2213 }
2214 
2215 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2216 {
2217 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2218 	struct free_nid *i, *next;
2219 	int nr = nr_shrink;
2220 
2221 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2222 		return 0;
2223 
2224 	if (!mutex_trylock(&nm_i->build_lock))
2225 		return 0;
2226 
2227 	spin_lock(&nm_i->nid_list_lock);
2228 	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2229 		if (nr_shrink <= 0 ||
2230 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2231 			break;
2232 
2233 		__remove_free_nid(sbi, i, FREE_NID);
2234 		kmem_cache_free(free_nid_slab, i);
2235 		nr_shrink--;
2236 	}
2237 	spin_unlock(&nm_i->nid_list_lock);
2238 	mutex_unlock(&nm_i->build_lock);
2239 
2240 	return nr - nr_shrink;
2241 }
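
/*
 * Editor's note on f2fs_try_to_free_nids() above: the return value is the
 * number of free nids actually released, i.e. nr - nr_shrink; asking to
 * shrink 16 but reaching MAX_FREE_NIDS after 5 frees returns 5.  The
 * mutex_trylock() keeps the shrinker from stalling behind a concurrent
 * f2fs_build_free_nids() holding build_lock.
 */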
2242 
2243 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2244 {
2245 	void *src_addr, *dst_addr;
2246 	size_t inline_size;
2247 	struct page *ipage;
2248 	struct f2fs_inode *ri;
2249 
2250 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2251 	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
2252 
2253 	ri = F2FS_INODE(page);
2254 	if (ri->i_inline & F2FS_INLINE_XATTR) {
2255 		set_inode_flag(inode, FI_INLINE_XATTR);
2256 	} else {
2257 		clear_inode_flag(inode, FI_INLINE_XATTR);
2258 		goto update_inode;
2259 	}
2260 
2261 	dst_addr = inline_xattr_addr(inode, ipage);
2262 	src_addr = inline_xattr_addr(inode, page);
2263 	inline_size = inline_xattr_size(inode);
2264 
2265 	f2fs_wait_on_page_writeback(ipage, NODE, true);
2266 	memcpy(dst_addr, src_addr, inline_size);
2267 update_inode:
2268 	f2fs_update_inode(inode, ipage);
2269 	f2fs_put_page(ipage, 1);
2270 }
2271 
2272 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2273 {
2274 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2275 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2276 	nid_t new_xnid;
2277 	struct dnode_of_data dn;
2278 	struct node_info ni;
2279 	struct page *xpage;
2280 
2281 	if (!prev_xnid)
2282 		goto recover_xnid;
2283 
2284 	/* 1: invalidate the previous xattr nid */
2285 	f2fs_get_node_info(sbi, prev_xnid, &ni);
2286 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2287 	dec_valid_node_count(sbi, inode, false);
2288 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2289 
2290 recover_xnid:
2291 	/* 2: update xattr nid in inode */
2292 	if (!f2fs_alloc_nid(sbi, &new_xnid))
2293 		return -ENOSPC;
2294 
2295 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2296 	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2297 	if (IS_ERR(xpage)) {
2298 		f2fs_alloc_nid_failed(sbi, new_xnid);
2299 		return PTR_ERR(xpage);
2300 	}
2301 
2302 	f2fs_alloc_nid_done(sbi, new_xnid);
2303 	f2fs_update_inode_page(inode);
2304 
2305 	/* 3: update and set xattr node page dirty */
2306 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2307 
2308 	set_page_dirty(xpage);
2309 	f2fs_put_page(xpage, 1);
2310 
2311 	return 0;
2312 }
2313 
2314 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2315 {
2316 	struct f2fs_inode *src, *dst;
2317 	nid_t ino = ino_of_node(page);
2318 	struct node_info old_ni, new_ni;
2319 	struct page *ipage;
2320 
2321 	f2fs_get_node_info(sbi, ino, &old_ni);
2322 
2323 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2324 		return -EINVAL;
2325 retry:
2326 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2327 	if (!ipage) {
2328 		congestion_wait(BLK_RW_ASYNC, HZ/50);
2329 		goto retry;
2330 	}
2331 
2332 	/* Make sure this ino is no longer offered from the free nid list */
2333 	remove_free_nid(sbi, ino);
2334 
2335 	if (!PageUptodate(ipage))
2336 		SetPageUptodate(ipage);
2337 	fill_node_footer(ipage, ino, ino, 0, true);
2338 	set_cold_node(ipage, false);	/* mark the new inode page cold, not the source page */
2339 
2340 	src = F2FS_INODE(page);
2341 	dst = F2FS_INODE(ipage);
2342 
2343 	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2344 	dst->i_size = 0;
2345 	dst->i_blocks = cpu_to_le64(1);
2346 	dst->i_links = cpu_to_le32(1);
2347 	dst->i_xattr_nid = 0;
2348 	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2349 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2350 		dst->i_extra_isize = src->i_extra_isize;
2351 
2352 		if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
2353 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2354 							i_inline_xattr_size))
2355 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
2356 
2357 		if (f2fs_sb_has_project_quota(sbi->sb) &&
2358 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2359 								i_projid))
2360 			dst->i_projid = src->i_projid;
2361 	}
2362 
2363 	new_ni = old_ni;
2364 	new_ni.ino = ino;
2365 
2366 	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2367 		WARN_ON(1);
2368 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2369 	inc_valid_inode_count(sbi);
2370 	set_page_dirty(ipage);
2371 	f2fs_put_page(ipage, 1);
2372 	return 0;
2373 }
2374 
2375 void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2376 			unsigned int segno, struct f2fs_summary_block *sum)
2377 {
2378 	struct f2fs_node *rn;
2379 	struct f2fs_summary *sum_entry;
2380 	block_t addr;
2381 	int i, idx, last_offset, nrpages;
2382 
2383 	/* scan the node segment */
2384 	last_offset = sbi->blocks_per_seg;
2385 	addr = START_BLOCK(sbi, segno);
2386 	sum_entry = &sum->entries[0];
2387 
2388 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2389 		nrpages = min(last_offset - i, BIO_MAX_PAGES);
2390 
2391 		/* readahead node pages */
2392 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2393 
2394 		for (idx = addr; idx < addr + nrpages; idx++) {
2395 			struct page *page = f2fs_get_tmp_page(sbi, idx);
2396 
2397 			rn = F2FS_NODE(page);
2398 			sum_entry->nid = rn->footer.nid;
2399 			sum_entry->version = 0;
2400 			sum_entry->ofs_in_node = 0;
2401 			sum_entry++;
2402 			f2fs_put_page(page, 1);
2403 		}
2404 
2405 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2406 							addr + nrpages);
2407 	}
2408 }
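
/*
 * Editor's note on f2fs_restore_node_summary() above: the node segment is
 * read in readahead batches of at most BIO_MAX_PAGES blocks; e.g. with 512
 * blocks per segment and BIO_MAX_PAGES = 256 the outer loop runs twice.
 * Each batch is invalidated from the meta mapping once its nids have been
 * copied into the summary block.
 */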
2409 
2410 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2411 {
2412 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2413 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2414 	struct f2fs_journal *journal = curseg->journal;
2415 	int i;
2416 
2417 	down_write(&curseg->journal_rwsem);
2418 	for (i = 0; i < nats_in_cursum(journal); i++) {
2419 		struct nat_entry *ne;
2420 		struct f2fs_nat_entry raw_ne;
2421 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2422 
2423 		raw_ne = nat_in_journal(journal, i);
2424 
2425 		ne = __lookup_nat_cache(nm_i, nid);
2426 		if (!ne) {
2427 			ne = __alloc_nat_entry(nid, true);
2428 			__init_nat_entry(nm_i, ne, &raw_ne, true);
2429 		}
2430 
2431 		/*
2432 		 * if a free nat in the journal has not been used since the
2433 		 * last checkpoint, remove it from the available nids for now,
2434 		 * since it will be added back later.
2435 		 */
2436 		if (!get_nat_flag(ne, IS_DIRTY) &&
2437 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2438 			spin_lock(&nm_i->nid_list_lock);
2439 			nm_i->available_nids--;
2440 			spin_unlock(&nm_i->nid_list_lock);
2441 		}
2442 
2443 		__set_nat_cache_dirty(nm_i, ne);
2444 	}
2445 	update_nats_in_cursum(journal, -i);
2446 	up_write(&curseg->journal_rwsem);
2447 }
2448 
2449 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2450 						struct list_head *head, int max)
2451 {
2452 	struct nat_entry_set *cur;
2453 
2454 	if (nes->entry_cnt >= max)
2455 		goto add_out;
2456 
2457 	list_for_each_entry(cur, head, set_list) {
2458 		if (cur->entry_cnt >= nes->entry_cnt) {
2459 			list_add(&nes->set_list, cur->set_list.prev);
2460 			return;
2461 		}
2462 	}
2463 add_out:
2464 	list_add_tail(&nes->set_list, head);
2465 }
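
/*
 * Editor's note on __adjust_nat_entry_set() above: the list is kept sorted
 * by ascending entry_cnt so that small sets, which still fit in the
 * journal, are flushed first.  E.g. inserting a set with entry_cnt = 3
 * into a list holding counts {1, 7} with max = 5 places it before 7,
 * giving {1, 3, 7}; a set with entry_cnt >= max skips the walk and goes
 * straight to the tail.
 */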
2466 
2467 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2468 						struct page *page)
2469 {
2470 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2471 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2472 	struct f2fs_nat_block *nat_blk = page_address(page);
2473 	int valid = 0;
2474 	int i = 0;
2475 
2476 	if (!enabled_nat_bits(sbi, NULL))
2477 		return;
2478 
2479 	if (nat_index == 0) {
2480 		valid = 1;
2481 		i = 1;
2482 	}
2483 	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2484 		if (nat_blk->entries[i].block_addr != NULL_ADDR)
2485 			valid++;
2486 	}
2487 	if (valid == 0) {
2488 		__set_bit_le(nat_index, nm_i->empty_nat_bits);
2489 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2490 		return;
2491 	}
2492 
2493 	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
2494 	if (valid == NAT_ENTRY_PER_BLOCK)
2495 		__set_bit_le(nat_index, nm_i->full_nat_bits);
2496 	else
2497 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2498 }
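
/*
 * Editor's note on __update_nat_bits() above: a NAT block with no valid
 * entries is flagged in empty_nat_bits, one where every entry is valid in
 * full_nat_bits; load_free_nid_bitmap() below uses both at mount time to
 * classify whole blocks without reading them.  NAT block 0 is special:
 * entry 0 (the reserved nid 0) is always counted as valid, so that block
 * is never reported empty.
 */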
2499 
2500 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2501 		struct nat_entry_set *set, struct cp_control *cpc)
2502 {
2503 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2504 	struct f2fs_journal *journal = curseg->journal;
2505 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2506 	bool to_journal = true;
2507 	struct f2fs_nat_block *nat_blk;
2508 	struct nat_entry *ne, *cur;
2509 	struct page *page = NULL;
2510 
2511 	/*
2512 	 * there are two ways to flush nat entries:
2513 	 * #1, flush them to the journal in the current hot data summary block.
2514 	 * #2, flush them to the nat page.
2515 	 */
2516 	if (enabled_nat_bits(sbi, cpc) ||
2517 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2518 		to_journal = false;
2519 
2520 	if (to_journal) {
2521 		down_write(&curseg->journal_rwsem);
2522 	} else {
2523 		page = get_next_nat_page(sbi, start_nid);
2524 		nat_blk = page_address(page);
2525 		f2fs_bug_on(sbi, !nat_blk);
2526 	}
2527 
2528 	/* flush dirty nats in nat entry set */
2529 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2530 		struct f2fs_nat_entry *raw_ne;
2531 		nid_t nid = nat_get_nid(ne);
2532 		int offset;
2533 
2534 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2535 
2536 		if (to_journal) {
2537 			offset = f2fs_lookup_journal_in_cursum(journal,
2538 							NAT_JOURNAL, nid, 1);
2539 			f2fs_bug_on(sbi, offset < 0);
2540 			raw_ne = &nat_in_journal(journal, offset);
2541 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
2542 		} else {
2543 			raw_ne = &nat_blk->entries[nid - start_nid];
2544 		}
2545 		raw_nat_from_node_info(raw_ne, &ne->ni);
2546 		nat_reset_flag(ne);
2547 		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
2548 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
2549 			add_free_nid(sbi, nid, false, true);
2550 		} else {
2551 			spin_lock(&NM_I(sbi)->nid_list_lock);
2552 			update_free_nid_bitmap(sbi, nid, false, false);
2553 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2554 		}
2555 	}
2556 
2557 	if (to_journal) {
2558 		up_write(&curseg->journal_rwsem);
2559 	} else {
2560 		__update_nat_bits(sbi, start_nid, page);
2561 		f2fs_put_page(page, 1);
2562 	}
2563 
2564 	/* Allow dirty nats by node block allocation in write_begin */
2565 	if (!set->entry_cnt) {
2566 		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2567 		kmem_cache_free(nat_entry_set_slab, set);
2568 	}
2569 }
2570 
2571 /*
2572  * This function is called during the checkpointing process.
2573  */
2574 void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2575 {
2576 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2577 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2578 	struct f2fs_journal *journal = curseg->journal;
2579 	struct nat_entry_set *setvec[SETVEC_SIZE];
2580 	struct nat_entry_set *set, *tmp;
2581 	unsigned int found;
2582 	nid_t set_idx = 0;
2583 	LIST_HEAD(sets);
2584 
2585 	if (!nm_i->dirty_nat_cnt)
2586 		return;
2587 
2588 	down_write(&nm_i->nat_tree_lock);
2589 
2590 	/*
2591 	 * if there is not enough space in the journal to store dirty nat
2592 	 * entries, remove all entries from the journal and merge them
2593 	 * into the nat entry set.
2594 	 */
2595 	if (enabled_nat_bits(sbi, cpc) ||
2596 		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2597 		remove_nats_in_journal(sbi);
2598 
2599 	while ((found = __gang_lookup_nat_set(nm_i,
2600 					set_idx, SETVEC_SIZE, setvec))) {
2601 		unsigned idx;
2602 		set_idx = setvec[found - 1]->set + 1;
2603 		for (idx = 0; idx < found; idx++)
2604 			__adjust_nat_entry_set(setvec[idx], &sets,
2605 						MAX_NAT_JENTRIES(journal));
2606 	}
2607 
2608 	/* flush dirty nats in nat entry set */
2609 	list_for_each_entry_safe(set, tmp, &sets, set_list)
2610 		__flush_nat_entry_set(sbi, set, cpc);
2611 
2612 	up_write(&nm_i->nat_tree_lock);
2613 	/* Allow dirty nats by node block allocation in write_begin */
2614 }
2615 
2616 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
2617 {
2618 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2619 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2620 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
2621 	unsigned int i;
2622 	__u64 cp_ver = cur_cp_version(ckpt);
2623 	block_t nat_bits_addr;
2624 
2625 	if (!enabled_nat_bits(sbi, NULL))
2626 		return 0;
2627 
2628 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
2629 	nm_i->nat_bits = f2fs_kzalloc(sbi,
2630 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
2631 	if (!nm_i->nat_bits)
2632 		return -ENOMEM;
2633 
2634 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
2635 						nm_i->nat_bits_blocks;
2636 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
2637 		struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);
2638 
2639 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
2640 					page_address(page), F2FS_BLKSIZE);
2641 		f2fs_put_page(page, 1);
2642 	}
2643 
2644 	cp_ver |= (cur_cp_crc(ckpt) << 32);
2645 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
2646 		disable_nat_bits(sbi, true);
2647 		return 0;
2648 	}
2649 
2650 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
2651 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
2652 
2653 	f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
2654 	return 0;
2655 }
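
/*
 * Editor's note on __get_nat_bitmaps() above, with illustrative numbers
 * assuming a 4KB block: the nat_bits area holds an 8-byte checkpoint
 * version/CRC header followed by the full and empty bitmaps, one bit per
 * NAT block each.  For nat_blocks = 8192, nat_bits_bytes = 8192 / 8 = 1024
 * and the area spans 2 * 1024 + 8 = 2056 bytes, i.e. a single block at
 * the end of the checkpoint area.
 */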
2656 
2657 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
2658 {
2659 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2660 	unsigned int i = 0;
2661 	nid_t nid, last_nid;
2662 
2663 	if (!enabled_nat_bits(sbi, NULL))
2664 		return;
2665 
2666 	for (i = 0; i < nm_i->nat_blocks; i++) {
2667 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
2668 		if (i >= nm_i->nat_blocks)
2669 			break;
2670 
2671 		__set_bit_le(i, nm_i->nat_block_bitmap);
2672 
2673 		nid = i * NAT_ENTRY_PER_BLOCK;
2674 		last_nid = nid + NAT_ENTRY_PER_BLOCK;
2675 
2676 		spin_lock(&NM_I(sbi)->nid_list_lock);
2677 		for (; nid < last_nid; nid++)
2678 			update_free_nid_bitmap(sbi, nid, true, true);
2679 		spin_unlock(&NM_I(sbi)->nid_list_lock);
2680 	}
2681 
2682 	for (i = 0; i < nm_i->nat_blocks; i++) {
2683 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
2684 		if (i >= nm_i->nat_blocks)
2685 			break;
2686 
2687 		__set_bit_le(i, nm_i->nat_block_bitmap);
2688 	}
2689 }
2690 
2691 static int init_node_manager(struct f2fs_sb_info *sbi)
2692 {
2693 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2694 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2695 	unsigned char *version_bitmap;
2696 	unsigned int nat_segs;
2697 	int err;
2698 
2699 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2700 
2701 	/* segment_count_nat includes pair segments, so divide by 2. */
2702 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
2703 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
2704 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
2705 
2706 	/* unused nids: 0, node, meta (and root, which is counted as a valid node) */
2707 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
2708 				sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
2709 	nm_i->nid_cnt[FREE_NID] = 0;
2710 	nm_i->nid_cnt[PREALLOC_NID] = 0;
2711 	nm_i->nat_cnt = 0;
2712 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2713 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
2714 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2715 
2716 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
2717 	INIT_LIST_HEAD(&nm_i->free_nid_list);
2718 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2719 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2720 	INIT_LIST_HEAD(&nm_i->nat_entries);
2721 
2722 	mutex_init(&nm_i->build_lock);
2723 	spin_lock_init(&nm_i->nid_list_lock);
2724 	init_rwsem(&nm_i->nat_tree_lock);
2725 
2726 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
2727 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2728 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2729 	if (!version_bitmap)
2730 		return -EFAULT;
2731 
2732 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
2733 					GFP_KERNEL);
2734 	if (!nm_i->nat_bitmap)
2735 		return -ENOMEM;
2736 
2737 	err = __get_nat_bitmaps(sbi);
2738 	if (err)
2739 		return err;
2740 
2741 #ifdef CONFIG_F2FS_CHECK_FS
2742 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
2743 					GFP_KERNEL);
2744 	if (!nm_i->nat_bitmap_mir)
2745 		return -ENOMEM;
2746 #endif
2747 
2748 	return 0;
2749 }
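
/*
 * Editor's note on the sizing in init_node_manager() above (illustrative,
 * assuming 512 blocks per segment and NAT_ENTRY_PER_BLOCK = 455): with
 * segment_count_nat = 20, nat_segs = 10, so
 *
 *	nat_blocks = 10 << 9    = 5120
 *	max_nid    = 455 * 5120 = 2329600
 *
 * available_nids then subtracts the currently valid node count, quota
 * files and the reserved nids.
 */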
2750 
2751 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
2752 {
2753 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2754 	int i;
2755 
2756 	nm_i->free_nid_bitmap =
2757 		f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
2758 					     nm_i->nat_blocks),
2759 			     GFP_KERNEL);
2760 	if (!nm_i->free_nid_bitmap)
2761 		return -ENOMEM;
2762 
2763 	for (i = 0; i < nm_i->nat_blocks; i++) {
2764 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
2765 				NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
2766 		if (!nm_i->free_nid_bitmap[i])
2767 			return -ENOMEM;
2768 	}
2769 
2770 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
2771 								GFP_KERNEL);
2772 	if (!nm_i->nat_block_bitmap)
2773 		return -ENOMEM;
2774 
2775 	nm_i->free_nid_count =
2776 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
2777 					      nm_i->nat_blocks),
2778 			      GFP_KERNEL);
2779 	if (!nm_i->free_nid_count)
2780 		return -ENOMEM;
2781 	return 0;
2782 }
2783 
2784 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
2785 {
2786 	int err;
2787 
2788 	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
2789 							GFP_KERNEL);
2790 	if (!sbi->nm_info)
2791 		return -ENOMEM;
2792 
2793 	err = init_node_manager(sbi);
2794 	if (err)
2795 		return err;
2796 
2797 	err = init_free_nid_cache(sbi);
2798 	if (err)
2799 		return err;
2800 
2801 	/* load free nid status from nat_bits table */
2802 	load_free_nid_bitmap(sbi);
2803 
2804 	f2fs_build_free_nids(sbi, true, true);
2805 	return 0;
2806 }
2807 
2808 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
2809 {
2810 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2811 	struct free_nid *i, *next_i;
2812 	struct nat_entry *natvec[NATVEC_SIZE];
2813 	struct nat_entry_set *setvec[SETVEC_SIZE];
2814 	nid_t nid = 0;
2815 	unsigned int found;
2816 
2817 	if (!nm_i)
2818 		return;
2819 
2820 	/* destroy free nid list */
2821 	spin_lock(&nm_i->nid_list_lock);
2822 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
2823 		__remove_free_nid(sbi, i, FREE_NID);
2824 		spin_unlock(&nm_i->nid_list_lock);
2825 		kmem_cache_free(free_nid_slab, i);
2826 		spin_lock(&nm_i->nid_list_lock);
2827 	}
2828 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
2829 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
2830 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
2831 	spin_unlock(&nm_i->nid_list_lock);
2832 
2833 	/* destroy nat cache */
2834 	down_write(&nm_i->nat_tree_lock);
2835 	while ((found = __gang_lookup_nat_cache(nm_i,
2836 					nid, NATVEC_SIZE, natvec))) {
2837 		unsigned idx;
2838 
2839 		nid = nat_get_nid(natvec[found - 1]) + 1;
2840 		for (idx = 0; idx < found; idx++)
2841 			__del_from_nat_cache(nm_i, natvec[idx]);
2842 	}
2843 	f2fs_bug_on(sbi, nm_i->nat_cnt);
2844 
2845 	/* destroy nat set cache */
2846 	nid = 0;
2847 	while ((found = __gang_lookup_nat_set(nm_i,
2848 					nid, SETVEC_SIZE, setvec))) {
2849 		unsigned idx;
2850 
2851 		nid = setvec[found - 1]->set + 1;
2852 		for (idx = 0; idx < found; idx++) {
2853 			/* entry_cnt may be non-zero if a checkpoint error occurred */
2854 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
2855 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
2856 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
2857 		}
2858 	}
2859 	up_write(&nm_i->nat_tree_lock);
2860 
2861 	kvfree(nm_i->nat_block_bitmap);
2862 	if (nm_i->free_nid_bitmap) {
2863 		int i;
2864 
2865 		for (i = 0; i < nm_i->nat_blocks; i++)
2866 			kvfree(nm_i->free_nid_bitmap[i]);
2867 		kfree(nm_i->free_nid_bitmap);
2868 	}
2869 	kvfree(nm_i->free_nid_count);
2870 
2871 	kfree(nm_i->nat_bitmap);
2872 	kfree(nm_i->nat_bits);
2873 #ifdef CONFIG_F2FS_CHECK_FS
2874 	kfree(nm_i->nat_bitmap_mir);
2875 #endif
2876 	sbi->nm_info = NULL;
2877 	kfree(nm_i);
2878 }
2879 
2880 int __init f2fs_create_node_manager_caches(void)
2881 {
2882 	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
2883 			sizeof(struct nat_entry));
2884 	if (!nat_entry_slab)
2885 		goto fail;
2886 
2887 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
2888 			sizeof(struct free_nid));
2889 	if (!free_nid_slab)
2890 		goto destroy_nat_entry;
2891 
2892 	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
2893 			sizeof(struct nat_entry_set));
2894 	if (!nat_entry_set_slab)
2895 		goto destroy_free_nid;
2896 	return 0;
2897 
2898 destroy_free_nid:
2899 	kmem_cache_destroy(free_nid_slab);
2900 destroy_nat_entry:
2901 	kmem_cache_destroy(nat_entry_slab);
2902 fail:
2903 	return -ENOMEM;
2904 }
2905 
2906 void f2fs_destroy_node_manager_caches(void)
2907 {
2908 	kmem_cache_destroy(nat_entry_set_slab);
2909 	kmem_cache_destroy(free_nid_slab);
2910 	kmem_cache_destroy(nat_entry_slab);
2911 }
2912