xref: /openbmc/linux/fs/f2fs/node.c (revision 83d3c4f2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/node.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/mpage.h>
11 #include <linux/backing-dev.h>
12 #include <linux/blkdev.h>
13 #include <linux/pagevec.h>
14 #include <linux/swap.h>
15 
16 #include "f2fs.h"
17 #include "node.h"
18 #include "segment.h"
19 #include "xattr.h"
20 #include "iostat.h"
21 #include <trace/events/f2fs.h>
22 
23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
24 
25 static struct kmem_cache *nat_entry_slab;
26 static struct kmem_cache *free_nid_slab;
27 static struct kmem_cache *nat_entry_set_slab;
28 static struct kmem_cache *fsync_node_entry_slab;
29 
30 /*
31  * Check whether the given nid is within node id range.
32  */
33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34 {
35 	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36 		set_sbi_flag(sbi, SBI_NEED_FSCK);
37 		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
38 			  __func__, nid);
39 		return -EFSCORRUPTED;
40 	}
41 	return 0;
42 }
43 
44 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
45 {
46 	struct f2fs_nm_info *nm_i = NM_I(sbi);
47 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
48 	struct sysinfo val;
49 	unsigned long avail_ram;
50 	unsigned long mem_size = 0;
51 	bool res = false;
52 
53 	if (!nm_i)
54 		return true;
55 
56 	si_meminfo(&val);
57 
58 	/* only uses low memory */
59 	avail_ram = val.totalram - val.totalhigh;
60 
61 	/*
62 	 * give 25%, 25%, 50%, 50%, 50% memory for each components respectively
63 	 */
64 	if (type == FREE_NIDS) {
65 		mem_size = (nm_i->nid_cnt[FREE_NID] *
66 				sizeof(struct free_nid)) >> PAGE_SHIFT;
67 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
68 	} else if (type == NAT_ENTRIES) {
69 		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
70 				sizeof(struct nat_entry)) >> PAGE_SHIFT;
71 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
72 		if (excess_cached_nats(sbi))
73 			res = false;
74 	} else if (type == DIRTY_DENTS) {
75 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
76 			return false;
77 		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
78 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
79 	} else if (type == INO_ENTRIES) {
80 		int i;
81 
82 		for (i = 0; i < MAX_INO_ENTRY; i++)
83 			mem_size += sbi->im[i].ino_num *
84 						sizeof(struct ino_entry);
85 		mem_size >>= PAGE_SHIFT;
86 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
87 	} else if (type == EXTENT_CACHE) {
88 		mem_size = (atomic_read(&sbi->total_ext_tree) *
89 				sizeof(struct extent_tree) +
90 				atomic_read(&sbi->total_ext_node) *
91 				sizeof(struct extent_node)) >> PAGE_SHIFT;
92 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
93 	} else if (type == INMEM_PAGES) {
94 		/* it allows 20% / total_ram for inmemory pages */
95 		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
96 		res = mem_size < (val.totalram / 5);
97 	} else if (type == DISCARD_CACHE) {
98 		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
99 				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
100 		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
101 	} else if (type == COMPRESS_PAGE) {
102 #ifdef CONFIG_F2FS_FS_COMPRESSION
103 		unsigned long free_ram = val.freeram;
104 
105 		/*
106 		 * free memory is lower than watermark or cached page count
107 		 * exceed threshold, deny caching compress page.
108 		 */
109 		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
110 			(COMPRESS_MAPPING(sbi)->nrpages <
111 			 free_ram * sbi->compress_percent / 100);
112 #else
113 		res = false;
114 #endif
115 	} else {
116 		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
117 			return true;
118 	}
119 	return res;
120 }
121 
122 static void clear_node_page_dirty(struct page *page)
123 {
124 	if (PageDirty(page)) {
125 		f2fs_clear_page_cache_dirty_tag(page);
126 		clear_page_dirty_for_io(page);
127 		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
128 	}
129 	ClearPageUptodate(page);
130 }
131 
132 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
133 {
134 	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
135 }
136 
137 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
138 {
139 	struct page *src_page;
140 	struct page *dst_page;
141 	pgoff_t dst_off;
142 	void *src_addr;
143 	void *dst_addr;
144 	struct f2fs_nm_info *nm_i = NM_I(sbi);
145 
146 	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
147 
148 	/* get current nat block page with lock */
149 	src_page = get_current_nat_page(sbi, nid);
150 	if (IS_ERR(src_page))
151 		return src_page;
152 	dst_page = f2fs_grab_meta_page(sbi, dst_off);
153 	f2fs_bug_on(sbi, PageDirty(src_page));
154 
155 	src_addr = page_address(src_page);
156 	dst_addr = page_address(dst_page);
157 	memcpy(dst_addr, src_addr, PAGE_SIZE);
158 	set_page_dirty(dst_page);
159 	f2fs_put_page(src_page, 1);
160 
161 	set_to_next_nat(nm_i, nid);
162 
163 	return dst_page;
164 }
165 
166 static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
167 						nid_t nid, bool no_fail)
168 {
169 	struct nat_entry *new;
170 
171 	new = f2fs_kmem_cache_alloc(nat_entry_slab,
172 					GFP_F2FS_ZERO, no_fail, sbi);
173 	if (new) {
174 		nat_set_nid(new, nid);
175 		nat_reset_flag(new);
176 	}
177 	return new;
178 }
179 
180 static void __free_nat_entry(struct nat_entry *e)
181 {
182 	kmem_cache_free(nat_entry_slab, e);
183 }
184 
185 /* must be locked by nat_tree_lock */
186 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
187 	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
188 {
189 	if (no_fail)
190 		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
191 	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
192 		return NULL;
193 
194 	if (raw_ne)
195 		node_info_from_raw_nat(&ne->ni, raw_ne);
196 
197 	spin_lock(&nm_i->nat_list_lock);
198 	list_add_tail(&ne->list, &nm_i->nat_entries);
199 	spin_unlock(&nm_i->nat_list_lock);
200 
201 	nm_i->nat_cnt[TOTAL_NAT]++;
202 	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
203 	return ne;
204 }
205 
206 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
207 {
208 	struct nat_entry *ne;
209 
210 	ne = radix_tree_lookup(&nm_i->nat_root, n);
211 
212 	/* for recent accessed nat entry, move it to tail of lru list */
213 	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
214 		spin_lock(&nm_i->nat_list_lock);
215 		if (!list_empty(&ne->list))
216 			list_move_tail(&ne->list, &nm_i->nat_entries);
217 		spin_unlock(&nm_i->nat_list_lock);
218 	}
219 
220 	return ne;
221 }
222 
223 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
224 		nid_t start, unsigned int nr, struct nat_entry **ep)
225 {
226 	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
227 }
228 
229 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
230 {
231 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
232 	nm_i->nat_cnt[TOTAL_NAT]--;
233 	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
234 	__free_nat_entry(e);
235 }
236 
237 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
238 							struct nat_entry *ne)
239 {
240 	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
241 	struct nat_entry_set *head;
242 
243 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
244 	if (!head) {
245 		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
246 						GFP_NOFS, true, NULL);
247 
248 		INIT_LIST_HEAD(&head->entry_list);
249 		INIT_LIST_HEAD(&head->set_list);
250 		head->set = set;
251 		head->entry_cnt = 0;
252 		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
253 	}
254 	return head;
255 }
256 
257 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
258 						struct nat_entry *ne)
259 {
260 	struct nat_entry_set *head;
261 	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
262 
263 	if (!new_ne)
264 		head = __grab_nat_entry_set(nm_i, ne);
265 
266 	/*
267 	 * update entry_cnt in below condition:
268 	 * 1. update NEW_ADDR to valid block address;
269 	 * 2. update old block address to new one;
270 	 */
271 	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
272 				!get_nat_flag(ne, IS_DIRTY)))
273 		head->entry_cnt++;
274 
275 	set_nat_flag(ne, IS_PREALLOC, new_ne);
276 
277 	if (get_nat_flag(ne, IS_DIRTY))
278 		goto refresh_list;
279 
280 	nm_i->nat_cnt[DIRTY_NAT]++;
281 	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
282 	set_nat_flag(ne, IS_DIRTY, true);
283 refresh_list:
284 	spin_lock(&nm_i->nat_list_lock);
285 	if (new_ne)
286 		list_del_init(&ne->list);
287 	else
288 		list_move_tail(&ne->list, &head->entry_list);
289 	spin_unlock(&nm_i->nat_list_lock);
290 }
291 
292 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
293 		struct nat_entry_set *set, struct nat_entry *ne)
294 {
295 	spin_lock(&nm_i->nat_list_lock);
296 	list_move_tail(&ne->list, &nm_i->nat_entries);
297 	spin_unlock(&nm_i->nat_list_lock);
298 
299 	set_nat_flag(ne, IS_DIRTY, false);
300 	set->entry_cnt--;
301 	nm_i->nat_cnt[DIRTY_NAT]--;
302 	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
303 }
304 
305 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
306 		nid_t start, unsigned int nr, struct nat_entry_set **ep)
307 {
308 	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
309 							start, nr);
310 }
311 
312 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
313 {
314 	return NODE_MAPPING(sbi) == page->mapping &&
315 			IS_DNODE(page) && is_cold_node(page);
316 }
317 
318 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
319 {
320 	spin_lock_init(&sbi->fsync_node_lock);
321 	INIT_LIST_HEAD(&sbi->fsync_node_list);
322 	sbi->fsync_seg_id = 0;
323 	sbi->fsync_node_num = 0;
324 }
325 
326 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
327 							struct page *page)
328 {
329 	struct fsync_node_entry *fn;
330 	unsigned long flags;
331 	unsigned int seq_id;
332 
333 	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
334 					GFP_NOFS, true, NULL);
335 
336 	get_page(page);
337 	fn->page = page;
338 	INIT_LIST_HEAD(&fn->list);
339 
340 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
341 	list_add_tail(&fn->list, &sbi->fsync_node_list);
342 	fn->seq_id = sbi->fsync_seg_id++;
343 	seq_id = fn->seq_id;
344 	sbi->fsync_node_num++;
345 	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
346 
347 	return seq_id;
348 }
349 
350 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
351 {
352 	struct fsync_node_entry *fn;
353 	unsigned long flags;
354 
355 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
356 	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
357 		if (fn->page == page) {
358 			list_del(&fn->list);
359 			sbi->fsync_node_num--;
360 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
361 			kmem_cache_free(fsync_node_entry_slab, fn);
362 			put_page(page);
363 			return;
364 		}
365 	}
366 	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
367 	f2fs_bug_on(sbi, 1);
368 }
369 
370 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
371 {
372 	unsigned long flags;
373 
374 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
375 	sbi->fsync_seg_id = 0;
376 	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
377 }
378 
379 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
380 {
381 	struct f2fs_nm_info *nm_i = NM_I(sbi);
382 	struct nat_entry *e;
383 	bool need = false;
384 
385 	down_read(&nm_i->nat_tree_lock);
386 	e = __lookup_nat_cache(nm_i, nid);
387 	if (e) {
388 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
389 				!get_nat_flag(e, HAS_FSYNCED_INODE))
390 			need = true;
391 	}
392 	up_read(&nm_i->nat_tree_lock);
393 	return need;
394 }
395 
396 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
397 {
398 	struct f2fs_nm_info *nm_i = NM_I(sbi);
399 	struct nat_entry *e;
400 	bool is_cp = true;
401 
402 	down_read(&nm_i->nat_tree_lock);
403 	e = __lookup_nat_cache(nm_i, nid);
404 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
405 		is_cp = false;
406 	up_read(&nm_i->nat_tree_lock);
407 	return is_cp;
408 }
409 
410 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
411 {
412 	struct f2fs_nm_info *nm_i = NM_I(sbi);
413 	struct nat_entry *e;
414 	bool need_update = true;
415 
416 	down_read(&nm_i->nat_tree_lock);
417 	e = __lookup_nat_cache(nm_i, ino);
418 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
419 			(get_nat_flag(e, IS_CHECKPOINTED) ||
420 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
421 		need_update = false;
422 	up_read(&nm_i->nat_tree_lock);
423 	return need_update;
424 }
425 
426 /* must be locked by nat_tree_lock */
427 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
428 						struct f2fs_nat_entry *ne)
429 {
430 	struct f2fs_nm_info *nm_i = NM_I(sbi);
431 	struct nat_entry *new, *e;
432 
433 	new = __alloc_nat_entry(sbi, nid, false);
434 	if (!new)
435 		return;
436 
437 	down_write(&nm_i->nat_tree_lock);
438 	e = __lookup_nat_cache(nm_i, nid);
439 	if (!e)
440 		e = __init_nat_entry(nm_i, new, ne, false);
441 	else
442 		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
443 				nat_get_blkaddr(e) !=
444 					le32_to_cpu(ne->block_addr) ||
445 				nat_get_version(e) != ne->version);
446 	up_write(&nm_i->nat_tree_lock);
447 	if (e != new)
448 		__free_nat_entry(new);
449 }
450 
451 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
452 			block_t new_blkaddr, bool fsync_done)
453 {
454 	struct f2fs_nm_info *nm_i = NM_I(sbi);
455 	struct nat_entry *e;
456 	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
457 
458 	down_write(&nm_i->nat_tree_lock);
459 	e = __lookup_nat_cache(nm_i, ni->nid);
460 	if (!e) {
461 		e = __init_nat_entry(nm_i, new, NULL, true);
462 		copy_node_info(&e->ni, ni);
463 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
464 	} else if (new_blkaddr == NEW_ADDR) {
465 		/*
466 		 * when nid is reallocated,
467 		 * previous nat entry can be remained in nat cache.
468 		 * So, reinitialize it with new information.
469 		 */
470 		copy_node_info(&e->ni, ni);
471 		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
472 	}
473 	/* let's free early to reduce memory consumption */
474 	if (e != new)
475 		__free_nat_entry(new);
476 
477 	/* sanity check */
478 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
479 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
480 			new_blkaddr == NULL_ADDR);
481 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
482 			new_blkaddr == NEW_ADDR);
483 	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
484 			new_blkaddr == NEW_ADDR);
485 
486 	/* increment version no as node is removed */
487 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
488 		unsigned char version = nat_get_version(e);
489 
490 		nat_set_version(e, inc_node_version(version));
491 	}
492 
493 	/* change address */
494 	nat_set_blkaddr(e, new_blkaddr);
495 	if (!__is_valid_data_blkaddr(new_blkaddr))
496 		set_nat_flag(e, IS_CHECKPOINTED, false);
497 	__set_nat_cache_dirty(nm_i, e);
498 
499 	/* update fsync_mark if its inode nat entry is still alive */
500 	if (ni->nid != ni->ino)
501 		e = __lookup_nat_cache(nm_i, ni->ino);
502 	if (e) {
503 		if (fsync_done && ni->nid == ni->ino)
504 			set_nat_flag(e, HAS_FSYNCED_INODE, true);
505 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
506 	}
507 	up_write(&nm_i->nat_tree_lock);
508 }
509 
510 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
511 {
512 	struct f2fs_nm_info *nm_i = NM_I(sbi);
513 	int nr = nr_shrink;
514 
515 	if (!down_write_trylock(&nm_i->nat_tree_lock))
516 		return 0;
517 
518 	spin_lock(&nm_i->nat_list_lock);
519 	while (nr_shrink) {
520 		struct nat_entry *ne;
521 
522 		if (list_empty(&nm_i->nat_entries))
523 			break;
524 
525 		ne = list_first_entry(&nm_i->nat_entries,
526 					struct nat_entry, list);
527 		list_del(&ne->list);
528 		spin_unlock(&nm_i->nat_list_lock);
529 
530 		__del_from_nat_cache(nm_i, ne);
531 		nr_shrink--;
532 
533 		spin_lock(&nm_i->nat_list_lock);
534 	}
535 	spin_unlock(&nm_i->nat_list_lock);
536 
537 	up_write(&nm_i->nat_tree_lock);
538 	return nr - nr_shrink;
539 }
540 
541 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
542 						struct node_info *ni)
543 {
544 	struct f2fs_nm_info *nm_i = NM_I(sbi);
545 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
546 	struct f2fs_journal *journal = curseg->journal;
547 	nid_t start_nid = START_NID(nid);
548 	struct f2fs_nat_block *nat_blk;
549 	struct page *page = NULL;
550 	struct f2fs_nat_entry ne;
551 	struct nat_entry *e;
552 	pgoff_t index;
553 	block_t blkaddr;
554 	int i;
555 
556 	ni->nid = nid;
557 retry:
558 	/* Check nat cache */
559 	down_read(&nm_i->nat_tree_lock);
560 	e = __lookup_nat_cache(nm_i, nid);
561 	if (e) {
562 		ni->ino = nat_get_ino(e);
563 		ni->blk_addr = nat_get_blkaddr(e);
564 		ni->version = nat_get_version(e);
565 		up_read(&nm_i->nat_tree_lock);
566 		return 0;
567 	}
568 
569 	/*
570 	 * Check current segment summary by trying to grab journal_rwsem first.
571 	 * This sem is on the critical path on the checkpoint requiring the above
572 	 * nat_tree_lock. Therefore, we should retry, if we failed to grab here
573 	 * while not bothering checkpoint.
574 	 */
575 	if (!rwsem_is_locked(&sbi->cp_global_sem)) {
576 		down_read(&curseg->journal_rwsem);
577 	} else if (!down_read_trylock(&curseg->journal_rwsem)) {
578 		up_read(&nm_i->nat_tree_lock);
579 		goto retry;
580 	}
581 
582 	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
583 	if (i >= 0) {
584 		ne = nat_in_journal(journal, i);
585 		node_info_from_raw_nat(ni, &ne);
586 	}
587 	up_read(&curseg->journal_rwsem);
588 	if (i >= 0) {
589 		up_read(&nm_i->nat_tree_lock);
590 		goto cache;
591 	}
592 
593 	/* Fill node_info from nat page */
594 	index = current_nat_addr(sbi, nid);
595 	up_read(&nm_i->nat_tree_lock);
596 
597 	page = f2fs_get_meta_page(sbi, index);
598 	if (IS_ERR(page))
599 		return PTR_ERR(page);
600 
601 	nat_blk = (struct f2fs_nat_block *)page_address(page);
602 	ne = nat_blk->entries[nid - start_nid];
603 	node_info_from_raw_nat(ni, &ne);
604 	f2fs_put_page(page, 1);
605 cache:
606 	blkaddr = le32_to_cpu(ne.block_addr);
607 	if (__is_valid_data_blkaddr(blkaddr) &&
608 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
609 		return -EFAULT;
610 
611 	/* cache nat entry */
612 	cache_nat_entry(sbi, nid, &ne);
613 	return 0;
614 }
615 
616 /*
617  * readahead MAX_RA_NODE number of node pages.
618  */
619 static void f2fs_ra_node_pages(struct page *parent, int start, int n)
620 {
621 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
622 	struct blk_plug plug;
623 	int i, end;
624 	nid_t nid;
625 
626 	blk_start_plug(&plug);
627 
628 	/* Then, try readahead for siblings of the desired node */
629 	end = start + n;
630 	end = min(end, NIDS_PER_BLOCK);
631 	for (i = start; i < end; i++) {
632 		nid = get_nid(parent, i, false);
633 		f2fs_ra_node_page(sbi, nid);
634 	}
635 
636 	blk_finish_plug(&plug);
637 }
638 
639 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
640 {
641 	const long direct_index = ADDRS_PER_INODE(dn->inode);
642 	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
643 	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
644 	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
645 	int cur_level = dn->cur_level;
646 	int max_level = dn->max_level;
647 	pgoff_t base = 0;
648 
649 	if (!dn->max_level)
650 		return pgofs + 1;
651 
652 	while (max_level-- > cur_level)
653 		skipped_unit *= NIDS_PER_BLOCK;
654 
655 	switch (dn->max_level) {
656 	case 3:
657 		base += 2 * indirect_blks;
658 		fallthrough;
659 	case 2:
660 		base += 2 * direct_blks;
661 		fallthrough;
662 	case 1:
663 		base += direct_index;
664 		break;
665 	default:
666 		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
667 	}
668 
669 	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
670 }
671 
672 /*
673  * The maximum depth is four.
674  * Offset[0] will have raw inode offset.
675  */
676 static int get_node_path(struct inode *inode, long block,
677 				int offset[4], unsigned int noffset[4])
678 {
679 	const long direct_index = ADDRS_PER_INODE(inode);
680 	const long direct_blks = ADDRS_PER_BLOCK(inode);
681 	const long dptrs_per_blk = NIDS_PER_BLOCK;
682 	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
683 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
684 	int n = 0;
685 	int level = 0;
686 
687 	noffset[0] = 0;
688 
689 	if (block < direct_index) {
690 		offset[n] = block;
691 		goto got;
692 	}
693 	block -= direct_index;
694 	if (block < direct_blks) {
695 		offset[n++] = NODE_DIR1_BLOCK;
696 		noffset[n] = 1;
697 		offset[n] = block;
698 		level = 1;
699 		goto got;
700 	}
701 	block -= direct_blks;
702 	if (block < direct_blks) {
703 		offset[n++] = NODE_DIR2_BLOCK;
704 		noffset[n] = 2;
705 		offset[n] = block;
706 		level = 1;
707 		goto got;
708 	}
709 	block -= direct_blks;
710 	if (block < indirect_blks) {
711 		offset[n++] = NODE_IND1_BLOCK;
712 		noffset[n] = 3;
713 		offset[n++] = block / direct_blks;
714 		noffset[n] = 4 + offset[n - 1];
715 		offset[n] = block % direct_blks;
716 		level = 2;
717 		goto got;
718 	}
719 	block -= indirect_blks;
720 	if (block < indirect_blks) {
721 		offset[n++] = NODE_IND2_BLOCK;
722 		noffset[n] = 4 + dptrs_per_blk;
723 		offset[n++] = block / direct_blks;
724 		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
725 		offset[n] = block % direct_blks;
726 		level = 2;
727 		goto got;
728 	}
729 	block -= indirect_blks;
730 	if (block < dindirect_blks) {
731 		offset[n++] = NODE_DIND_BLOCK;
732 		noffset[n] = 5 + (dptrs_per_blk * 2);
733 		offset[n++] = block / indirect_blks;
734 		noffset[n] = 6 + (dptrs_per_blk * 2) +
735 			      offset[n - 1] * (dptrs_per_blk + 1);
736 		offset[n++] = (block / direct_blks) % dptrs_per_blk;
737 		noffset[n] = 7 + (dptrs_per_blk * 2) +
738 			      offset[n - 2] * (dptrs_per_blk + 1) +
739 			      offset[n - 1];
740 		offset[n] = block % direct_blks;
741 		level = 3;
742 		goto got;
743 	} else {
744 		return -E2BIG;
745 	}
746 got:
747 	return level;
748 }
749 
750 /*
751  * Caller should call f2fs_put_dnode(dn).
752  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
753  * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
754  */
755 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
756 {
757 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
758 	struct page *npage[4];
759 	struct page *parent = NULL;
760 	int offset[4];
761 	unsigned int noffset[4];
762 	nid_t nids[4];
763 	int level, i = 0;
764 	int err = 0;
765 
766 	level = get_node_path(dn->inode, index, offset, noffset);
767 	if (level < 0)
768 		return level;
769 
770 	nids[0] = dn->inode->i_ino;
771 	npage[0] = dn->inode_page;
772 
773 	if (!npage[0]) {
774 		npage[0] = f2fs_get_node_page(sbi, nids[0]);
775 		if (IS_ERR(npage[0]))
776 			return PTR_ERR(npage[0]);
777 	}
778 
779 	/* if inline_data is set, should not report any block indices */
780 	if (f2fs_has_inline_data(dn->inode) && index) {
781 		err = -ENOENT;
782 		f2fs_put_page(npage[0], 1);
783 		goto release_out;
784 	}
785 
786 	parent = npage[0];
787 	if (level != 0)
788 		nids[1] = get_nid(parent, offset[0], true);
789 	dn->inode_page = npage[0];
790 	dn->inode_page_locked = true;
791 
792 	/* get indirect or direct nodes */
793 	for (i = 1; i <= level; i++) {
794 		bool done = false;
795 
796 		if (!nids[i] && mode == ALLOC_NODE) {
797 			/* alloc new node */
798 			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
799 				err = -ENOSPC;
800 				goto release_pages;
801 			}
802 
803 			dn->nid = nids[i];
804 			npage[i] = f2fs_new_node_page(dn, noffset[i]);
805 			if (IS_ERR(npage[i])) {
806 				f2fs_alloc_nid_failed(sbi, nids[i]);
807 				err = PTR_ERR(npage[i]);
808 				goto release_pages;
809 			}
810 
811 			set_nid(parent, offset[i - 1], nids[i], i == 1);
812 			f2fs_alloc_nid_done(sbi, nids[i]);
813 			done = true;
814 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
815 			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
816 			if (IS_ERR(npage[i])) {
817 				err = PTR_ERR(npage[i]);
818 				goto release_pages;
819 			}
820 			done = true;
821 		}
822 		if (i == 1) {
823 			dn->inode_page_locked = false;
824 			unlock_page(parent);
825 		} else {
826 			f2fs_put_page(parent, 1);
827 		}
828 
829 		if (!done) {
830 			npage[i] = f2fs_get_node_page(sbi, nids[i]);
831 			if (IS_ERR(npage[i])) {
832 				err = PTR_ERR(npage[i]);
833 				f2fs_put_page(npage[0], 0);
834 				goto release_out;
835 			}
836 		}
837 		if (i < level) {
838 			parent = npage[i];
839 			nids[i + 1] = get_nid(parent, offset[i], false);
840 		}
841 	}
842 	dn->nid = nids[level];
843 	dn->ofs_in_node = offset[level];
844 	dn->node_page = npage[level];
845 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
846 
847 	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
848 					f2fs_sb_has_readonly(sbi)) {
849 		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
850 		block_t blkaddr;
851 
852 		if (!c_len)
853 			goto out;
854 
855 		blkaddr = f2fs_data_blkaddr(dn);
856 		if (blkaddr == COMPRESS_ADDR)
857 			blkaddr = data_blkaddr(dn->inode, dn->node_page,
858 						dn->ofs_in_node + 1);
859 
860 		f2fs_update_extent_tree_range_compressed(dn->inode,
861 					index, blkaddr,
862 					F2FS_I(dn->inode)->i_cluster_size,
863 					c_len);
864 	}
865 out:
866 	return 0;
867 
868 release_pages:
869 	f2fs_put_page(parent, 1);
870 	if (i > 1)
871 		f2fs_put_page(npage[0], 0);
872 release_out:
873 	dn->inode_page = NULL;
874 	dn->node_page = NULL;
875 	if (err == -ENOENT) {
876 		dn->cur_level = i;
877 		dn->max_level = level;
878 		dn->ofs_in_node = offset[level];
879 	}
880 	return err;
881 }
882 
883 static int truncate_node(struct dnode_of_data *dn)
884 {
885 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
886 	struct node_info ni;
887 	int err;
888 	pgoff_t index;
889 
890 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
891 	if (err)
892 		return err;
893 
894 	/* Deallocate node address */
895 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
896 	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
897 	set_node_addr(sbi, &ni, NULL_ADDR, false);
898 
899 	if (dn->nid == dn->inode->i_ino) {
900 		f2fs_remove_orphan_inode(sbi, dn->nid);
901 		dec_valid_inode_count(sbi);
902 		f2fs_inode_synced(dn->inode);
903 	}
904 
905 	clear_node_page_dirty(dn->node_page);
906 	set_sbi_flag(sbi, SBI_IS_DIRTY);
907 
908 	index = dn->node_page->index;
909 	f2fs_put_page(dn->node_page, 1);
910 
911 	invalidate_mapping_pages(NODE_MAPPING(sbi),
912 			index, index);
913 
914 	dn->node_page = NULL;
915 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
916 
917 	return 0;
918 }
919 
920 static int truncate_dnode(struct dnode_of_data *dn)
921 {
922 	struct page *page;
923 	int err;
924 
925 	if (dn->nid == 0)
926 		return 1;
927 
928 	/* get direct node */
929 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
930 	if (PTR_ERR(page) == -ENOENT)
931 		return 1;
932 	else if (IS_ERR(page))
933 		return PTR_ERR(page);
934 
935 	/* Make dnode_of_data for parameter */
936 	dn->node_page = page;
937 	dn->ofs_in_node = 0;
938 	f2fs_truncate_data_blocks(dn);
939 	err = truncate_node(dn);
940 	if (err)
941 		return err;
942 
943 	return 1;
944 }
945 
946 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
947 						int ofs, int depth)
948 {
949 	struct dnode_of_data rdn = *dn;
950 	struct page *page;
951 	struct f2fs_node *rn;
952 	nid_t child_nid;
953 	unsigned int child_nofs;
954 	int freed = 0;
955 	int i, ret;
956 
957 	if (dn->nid == 0)
958 		return NIDS_PER_BLOCK + 1;
959 
960 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
961 
962 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
963 	if (IS_ERR(page)) {
964 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
965 		return PTR_ERR(page);
966 	}
967 
968 	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
969 
970 	rn = F2FS_NODE(page);
971 	if (depth < 3) {
972 		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
973 			child_nid = le32_to_cpu(rn->in.nid[i]);
974 			if (child_nid == 0)
975 				continue;
976 			rdn.nid = child_nid;
977 			ret = truncate_dnode(&rdn);
978 			if (ret < 0)
979 				goto out_err;
980 			if (set_nid(page, i, 0, false))
981 				dn->node_changed = true;
982 		}
983 	} else {
984 		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
985 		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
986 			child_nid = le32_to_cpu(rn->in.nid[i]);
987 			if (child_nid == 0) {
988 				child_nofs += NIDS_PER_BLOCK + 1;
989 				continue;
990 			}
991 			rdn.nid = child_nid;
992 			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
993 			if (ret == (NIDS_PER_BLOCK + 1)) {
994 				if (set_nid(page, i, 0, false))
995 					dn->node_changed = true;
996 				child_nofs += ret;
997 			} else if (ret < 0 && ret != -ENOENT) {
998 				goto out_err;
999 			}
1000 		}
1001 		freed = child_nofs;
1002 	}
1003 
1004 	if (!ofs) {
1005 		/* remove current indirect node */
1006 		dn->node_page = page;
1007 		ret = truncate_node(dn);
1008 		if (ret)
1009 			goto out_err;
1010 		freed++;
1011 	} else {
1012 		f2fs_put_page(page, 1);
1013 	}
1014 	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
1015 	return freed;
1016 
1017 out_err:
1018 	f2fs_put_page(page, 1);
1019 	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
1020 	return ret;
1021 }
1022 
1023 static int truncate_partial_nodes(struct dnode_of_data *dn,
1024 			struct f2fs_inode *ri, int *offset, int depth)
1025 {
1026 	struct page *pages[2];
1027 	nid_t nid[3];
1028 	nid_t child_nid;
1029 	int err = 0;
1030 	int i;
1031 	int idx = depth - 2;
1032 
1033 	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1034 	if (!nid[0])
1035 		return 0;
1036 
1037 	/* get indirect nodes in the path */
1038 	for (i = 0; i < idx + 1; i++) {
1039 		/* reference count'll be increased */
1040 		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
1041 		if (IS_ERR(pages[i])) {
1042 			err = PTR_ERR(pages[i]);
1043 			idx = i - 1;
1044 			goto fail;
1045 		}
1046 		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
1047 	}
1048 
1049 	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
1050 
1051 	/* free direct nodes linked to a partial indirect node */
1052 	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
1053 		child_nid = get_nid(pages[idx], i, false);
1054 		if (!child_nid)
1055 			continue;
1056 		dn->nid = child_nid;
1057 		err = truncate_dnode(dn);
1058 		if (err < 0)
1059 			goto fail;
1060 		if (set_nid(pages[idx], i, 0, false))
1061 			dn->node_changed = true;
1062 	}
1063 
1064 	if (offset[idx + 1] == 0) {
1065 		dn->node_page = pages[idx];
1066 		dn->nid = nid[idx];
1067 		err = truncate_node(dn);
1068 		if (err)
1069 			goto fail;
1070 	} else {
1071 		f2fs_put_page(pages[idx], 1);
1072 	}
1073 	offset[idx]++;
1074 	offset[idx + 1] = 0;
1075 	idx--;
1076 fail:
1077 	for (i = idx; i >= 0; i--)
1078 		f2fs_put_page(pages[i], 1);
1079 
1080 	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
1081 
1082 	return err;
1083 }
1084 
1085 /*
1086  * All the block addresses of data and nodes should be nullified.
1087  */
1088 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
1089 {
1090 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1091 	int err = 0, cont = 1;
1092 	int level, offset[4], noffset[4];
1093 	unsigned int nofs = 0;
1094 	struct f2fs_inode *ri;
1095 	struct dnode_of_data dn;
1096 	struct page *page;
1097 
1098 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
1099 
1100 	level = get_node_path(inode, from, offset, noffset);
1101 	if (level < 0) {
1102 		trace_f2fs_truncate_inode_blocks_exit(inode, level);
1103 		return level;
1104 	}
1105 
1106 	page = f2fs_get_node_page(sbi, inode->i_ino);
1107 	if (IS_ERR(page)) {
1108 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
1109 		return PTR_ERR(page);
1110 	}
1111 
1112 	set_new_dnode(&dn, inode, page, NULL, 0);
1113 	unlock_page(page);
1114 
1115 	ri = F2FS_INODE(page);
1116 	switch (level) {
1117 	case 0:
1118 	case 1:
1119 		nofs = noffset[1];
1120 		break;
1121 	case 2:
1122 		nofs = noffset[1];
1123 		if (!offset[level - 1])
1124 			goto skip_partial;
1125 		err = truncate_partial_nodes(&dn, ri, offset, level);
1126 		if (err < 0 && err != -ENOENT)
1127 			goto fail;
1128 		nofs += 1 + NIDS_PER_BLOCK;
1129 		break;
1130 	case 3:
1131 		nofs = 5 + 2 * NIDS_PER_BLOCK;
1132 		if (!offset[level - 1])
1133 			goto skip_partial;
1134 		err = truncate_partial_nodes(&dn, ri, offset, level);
1135 		if (err < 0 && err != -ENOENT)
1136 			goto fail;
1137 		break;
1138 	default:
1139 		BUG();
1140 	}
1141 
1142 skip_partial:
1143 	while (cont) {
1144 		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1145 		switch (offset[0]) {
1146 		case NODE_DIR1_BLOCK:
1147 		case NODE_DIR2_BLOCK:
1148 			err = truncate_dnode(&dn);
1149 			break;
1150 
1151 		case NODE_IND1_BLOCK:
1152 		case NODE_IND2_BLOCK:
1153 			err = truncate_nodes(&dn, nofs, offset[1], 2);
1154 			break;
1155 
1156 		case NODE_DIND_BLOCK:
1157 			err = truncate_nodes(&dn, nofs, offset[1], 3);
1158 			cont = 0;
1159 			break;
1160 
1161 		default:
1162 			BUG();
1163 		}
1164 		if (err < 0 && err != -ENOENT)
1165 			goto fail;
1166 		if (offset[1] == 0 &&
1167 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
1168 			lock_page(page);
1169 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
1170 			f2fs_wait_on_page_writeback(page, NODE, true, true);
1171 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
1172 			set_page_dirty(page);
1173 			unlock_page(page);
1174 		}
1175 		offset[1] = 0;
1176 		offset[0]++;
1177 		nofs += err;
1178 	}
1179 fail:
1180 	f2fs_put_page(page, 0);
1181 	trace_f2fs_truncate_inode_blocks_exit(inode, err);
1182 	return err > 0 ? 0 : err;
1183 }
1184 
1185 /* caller must lock inode page */
1186 int f2fs_truncate_xattr_node(struct inode *inode)
1187 {
1188 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1189 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
1190 	struct dnode_of_data dn;
1191 	struct page *npage;
1192 	int err;
1193 
1194 	if (!nid)
1195 		return 0;
1196 
1197 	npage = f2fs_get_node_page(sbi, nid);
1198 	if (IS_ERR(npage))
1199 		return PTR_ERR(npage);
1200 
1201 	set_new_dnode(&dn, inode, NULL, npage, nid);
1202 	err = truncate_node(&dn);
1203 	if (err) {
1204 		f2fs_put_page(npage, 1);
1205 		return err;
1206 	}
1207 
1208 	f2fs_i_xnid_write(inode, 0);
1209 
1210 	return 0;
1211 }
1212 
1213 /*
1214  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1215  * f2fs_unlock_op().
1216  */
1217 int f2fs_remove_inode_page(struct inode *inode)
1218 {
1219 	struct dnode_of_data dn;
1220 	int err;
1221 
1222 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1223 	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
1224 	if (err)
1225 		return err;
1226 
1227 	err = f2fs_truncate_xattr_node(inode);
1228 	if (err) {
1229 		f2fs_put_dnode(&dn);
1230 		return err;
1231 	}
1232 
1233 	/* remove potential inline_data blocks */
1234 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1235 				S_ISLNK(inode->i_mode))
1236 		f2fs_truncate_data_blocks_range(&dn, 1);
1237 
1238 	/* 0 is possible, after f2fs_new_inode() has failed */
1239 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
1240 		f2fs_put_dnode(&dn);
1241 		return -EIO;
1242 	}
1243 
1244 	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
1245 		f2fs_warn(F2FS_I_SB(inode),
1246 			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
1247 			inode->i_ino, (unsigned long long)inode->i_blocks);
1248 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1249 	}
1250 
1251 	/* will put inode & node pages */
1252 	err = truncate_node(&dn);
1253 	if (err) {
1254 		f2fs_put_dnode(&dn);
1255 		return err;
1256 	}
1257 	return 0;
1258 }
1259 
1260 struct page *f2fs_new_inode_page(struct inode *inode)
1261 {
1262 	struct dnode_of_data dn;
1263 
1264 	/* allocate inode page for new inode */
1265 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1266 
1267 	/* caller should f2fs_put_page(page, 1); */
1268 	return f2fs_new_node_page(&dn, 0);
1269 }
1270 
1271 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1272 {
1273 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1274 	struct node_info new_ni;
1275 	struct page *page;
1276 	int err;
1277 
1278 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1279 		return ERR_PTR(-EPERM);
1280 
1281 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1282 	if (!page)
1283 		return ERR_PTR(-ENOMEM);
1284 
1285 	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1286 		goto fail;
1287 
1288 #ifdef CONFIG_F2FS_CHECK_FS
1289 	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
1290 	if (err) {
1291 		dec_valid_node_count(sbi, dn->inode, !ofs);
1292 		goto fail;
1293 	}
1294 	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
1295 #endif
1296 	new_ni.nid = dn->nid;
1297 	new_ni.ino = dn->inode->i_ino;
1298 	new_ni.blk_addr = NULL_ADDR;
1299 	new_ni.flag = 0;
1300 	new_ni.version = 0;
1301 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1302 
1303 	f2fs_wait_on_page_writeback(page, NODE, true, true);
1304 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1305 	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1306 	if (!PageUptodate(page))
1307 		SetPageUptodate(page);
1308 	if (set_page_dirty(page))
1309 		dn->node_changed = true;
1310 
1311 	if (f2fs_has_xattr_block(ofs))
1312 		f2fs_i_xnid_write(dn->inode, dn->nid);
1313 
1314 	if (ofs == 0)
1315 		inc_valid_inode_count(sbi);
1316 	return page;
1317 
1318 fail:
1319 	clear_node_page_dirty(page);
1320 	f2fs_put_page(page, 1);
1321 	return ERR_PTR(err);
1322 }
1323 
1324 /*
1325  * Caller should do after getting the following values.
1326  * 0: f2fs_put_page(page, 0)
1327  * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1328  */
1329 static int read_node_page(struct page *page, int op_flags)
1330 {
1331 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1332 	struct node_info ni;
1333 	struct f2fs_io_info fio = {
1334 		.sbi = sbi,
1335 		.type = NODE,
1336 		.op = REQ_OP_READ,
1337 		.op_flags = op_flags,
1338 		.page = page,
1339 		.encrypted_page = NULL,
1340 	};
1341 	int err;
1342 
1343 	if (PageUptodate(page)) {
1344 		if (!f2fs_inode_chksum_verify(sbi, page)) {
1345 			ClearPageUptodate(page);
1346 			return -EFSBADCRC;
1347 		}
1348 		return LOCKED_PAGE;
1349 	}
1350 
1351 	err = f2fs_get_node_info(sbi, page->index, &ni);
1352 	if (err)
1353 		return err;
1354 
1355 	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
1356 	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
1357 			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
1358 		ClearPageUptodate(page);
1359 		return -ENOENT;
1360 	}
1361 
1362 	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1363 
1364 	err = f2fs_submit_page_bio(&fio);
1365 
1366 	if (!err)
1367 		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);
1368 
1369 	return err;
1370 }
1371 
1372 /*
1373  * Readahead a node page
1374  */
1375 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1376 {
1377 	struct page *apage;
1378 	int err;
1379 
1380 	if (!nid)
1381 		return;
1382 	if (f2fs_check_nid_range(sbi, nid))
1383 		return;
1384 
1385 	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1386 	if (apage)
1387 		return;
1388 
1389 	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1390 	if (!apage)
1391 		return;
1392 
1393 	err = read_node_page(apage, REQ_RAHEAD);
1394 	f2fs_put_page(apage, err ? 1 : 0);
1395 }
1396 
1397 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1398 					struct page *parent, int start)
1399 {
1400 	struct page *page;
1401 	int err;
1402 
1403 	if (!nid)
1404 		return ERR_PTR(-ENOENT);
1405 	if (f2fs_check_nid_range(sbi, nid))
1406 		return ERR_PTR(-EINVAL);
1407 repeat:
1408 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1409 	if (!page)
1410 		return ERR_PTR(-ENOMEM);
1411 
1412 	err = read_node_page(page, 0);
1413 	if (err < 0) {
1414 		f2fs_put_page(page, 1);
1415 		return ERR_PTR(err);
1416 	} else if (err == LOCKED_PAGE) {
1417 		err = 0;
1418 		goto page_hit;
1419 	}
1420 
1421 	if (parent)
1422 		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1423 
1424 	lock_page(page);
1425 
1426 	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1427 		f2fs_put_page(page, 1);
1428 		goto repeat;
1429 	}
1430 
1431 	if (unlikely(!PageUptodate(page))) {
1432 		err = -EIO;
1433 		goto out_err;
1434 	}
1435 
1436 	if (!f2fs_inode_chksum_verify(sbi, page)) {
1437 		err = -EFSBADCRC;
1438 		goto out_err;
1439 	}
1440 page_hit:
1441 	if (unlikely(nid != nid_of_node(page))) {
1442 		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1443 			  nid, nid_of_node(page), ino_of_node(page),
1444 			  ofs_of_node(page), cpver_of_node(page),
1445 			  next_blkaddr_of_node(page));
1446 		err = -EINVAL;
1447 out_err:
1448 		ClearPageUptodate(page);
1449 		f2fs_put_page(page, 1);
1450 		return ERR_PTR(err);
1451 	}
1452 	return page;
1453 }
1454 
1455 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1456 {
1457 	return __get_node_page(sbi, nid, NULL, 0);
1458 }
1459 
1460 struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1461 {
1462 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1463 	nid_t nid = get_nid(parent, start, false);
1464 
1465 	return __get_node_page(sbi, nid, parent, start);
1466 }
1467 
1468 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1469 {
1470 	struct inode *inode;
1471 	struct page *page;
1472 	int ret;
1473 
1474 	/* should flush inline_data before evict_inode */
1475 	inode = ilookup(sbi->sb, ino);
1476 	if (!inode)
1477 		return;
1478 
1479 	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1480 					FGP_LOCK|FGP_NOWAIT, 0);
1481 	if (!page)
1482 		goto iput_out;
1483 
1484 	if (!PageUptodate(page))
1485 		goto page_out;
1486 
1487 	if (!PageDirty(page))
1488 		goto page_out;
1489 
1490 	if (!clear_page_dirty_for_io(page))
1491 		goto page_out;
1492 
1493 	ret = f2fs_write_inline_data(inode, page);
1494 	inode_dec_dirty_pages(inode);
1495 	f2fs_remove_dirty_inode(inode);
1496 	if (ret)
1497 		set_page_dirty(page);
1498 page_out:
1499 	f2fs_put_page(page, 1);
1500 iput_out:
1501 	iput(inode);
1502 }
1503 
1504 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1505 {
1506 	pgoff_t index;
1507 	struct pagevec pvec;
1508 	struct page *last_page = NULL;
1509 	int nr_pages;
1510 
1511 	pagevec_init(&pvec);
1512 	index = 0;
1513 
1514 	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1515 				PAGECACHE_TAG_DIRTY))) {
1516 		int i;
1517 
1518 		for (i = 0; i < nr_pages; i++) {
1519 			struct page *page = pvec.pages[i];
1520 
1521 			if (unlikely(f2fs_cp_error(sbi))) {
1522 				f2fs_put_page(last_page, 0);
1523 				pagevec_release(&pvec);
1524 				return ERR_PTR(-EIO);
1525 			}
1526 
1527 			if (!IS_DNODE(page) || !is_cold_node(page))
1528 				continue;
1529 			if (ino_of_node(page) != ino)
1530 				continue;
1531 
1532 			lock_page(page);
1533 
1534 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1535 continue_unlock:
1536 				unlock_page(page);
1537 				continue;
1538 			}
1539 			if (ino_of_node(page) != ino)
1540 				goto continue_unlock;
1541 
1542 			if (!PageDirty(page)) {
1543 				/* someone wrote it for us */
1544 				goto continue_unlock;
1545 			}
1546 
1547 			if (last_page)
1548 				f2fs_put_page(last_page, 0);
1549 
1550 			get_page(page);
1551 			last_page = page;
1552 			unlock_page(page);
1553 		}
1554 		pagevec_release(&pvec);
1555 		cond_resched();
1556 	}
1557 	return last_page;
1558 }
1559 
1560 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1561 				struct writeback_control *wbc, bool do_balance,
1562 				enum iostat_type io_type, unsigned int *seq_id)
1563 {
1564 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1565 	nid_t nid;
1566 	struct node_info ni;
1567 	struct f2fs_io_info fio = {
1568 		.sbi = sbi,
1569 		.ino = ino_of_node(page),
1570 		.type = NODE,
1571 		.op = REQ_OP_WRITE,
1572 		.op_flags = wbc_to_write_flags(wbc),
1573 		.page = page,
1574 		.encrypted_page = NULL,
1575 		.submitted = false,
1576 		.io_type = io_type,
1577 		.io_wbc = wbc,
1578 	};
1579 	unsigned int seq;
1580 
1581 	trace_f2fs_writepage(page, NODE);
1582 
1583 	if (unlikely(f2fs_cp_error(sbi))) {
1584 		ClearPageUptodate(page);
1585 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1586 		unlock_page(page);
1587 		return 0;
1588 	}
1589 
1590 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1591 		goto redirty_out;
1592 
1593 	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1594 			wbc->sync_mode == WB_SYNC_NONE &&
1595 			IS_DNODE(page) && is_cold_node(page))
1596 		goto redirty_out;
1597 
1598 	/* get old block addr of this node page */
1599 	nid = nid_of_node(page);
1600 	f2fs_bug_on(sbi, page->index != nid);
1601 
1602 	if (f2fs_get_node_info(sbi, nid, &ni))
1603 		goto redirty_out;
1604 
1605 	if (wbc->for_reclaim) {
1606 		if (!down_read_trylock(&sbi->node_write))
1607 			goto redirty_out;
1608 	} else {
1609 		down_read(&sbi->node_write);
1610 	}
1611 
1612 	/* This page is already truncated */
1613 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
1614 		ClearPageUptodate(page);
1615 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1616 		up_read(&sbi->node_write);
1617 		unlock_page(page);
1618 		return 0;
1619 	}
1620 
1621 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
1622 		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1623 					DATA_GENERIC_ENHANCE)) {
1624 		up_read(&sbi->node_write);
1625 		goto redirty_out;
1626 	}
1627 
1628 	if (atomic && !test_opt(sbi, NOBARRIER))
1629 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1630 
1631 	/* should add to global list before clearing PAGECACHE status */
1632 	if (f2fs_in_warm_node_list(sbi, page)) {
1633 		seq = f2fs_add_fsync_node_entry(sbi, page);
1634 		if (seq_id)
1635 			*seq_id = seq;
1636 	}
1637 
1638 	set_page_writeback(page);
1639 	ClearPageError(page);
1640 
1641 	fio.old_blkaddr = ni.blk_addr;
1642 	f2fs_do_write_node_page(nid, &fio);
1643 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1644 	dec_page_count(sbi, F2FS_DIRTY_NODES);
1645 	up_read(&sbi->node_write);
1646 
1647 	if (wbc->for_reclaim) {
1648 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
1649 		submitted = NULL;
1650 	}
1651 
1652 	unlock_page(page);
1653 
1654 	if (unlikely(f2fs_cp_error(sbi))) {
1655 		f2fs_submit_merged_write(sbi, NODE);
1656 		submitted = NULL;
1657 	}
1658 	if (submitted)
1659 		*submitted = fio.submitted;
1660 
1661 	if (do_balance)
1662 		f2fs_balance_fs(sbi, false);
1663 	return 0;
1664 
1665 redirty_out:
1666 	redirty_page_for_writepage(wbc, page);
1667 	return AOP_WRITEPAGE_ACTIVATE;
1668 }
1669 
1670 int f2fs_move_node_page(struct page *node_page, int gc_type)
1671 {
1672 	int err = 0;
1673 
1674 	if (gc_type == FG_GC) {
1675 		struct writeback_control wbc = {
1676 			.sync_mode = WB_SYNC_ALL,
1677 			.nr_to_write = 1,
1678 			.for_reclaim = 0,
1679 		};
1680 
1681 		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
1682 
1683 		set_page_dirty(node_page);
1684 
1685 		if (!clear_page_dirty_for_io(node_page)) {
1686 			err = -EAGAIN;
1687 			goto out_page;
1688 		}
1689 
1690 		if (__write_node_page(node_page, false, NULL,
1691 					&wbc, false, FS_GC_NODE_IO, NULL)) {
1692 			err = -EAGAIN;
1693 			unlock_page(node_page);
1694 		}
1695 		goto release_page;
1696 	} else {
1697 		/* set page dirty and write it */
1698 		if (!PageWriteback(node_page))
1699 			set_page_dirty(node_page);
1700 	}
1701 out_page:
1702 	unlock_page(node_page);
1703 release_page:
1704 	f2fs_put_page(node_page, 0);
1705 	return err;
1706 }
1707 
1708 static int f2fs_write_node_page(struct page *page,
1709 				struct writeback_control *wbc)
1710 {
1711 	return __write_node_page(page, false, NULL, wbc, false,
1712 						FS_NODE_IO, NULL);
1713 }
1714 
1715 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1716 			struct writeback_control *wbc, bool atomic,
1717 			unsigned int *seq_id)
1718 {
1719 	pgoff_t index;
1720 	struct pagevec pvec;
1721 	int ret = 0;
1722 	struct page *last_page = NULL;
1723 	bool marked = false;
1724 	nid_t ino = inode->i_ino;
1725 	int nr_pages;
1726 	int nwritten = 0;
1727 
1728 	if (atomic) {
1729 		last_page = last_fsync_dnode(sbi, ino);
1730 		if (IS_ERR_OR_NULL(last_page))
1731 			return PTR_ERR_OR_ZERO(last_page);
1732 	}
1733 retry:
1734 	pagevec_init(&pvec);
1735 	index = 0;
1736 
1737 	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1738 				PAGECACHE_TAG_DIRTY))) {
1739 		int i;
1740 
1741 		for (i = 0; i < nr_pages; i++) {
1742 			struct page *page = pvec.pages[i];
1743 			bool submitted = false;
1744 
1745 			if (unlikely(f2fs_cp_error(sbi))) {
1746 				f2fs_put_page(last_page, 0);
1747 				pagevec_release(&pvec);
1748 				ret = -EIO;
1749 				goto out;
1750 			}
1751 
1752 			if (!IS_DNODE(page) || !is_cold_node(page))
1753 				continue;
1754 			if (ino_of_node(page) != ino)
1755 				continue;
1756 
1757 			lock_page(page);
1758 
1759 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1760 continue_unlock:
1761 				unlock_page(page);
1762 				continue;
1763 			}
1764 			if (ino_of_node(page) != ino)
1765 				goto continue_unlock;
1766 
1767 			if (!PageDirty(page) && page != last_page) {
1768 				/* someone wrote it for us */
1769 				goto continue_unlock;
1770 			}
1771 
1772 			f2fs_wait_on_page_writeback(page, NODE, true, true);
1773 
1774 			set_fsync_mark(page, 0);
1775 			set_dentry_mark(page, 0);
1776 
1777 			if (!atomic || page == last_page) {
1778 				set_fsync_mark(page, 1);
1779 				if (IS_INODE(page)) {
1780 					if (is_inode_flag_set(inode,
1781 								FI_DIRTY_INODE))
1782 						f2fs_update_inode(inode, page);
1783 					set_dentry_mark(page,
1784 						f2fs_need_dentry_mark(sbi, ino));
1785 				}
1786 				/* may be written by other thread */
1787 				if (!PageDirty(page))
1788 					set_page_dirty(page);
1789 			}
1790 
1791 			if (!clear_page_dirty_for_io(page))
1792 				goto continue_unlock;
1793 
1794 			ret = __write_node_page(page, atomic &&
1795 						page == last_page,
1796 						&submitted, wbc, true,
1797 						FS_NODE_IO, seq_id);
1798 			if (ret) {
1799 				unlock_page(page);
1800 				f2fs_put_page(last_page, 0);
1801 				break;
1802 			} else if (submitted) {
1803 				nwritten++;
1804 			}
1805 
1806 			if (page == last_page) {
1807 				f2fs_put_page(page, 0);
1808 				marked = true;
1809 				break;
1810 			}
1811 		}
1812 		pagevec_release(&pvec);
1813 		cond_resched();
1814 
1815 		if (ret || marked)
1816 			break;
1817 	}
1818 	if (!ret && atomic && !marked) {
1819 		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1820 			   ino, last_page->index);
1821 		lock_page(last_page);
1822 		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1823 		set_page_dirty(last_page);
1824 		unlock_page(last_page);
1825 		goto retry;
1826 	}
1827 out:
1828 	if (nwritten)
1829 		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1830 	return ret ? -EIO : 0;
1831 }
1832 
1833 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1834 {
1835 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1836 	bool clean;
1837 
1838 	if (inode->i_ino != ino)
1839 		return 0;
1840 
1841 	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1842 		return 0;
1843 
1844 	spin_lock(&sbi->inode_lock[DIRTY_META]);
1845 	clean = list_empty(&F2FS_I(inode)->gdirty_list);
1846 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1847 
1848 	if (clean)
1849 		return 0;
1850 
1851 	inode = igrab(inode);
1852 	if (!inode)
1853 		return 0;
1854 	return 1;
1855 }
1856 
1857 static bool flush_dirty_inode(struct page *page)
1858 {
1859 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1860 	struct inode *inode;
1861 	nid_t ino = ino_of_node(page);
1862 
1863 	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1864 	if (!inode)
1865 		return false;
1866 
1867 	f2fs_update_inode(inode, page);
1868 	unlock_page(page);
1869 
1870 	iput(inode);
1871 	return true;
1872 }
1873 
1874 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1875 {
1876 	pgoff_t index = 0;
1877 	struct pagevec pvec;
1878 	int nr_pages;
1879 
1880 	pagevec_init(&pvec);
1881 
1882 	while ((nr_pages = pagevec_lookup_tag(&pvec,
1883 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1884 		int i;
1885 
1886 		for (i = 0; i < nr_pages; i++) {
1887 			struct page *page = pvec.pages[i];
1888 
1889 			if (!IS_DNODE(page))
1890 				continue;
1891 
1892 			lock_page(page);
1893 
1894 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1895 continue_unlock:
1896 				unlock_page(page);
1897 				continue;
1898 			}
1899 
1900 			if (!PageDirty(page)) {
1901 				/* someone wrote it for us */
1902 				goto continue_unlock;
1903 			}
1904 
1905 			/* flush inline_data, if it's async context. */
1906 			if (page_private_inline(page)) {
1907 				clear_page_private_inline(page);
1908 				unlock_page(page);
1909 				flush_inline_data(sbi, ino_of_node(page));
1910 				continue;
1911 			}
1912 			unlock_page(page);
1913 		}
1914 		pagevec_release(&pvec);
1915 		cond_resched();
1916 	}
1917 }
1918 
1919 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1920 				struct writeback_control *wbc,
1921 				bool do_balance, enum iostat_type io_type)
1922 {
1923 	pgoff_t index;
1924 	struct pagevec pvec;
1925 	int step = 0;
1926 	int nwritten = 0;
1927 	int ret = 0;
1928 	int nr_pages, done = 0;
1929 
1930 	pagevec_init(&pvec);
1931 
1932 next_step:
1933 	index = 0;
1934 
1935 	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1936 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1937 		int i;
1938 
1939 		for (i = 0; i < nr_pages; i++) {
1940 			struct page *page = pvec.pages[i];
1941 			bool submitted = false;
1942 			bool may_dirty = true;
1943 
1944 			/* give a priority to WB_SYNC threads */
1945 			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1946 					wbc->sync_mode == WB_SYNC_NONE) {
1947 				done = 1;
1948 				break;
1949 			}
1950 
1951 			/*
1952 			 * flushing sequence with step:
1953 			 * 0. indirect nodes
1954 			 * 1. dentry dnodes
1955 			 * 2. file dnodes
1956 			 */
1957 			if (step == 0 && IS_DNODE(page))
1958 				continue;
1959 			if (step == 1 && (!IS_DNODE(page) ||
1960 						is_cold_node(page)))
1961 				continue;
1962 			if (step == 2 && (!IS_DNODE(page) ||
1963 						!is_cold_node(page)))
1964 				continue;
1965 lock_node:
1966 			if (wbc->sync_mode == WB_SYNC_ALL)
1967 				lock_page(page);
1968 			else if (!trylock_page(page))
1969 				continue;
1970 
1971 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1972 continue_unlock:
1973 				unlock_page(page);
1974 				continue;
1975 			}
1976 
1977 			if (!PageDirty(page)) {
1978 				/* someone wrote it for us */
1979 				goto continue_unlock;
1980 			}
1981 
1982 			/* flush inline_data/inode, if it's async context. */
1983 			if (!do_balance)
1984 				goto write_node;
1985 
1986 			/* flush inline_data */
1987 			if (page_private_inline(page)) {
1988 				clear_page_private_inline(page);
1989 				unlock_page(page);
1990 				flush_inline_data(sbi, ino_of_node(page));
1991 				goto lock_node;
1992 			}
1993 
1994 			/* flush dirty inode */
1995 			if (IS_INODE(page) && may_dirty) {
1996 				may_dirty = false;
1997 				if (flush_dirty_inode(page))
1998 					goto lock_node;
1999 			}
2000 write_node:
2001 			f2fs_wait_on_page_writeback(page, NODE, true, true);
2002 
2003 			if (!clear_page_dirty_for_io(page))
2004 				goto continue_unlock;
2005 
2006 			set_fsync_mark(page, 0);
2007 			set_dentry_mark(page, 0);
2008 
2009 			ret = __write_node_page(page, false, &submitted,
2010 						wbc, do_balance, io_type, NULL);
2011 			if (ret)
2012 				unlock_page(page);
2013 			else if (submitted)
2014 				nwritten++;
2015 
2016 			if (--wbc->nr_to_write == 0)
2017 				break;
2018 		}
2019 		pagevec_release(&pvec);
2020 		cond_resched();
2021 
2022 		if (wbc->nr_to_write == 0) {
2023 			step = 2;
2024 			break;
2025 		}
2026 	}
2027 
2028 	if (step < 2) {
2029 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2030 				wbc->sync_mode == WB_SYNC_NONE && step == 1)
2031 			goto out;
2032 		step++;
2033 		goto next_step;
2034 	}
2035 out:
2036 	if (nwritten)
2037 		f2fs_submit_merged_write(sbi, NODE);
2038 
2039 	if (unlikely(f2fs_cp_error(sbi)))
2040 		return -EIO;
2041 	return ret;
2042 }
2043 
2044 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2045 						unsigned int seq_id)
2046 {
2047 	struct fsync_node_entry *fn;
2048 	struct page *page;
2049 	struct list_head *head = &sbi->fsync_node_list;
2050 	unsigned long flags;
2051 	unsigned int cur_seq_id = 0;
2052 	int ret2, ret = 0;
2053 
2054 	while (seq_id && cur_seq_id < seq_id) {
2055 		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2056 		if (list_empty(head)) {
2057 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2058 			break;
2059 		}
2060 		fn = list_first_entry(head, struct fsync_node_entry, list);
2061 		if (fn->seq_id > seq_id) {
2062 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2063 			break;
2064 		}
2065 		cur_seq_id = fn->seq_id;
2066 		page = fn->page;
2067 		get_page(page);
2068 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2069 
2070 		f2fs_wait_on_page_writeback(page, NODE, true, false);
2071 		if (TestClearPageError(page))
2072 			ret = -EIO;
2073 
2074 		put_page(page);
2075 
2076 		if (ret)
2077 			break;
2078 	}
2079 
2080 	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
2081 	if (!ret)
2082 		ret = ret2;
2083 
2084 	return ret;
2085 }
2086 
2087 static int f2fs_write_node_pages(struct address_space *mapping,
2088 			    struct writeback_control *wbc)
2089 {
2090 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2091 	struct blk_plug plug;
2092 	long diff;
2093 
2094 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2095 		goto skip_write;
2096 
2097 	/* balancing f2fs's metadata in background */
2098 	f2fs_balance_fs_bg(sbi, true);
2099 
2100 	/* collect a number of dirty node pages and write together */
2101 	if (wbc->sync_mode != WB_SYNC_ALL &&
2102 			get_pages(sbi, F2FS_DIRTY_NODES) <
2103 					nr_pages_to_skip(sbi, NODE))
2104 		goto skip_write;
2105 
2106 	if (wbc->sync_mode == WB_SYNC_ALL)
2107 		atomic_inc(&sbi->wb_sync_req[NODE]);
2108 	else if (atomic_read(&sbi->wb_sync_req[NODE]))
2109 		goto skip_write;
2110 
2111 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2112 
2113 	diff = nr_pages_to_write(sbi, NODE, wbc);
2114 	blk_start_plug(&plug);
2115 	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2116 	blk_finish_plug(&plug);
2117 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2118 
2119 	if (wbc->sync_mode == WB_SYNC_ALL)
2120 		atomic_dec(&sbi->wb_sync_req[NODE]);
2121 	return 0;
2122 
2123 skip_write:
2124 	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2125 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2126 	return 0;
2127 }
2128 
2129 static int f2fs_set_node_page_dirty(struct page *page)
2130 {
2131 	trace_f2fs_set_page_dirty(page, NODE);
2132 
2133 	if (!PageUptodate(page))
2134 		SetPageUptodate(page);
2135 #ifdef CONFIG_F2FS_CHECK_FS
2136 	if (IS_INODE(page))
2137 		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
2138 #endif
2139 	if (!PageDirty(page)) {
2140 		__set_page_dirty_nobuffers(page);
2141 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
2142 		set_page_private_reference(page);
2143 		return 1;
2144 	}
2145 	return 0;
2146 }
2147 
2148 /*
2149  * Structure of the f2fs node operations
2150  */
2151 const struct address_space_operations f2fs_node_aops = {
2152 	.writepage	= f2fs_write_node_page,
2153 	.writepages	= f2fs_write_node_pages,
2154 	.set_page_dirty	= f2fs_set_node_page_dirty,
2155 	.invalidatepage	= f2fs_invalidate_page,
2156 	.releasepage	= f2fs_release_page,
2157 #ifdef CONFIG_MIGRATION
2158 	.migratepage	= f2fs_migrate_page,
2159 #endif
2160 };
2161 
2162 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2163 						nid_t n)
2164 {
2165 	return radix_tree_lookup(&nm_i->free_nid_root, n);
2166 }
2167 
2168 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2169 				struct free_nid *i)
2170 {
2171 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2172 	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2173 
2174 	if (err)
2175 		return err;
2176 
2177 	nm_i->nid_cnt[FREE_NID]++;
2178 	list_add_tail(&i->list, &nm_i->free_nid_list);
2179 	return 0;
2180 }
2181 
2182 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2183 			struct free_nid *i, enum nid_state state)
2184 {
2185 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2186 
2187 	f2fs_bug_on(sbi, state != i->state);
2188 	nm_i->nid_cnt[state]--;
2189 	if (state == FREE_NID)
2190 		list_del(&i->list);
2191 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
2192 }
2193 
2194 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2195 			enum nid_state org_state, enum nid_state dst_state)
2196 {
2197 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2198 
2199 	f2fs_bug_on(sbi, org_state != i->state);
2200 	i->state = dst_state;
2201 	nm_i->nid_cnt[org_state]--;
2202 	nm_i->nid_cnt[dst_state]++;
2203 
2204 	switch (dst_state) {
2205 	case PREALLOC_NID:
2206 		list_del(&i->list);
2207 		break;
2208 	case FREE_NID:
2209 		list_add_tail(&i->list, &nm_i->free_nid_list);
2210 		break;
2211 	default:
2212 		BUG_ON(1);
2213 	}
2214 }
2215 
2216 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
2217 {
2218 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2219 	unsigned int i;
2220 	bool ret = true;
2221 
2222 	down_read(&nm_i->nat_tree_lock);
2223 	for (i = 0; i < nm_i->nat_blocks; i++) {
2224 		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2225 			ret = false;
2226 			break;
2227 		}
2228 	}
2229 	up_read(&nm_i->nat_tree_lock);
2230 
2231 	return ret;
2232 }
2233 
2234 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2235 							bool set, bool build)
2236 {
2237 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2238 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2239 	unsigned int nid_ofs = nid - START_NID(nid);
2240 
2241 	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2242 		return;
2243 
2244 	if (set) {
2245 		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2246 			return;
2247 		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2248 		nm_i->free_nid_count[nat_ofs]++;
2249 	} else {
2250 		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2251 			return;
2252 		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2253 		if (!build)
2254 			nm_i->free_nid_count[nat_ofs]--;
2255 	}
2256 }
2257 
2258 /* return if the nid is recognized as free */
2259 static bool add_free_nid(struct f2fs_sb_info *sbi,
2260 				nid_t nid, bool build, bool update)
2261 {
2262 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2263 	struct free_nid *i, *e;
2264 	struct nat_entry *ne;
2265 	int err = -EINVAL;
2266 	bool ret = false;
2267 
2268 	/* 0 nid should not be used */
2269 	if (unlikely(nid == 0))
2270 		return false;
2271 
2272 	if (unlikely(f2fs_check_nid_range(sbi, nid)))
2273 		return false;
2274 
2275 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2276 	i->nid = nid;
2277 	i->state = FREE_NID;
2278 
2279 	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2280 
2281 	spin_lock(&nm_i->nid_list_lock);
2282 
2283 	if (build) {
2284 		/*
2285 		 *   Thread A             Thread B
2286 		 *  - f2fs_create
2287 		 *   - f2fs_new_inode
2288 		 *    - f2fs_alloc_nid
2289 		 *     - __insert_nid_to_list(PREALLOC_NID)
2290 		 *                     - f2fs_balance_fs_bg
2291 		 *                      - f2fs_build_free_nids
2292 		 *                       - __f2fs_build_free_nids
2293 		 *                        - scan_nat_page
2294 		 *                         - add_free_nid
2295 		 *                          - __lookup_nat_cache
2296 		 *  - f2fs_add_link
2297 		 *   - f2fs_init_inode_metadata
2298 		 *    - f2fs_new_inode_page
2299 		 *     - f2fs_new_node_page
2300 		 *      - set_node_addr
2301 		 *  - f2fs_alloc_nid_done
2302 		 *   - __remove_nid_from_list(PREALLOC_NID)
2303 		 *                         - __insert_nid_to_list(FREE_NID)
2304 		 */
2305 		ne = __lookup_nat_cache(nm_i, nid);
2306 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2307 				nat_get_blkaddr(ne) != NULL_ADDR))
2308 			goto err_out;
2309 
2310 		e = __lookup_free_nid_list(nm_i, nid);
2311 		if (e) {
2312 			if (e->state == FREE_NID)
2313 				ret = true;
2314 			goto err_out;
2315 		}
2316 	}
2317 	ret = true;
2318 	err = __insert_free_nid(sbi, i);
2319 err_out:
2320 	if (update) {
2321 		update_free_nid_bitmap(sbi, nid, ret, build);
2322 		if (!build)
2323 			nm_i->available_nids++;
2324 	}
2325 	spin_unlock(&nm_i->nid_list_lock);
2326 	radix_tree_preload_end();
2327 
2328 	if (err)
2329 		kmem_cache_free(free_nid_slab, i);
2330 	return ret;
2331 }
2332 
2333 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2334 {
2335 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2336 	struct free_nid *i;
2337 	bool need_free = false;
2338 
2339 	spin_lock(&nm_i->nid_list_lock);
2340 	i = __lookup_free_nid_list(nm_i, nid);
2341 	if (i && i->state == FREE_NID) {
2342 		__remove_free_nid(sbi, i, FREE_NID);
2343 		need_free = true;
2344 	}
2345 	spin_unlock(&nm_i->nid_list_lock);
2346 
2347 	if (need_free)
2348 		kmem_cache_free(free_nid_slab, i);
2349 }
2350 
2351 static int scan_nat_page(struct f2fs_sb_info *sbi,
2352 			struct page *nat_page, nid_t start_nid)
2353 {
2354 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2355 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
2356 	block_t blk_addr;
2357 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2358 	int i;
2359 
2360 	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2361 
2362 	i = start_nid % NAT_ENTRY_PER_BLOCK;
2363 
2364 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2365 		if (unlikely(start_nid >= nm_i->max_nid))
2366 			break;
2367 
2368 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2369 
2370 		if (blk_addr == NEW_ADDR)
2371 			return -EINVAL;
2372 
2373 		if (blk_addr == NULL_ADDR) {
2374 			add_free_nid(sbi, start_nid, true, true);
2375 		} else {
2376 			spin_lock(&NM_I(sbi)->nid_list_lock);
2377 			update_free_nid_bitmap(sbi, start_nid, false, true);
2378 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2379 		}
2380 	}
2381 
2382 	return 0;
2383 }
2384 
2385 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2386 {
2387 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2388 	struct f2fs_journal *journal = curseg->journal;
2389 	int i;
2390 
2391 	down_read(&curseg->journal_rwsem);
2392 	for (i = 0; i < nats_in_cursum(journal); i++) {
2393 		block_t addr;
2394 		nid_t nid;
2395 
2396 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2397 		nid = le32_to_cpu(nid_in_journal(journal, i));
2398 		if (addr == NULL_ADDR)
2399 			add_free_nid(sbi, nid, true, false);
2400 		else
2401 			remove_free_nid(sbi, nid);
2402 	}
2403 	up_read(&curseg->journal_rwsem);
2404 }
2405 
2406 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2407 {
2408 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2409 	unsigned int i, idx;
2410 	nid_t nid;
2411 
2412 	down_read(&nm_i->nat_tree_lock);
2413 
2414 	for (i = 0; i < nm_i->nat_blocks; i++) {
2415 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2416 			continue;
2417 		if (!nm_i->free_nid_count[i])
2418 			continue;
2419 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2420 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2421 						NAT_ENTRY_PER_BLOCK, idx);
2422 			if (idx >= NAT_ENTRY_PER_BLOCK)
2423 				break;
2424 
2425 			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2426 			add_free_nid(sbi, nid, true, false);
2427 
2428 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2429 				goto out;
2430 		}
2431 	}
2432 out:
2433 	scan_curseg_cache(sbi);
2434 
2435 	up_read(&nm_i->nat_tree_lock);
2436 }
2437 
2438 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2439 						bool sync, bool mount)
2440 {
2441 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2442 	int i = 0, ret;
2443 	nid_t nid = nm_i->next_scan_nid;
2444 
2445 	if (unlikely(nid >= nm_i->max_nid))
2446 		nid = 0;
2447 
2448 	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2449 		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2450 
2451 	/* Enough entries */
2452 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2453 		return 0;
2454 
2455 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2456 		return 0;
2457 
2458 	if (!mount) {
2459 		/* try to find free nids in free_nid_bitmap */
2460 		scan_free_nid_bits(sbi);
2461 
2462 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2463 			return 0;
2464 	}
2465 
2466 	/* readahead nat pages to be scanned */
2467 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2468 							META_NAT, true);
2469 
2470 	down_read(&nm_i->nat_tree_lock);
2471 
2472 	while (1) {
2473 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2474 						nm_i->nat_block_bitmap)) {
2475 			struct page *page = get_current_nat_page(sbi, nid);
2476 
2477 			if (IS_ERR(page)) {
2478 				ret = PTR_ERR(page);
2479 			} else {
2480 				ret = scan_nat_page(sbi, page, nid);
2481 				f2fs_put_page(page, 1);
2482 			}
2483 
2484 			if (ret) {
2485 				up_read(&nm_i->nat_tree_lock);
2486 				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2487 				return ret;
2488 			}
2489 		}
2490 
2491 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2492 		if (unlikely(nid >= nm_i->max_nid))
2493 			nid = 0;
2494 
2495 		if (++i >= FREE_NID_PAGES)
2496 			break;
2497 	}
2498 
2499 	/* go to the next free nat pages to find free nids abundantly */
2500 	nm_i->next_scan_nid = nid;
2501 
2502 	/* find free nids from current sum_pages */
2503 	scan_curseg_cache(sbi);
2504 
2505 	up_read(&nm_i->nat_tree_lock);
2506 
2507 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2508 					nm_i->ra_nid_pages, META_NAT, false);
2509 
2510 	return 0;
2511 }
2512 
2513 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2514 {
2515 	int ret;
2516 
2517 	mutex_lock(&NM_I(sbi)->build_lock);
2518 	ret = __f2fs_build_free_nids(sbi, sync, mount);
2519 	mutex_unlock(&NM_I(sbi)->build_lock);
2520 
2521 	return ret;
2522 }
2523 
2524 /*
2525  * If this function returns success, caller can obtain a new nid
2526  * from second parameter of this function.
2527  * The returned nid could be used ino as well as nid when inode is created.
2528  */
2529 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2530 {
2531 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2532 	struct free_nid *i = NULL;
2533 retry:
2534 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2535 		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
2536 		return false;
2537 	}
2538 
2539 	spin_lock(&nm_i->nid_list_lock);
2540 
2541 	if (unlikely(nm_i->available_nids == 0)) {
2542 		spin_unlock(&nm_i->nid_list_lock);
2543 		return false;
2544 	}
2545 
2546 	/* We should not use stale free nids created by f2fs_build_free_nids */
2547 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2548 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2549 		i = list_first_entry(&nm_i->free_nid_list,
2550 					struct free_nid, list);
2551 		*nid = i->nid;
2552 
2553 		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2554 		nm_i->available_nids--;
2555 
2556 		update_free_nid_bitmap(sbi, *nid, false, false);
2557 
2558 		spin_unlock(&nm_i->nid_list_lock);
2559 		return true;
2560 	}
2561 	spin_unlock(&nm_i->nid_list_lock);
2562 
2563 	/* Let's scan nat pages and its caches to get free nids */
2564 	if (!f2fs_build_free_nids(sbi, true, false))
2565 		goto retry;
2566 	return false;
2567 }
2568 
2569 /*
2570  * f2fs_alloc_nid() should be called prior to this function.
2571  */
2572 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2573 {
2574 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2575 	struct free_nid *i;
2576 
2577 	spin_lock(&nm_i->nid_list_lock);
2578 	i = __lookup_free_nid_list(nm_i, nid);
2579 	f2fs_bug_on(sbi, !i);
2580 	__remove_free_nid(sbi, i, PREALLOC_NID);
2581 	spin_unlock(&nm_i->nid_list_lock);
2582 
2583 	kmem_cache_free(free_nid_slab, i);
2584 }
2585 
2586 /*
2587  * f2fs_alloc_nid() should be called prior to this function.
2588  */
2589 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2590 {
2591 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2592 	struct free_nid *i;
2593 	bool need_free = false;
2594 
2595 	if (!nid)
2596 		return;
2597 
2598 	spin_lock(&nm_i->nid_list_lock);
2599 	i = __lookup_free_nid_list(nm_i, nid);
2600 	f2fs_bug_on(sbi, !i);
2601 
2602 	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2603 		__remove_free_nid(sbi, i, PREALLOC_NID);
2604 		need_free = true;
2605 	} else {
2606 		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2607 	}
2608 
2609 	nm_i->available_nids++;
2610 
2611 	update_free_nid_bitmap(sbi, nid, true, false);
2612 
2613 	spin_unlock(&nm_i->nid_list_lock);
2614 
2615 	if (need_free)
2616 		kmem_cache_free(free_nid_slab, i);
2617 }
2618 
2619 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2620 {
2621 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2622 	int nr = nr_shrink;
2623 
2624 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2625 		return 0;
2626 
2627 	if (!mutex_trylock(&nm_i->build_lock))
2628 		return 0;
2629 
2630 	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2631 		struct free_nid *i, *next;
2632 		unsigned int batch = SHRINK_NID_BATCH_SIZE;
2633 
2634 		spin_lock(&nm_i->nid_list_lock);
2635 		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2636 			if (!nr_shrink || !batch ||
2637 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2638 				break;
2639 			__remove_free_nid(sbi, i, FREE_NID);
2640 			kmem_cache_free(free_nid_slab, i);
2641 			nr_shrink--;
2642 			batch--;
2643 		}
2644 		spin_unlock(&nm_i->nid_list_lock);
2645 	}
2646 
2647 	mutex_unlock(&nm_i->build_lock);
2648 
2649 	return nr - nr_shrink;
2650 }
2651 
2652 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2653 {
2654 	void *src_addr, *dst_addr;
2655 	size_t inline_size;
2656 	struct page *ipage;
2657 	struct f2fs_inode *ri;
2658 
2659 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2660 	if (IS_ERR(ipage))
2661 		return PTR_ERR(ipage);
2662 
2663 	ri = F2FS_INODE(page);
2664 	if (ri->i_inline & F2FS_INLINE_XATTR) {
2665 		if (!f2fs_has_inline_xattr(inode)) {
2666 			set_inode_flag(inode, FI_INLINE_XATTR);
2667 			stat_inc_inline_xattr(inode);
2668 		}
2669 	} else {
2670 		if (f2fs_has_inline_xattr(inode)) {
2671 			stat_dec_inline_xattr(inode);
2672 			clear_inode_flag(inode, FI_INLINE_XATTR);
2673 		}
2674 		goto update_inode;
2675 	}
2676 
2677 	dst_addr = inline_xattr_addr(inode, ipage);
2678 	src_addr = inline_xattr_addr(inode, page);
2679 	inline_size = inline_xattr_size(inode);
2680 
2681 	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2682 	memcpy(dst_addr, src_addr, inline_size);
2683 update_inode:
2684 	f2fs_update_inode(inode, ipage);
2685 	f2fs_put_page(ipage, 1);
2686 	return 0;
2687 }
2688 
2689 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2690 {
2691 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2692 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2693 	nid_t new_xnid;
2694 	struct dnode_of_data dn;
2695 	struct node_info ni;
2696 	struct page *xpage;
2697 	int err;
2698 
2699 	if (!prev_xnid)
2700 		goto recover_xnid;
2701 
2702 	/* 1: invalidate the previous xattr nid */
2703 	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
2704 	if (err)
2705 		return err;
2706 
2707 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2708 	dec_valid_node_count(sbi, inode, false);
2709 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2710 
2711 recover_xnid:
2712 	/* 2: update xattr nid in inode */
2713 	if (!f2fs_alloc_nid(sbi, &new_xnid))
2714 		return -ENOSPC;
2715 
2716 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2717 	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2718 	if (IS_ERR(xpage)) {
2719 		f2fs_alloc_nid_failed(sbi, new_xnid);
2720 		return PTR_ERR(xpage);
2721 	}
2722 
2723 	f2fs_alloc_nid_done(sbi, new_xnid);
2724 	f2fs_update_inode_page(inode);
2725 
2726 	/* 3: update and set xattr node page dirty */
2727 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2728 
2729 	set_page_dirty(xpage);
2730 	f2fs_put_page(xpage, 1);
2731 
2732 	return 0;
2733 }
2734 
2735 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2736 {
2737 	struct f2fs_inode *src, *dst;
2738 	nid_t ino = ino_of_node(page);
2739 	struct node_info old_ni, new_ni;
2740 	struct page *ipage;
2741 	int err;
2742 
2743 	err = f2fs_get_node_info(sbi, ino, &old_ni);
2744 	if (err)
2745 		return err;
2746 
2747 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2748 		return -EINVAL;
2749 retry:
2750 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2751 	if (!ipage) {
2752 		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2753 		goto retry;
2754 	}
2755 
2756 	/* Should not use this inode from free nid list */
2757 	remove_free_nid(sbi, ino);
2758 
2759 	if (!PageUptodate(ipage))
2760 		SetPageUptodate(ipage);
2761 	fill_node_footer(ipage, ino, ino, 0, true);
2762 	set_cold_node(ipage, false);
2763 
2764 	src = F2FS_INODE(page);
2765 	dst = F2FS_INODE(ipage);
2766 
2767 	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2768 	dst->i_size = 0;
2769 	dst->i_blocks = cpu_to_le64(1);
2770 	dst->i_links = cpu_to_le32(1);
2771 	dst->i_xattr_nid = 0;
2772 	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2773 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2774 		dst->i_extra_isize = src->i_extra_isize;
2775 
2776 		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2777 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2778 							i_inline_xattr_size))
2779 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
2780 
2781 		if (f2fs_sb_has_project_quota(sbi) &&
2782 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2783 								i_projid))
2784 			dst->i_projid = src->i_projid;
2785 
2786 		if (f2fs_sb_has_inode_crtime(sbi) &&
2787 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2788 							i_crtime_nsec)) {
2789 			dst->i_crtime = src->i_crtime;
2790 			dst->i_crtime_nsec = src->i_crtime_nsec;
2791 		}
2792 	}
2793 
2794 	new_ni = old_ni;
2795 	new_ni.ino = ino;
2796 
2797 	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2798 		WARN_ON(1);
2799 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2800 	inc_valid_inode_count(sbi);
2801 	set_page_dirty(ipage);
2802 	f2fs_put_page(ipage, 1);
2803 	return 0;
2804 }
2805 
2806 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2807 			unsigned int segno, struct f2fs_summary_block *sum)
2808 {
2809 	struct f2fs_node *rn;
2810 	struct f2fs_summary *sum_entry;
2811 	block_t addr;
2812 	int i, idx, last_offset, nrpages;
2813 
2814 	/* scan the node segment */
2815 	last_offset = sbi->blocks_per_seg;
2816 	addr = START_BLOCK(sbi, segno);
2817 	sum_entry = &sum->entries[0];
2818 
2819 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2820 		nrpages = bio_max_segs(last_offset - i);
2821 
2822 		/* readahead node pages */
2823 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2824 
2825 		for (idx = addr; idx < addr + nrpages; idx++) {
2826 			struct page *page = f2fs_get_tmp_page(sbi, idx);
2827 
2828 			if (IS_ERR(page))
2829 				return PTR_ERR(page);
2830 
2831 			rn = F2FS_NODE(page);
2832 			sum_entry->nid = rn->footer.nid;
2833 			sum_entry->version = 0;
2834 			sum_entry->ofs_in_node = 0;
2835 			sum_entry++;
2836 			f2fs_put_page(page, 1);
2837 		}
2838 
2839 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2840 							addr + nrpages);
2841 	}
2842 	return 0;
2843 }
2844 
2845 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2846 {
2847 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2848 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2849 	struct f2fs_journal *journal = curseg->journal;
2850 	int i;
2851 
2852 	down_write(&curseg->journal_rwsem);
2853 	for (i = 0; i < nats_in_cursum(journal); i++) {
2854 		struct nat_entry *ne;
2855 		struct f2fs_nat_entry raw_ne;
2856 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2857 
2858 		if (f2fs_check_nid_range(sbi, nid))
2859 			continue;
2860 
2861 		raw_ne = nat_in_journal(journal, i);
2862 
2863 		ne = __lookup_nat_cache(nm_i, nid);
2864 		if (!ne) {
2865 			ne = __alloc_nat_entry(sbi, nid, true);
2866 			__init_nat_entry(nm_i, ne, &raw_ne, true);
2867 		}
2868 
2869 		/*
2870 		 * if a free nat in journal has not been used after last
2871 		 * checkpoint, we should remove it from available nids,
2872 		 * since later we will add it again.
2873 		 */
2874 		if (!get_nat_flag(ne, IS_DIRTY) &&
2875 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2876 			spin_lock(&nm_i->nid_list_lock);
2877 			nm_i->available_nids--;
2878 			spin_unlock(&nm_i->nid_list_lock);
2879 		}
2880 
2881 		__set_nat_cache_dirty(nm_i, ne);
2882 	}
2883 	update_nats_in_cursum(journal, -i);
2884 	up_write(&curseg->journal_rwsem);
2885 }
2886 
2887 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2888 						struct list_head *head, int max)
2889 {
2890 	struct nat_entry_set *cur;
2891 
2892 	if (nes->entry_cnt >= max)
2893 		goto add_out;
2894 
2895 	list_for_each_entry(cur, head, set_list) {
2896 		if (cur->entry_cnt >= nes->entry_cnt) {
2897 			list_add(&nes->set_list, cur->set_list.prev);
2898 			return;
2899 		}
2900 	}
2901 add_out:
2902 	list_add_tail(&nes->set_list, head);
2903 }
2904 
2905 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
2906 							unsigned int valid)
2907 {
2908 	if (valid == 0) {
2909 		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
2910 		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2911 		return;
2912 	}
2913 
2914 	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
2915 	if (valid == NAT_ENTRY_PER_BLOCK)
2916 		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
2917 	else
2918 		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2919 }
2920 
2921 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2922 						struct page *page)
2923 {
2924 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2925 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2926 	struct f2fs_nat_block *nat_blk = page_address(page);
2927 	int valid = 0;
2928 	int i = 0;
2929 
2930 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
2931 		return;
2932 
2933 	if (nat_index == 0) {
2934 		valid = 1;
2935 		i = 1;
2936 	}
2937 	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2938 		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2939 			valid++;
2940 	}
2941 
2942 	__update_nat_bits(nm_i, nat_index, valid);
2943 }
2944 
2945 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
2946 {
2947 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2948 	unsigned int nat_ofs;
2949 
2950 	down_read(&nm_i->nat_tree_lock);
2951 
2952 	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
2953 		unsigned int valid = 0, nid_ofs = 0;
2954 
2955 		/* handle nid zero due to it should never be used */
2956 		if (unlikely(nat_ofs == 0)) {
2957 			valid = 1;
2958 			nid_ofs = 1;
2959 		}
2960 
2961 		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
2962 			if (!test_bit_le(nid_ofs,
2963 					nm_i->free_nid_bitmap[nat_ofs]))
2964 				valid++;
2965 		}
2966 
2967 		__update_nat_bits(nm_i, nat_ofs, valid);
2968 	}
2969 
2970 	up_read(&nm_i->nat_tree_lock);
2971 }
2972 
2973 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2974 		struct nat_entry_set *set, struct cp_control *cpc)
2975 {
2976 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2977 	struct f2fs_journal *journal = curseg->journal;
2978 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2979 	bool to_journal = true;
2980 	struct f2fs_nat_block *nat_blk;
2981 	struct nat_entry *ne, *cur;
2982 	struct page *page = NULL;
2983 
2984 	/*
2985 	 * there are two steps to flush nat entries:
2986 	 * #1, flush nat entries to journal in current hot data summary block.
2987 	 * #2, flush nat entries to nat page.
2988 	 */
2989 	if ((cpc->reason & CP_UMOUNT) ||
2990 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2991 		to_journal = false;
2992 
2993 	if (to_journal) {
2994 		down_write(&curseg->journal_rwsem);
2995 	} else {
2996 		page = get_next_nat_page(sbi, start_nid);
2997 		if (IS_ERR(page))
2998 			return PTR_ERR(page);
2999 
3000 		nat_blk = page_address(page);
3001 		f2fs_bug_on(sbi, !nat_blk);
3002 	}
3003 
3004 	/* flush dirty nats in nat entry set */
3005 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
3006 		struct f2fs_nat_entry *raw_ne;
3007 		nid_t nid = nat_get_nid(ne);
3008 		int offset;
3009 
3010 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
3011 
3012 		if (to_journal) {
3013 			offset = f2fs_lookup_journal_in_cursum(journal,
3014 							NAT_JOURNAL, nid, 1);
3015 			f2fs_bug_on(sbi, offset < 0);
3016 			raw_ne = &nat_in_journal(journal, offset);
3017 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
3018 		} else {
3019 			raw_ne = &nat_blk->entries[nid - start_nid];
3020 		}
3021 		raw_nat_from_node_info(raw_ne, &ne->ni);
3022 		nat_reset_flag(ne);
3023 		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
3024 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
3025 			add_free_nid(sbi, nid, false, true);
3026 		} else {
3027 			spin_lock(&NM_I(sbi)->nid_list_lock);
3028 			update_free_nid_bitmap(sbi, nid, false, false);
3029 			spin_unlock(&NM_I(sbi)->nid_list_lock);
3030 		}
3031 	}
3032 
3033 	if (to_journal) {
3034 		up_write(&curseg->journal_rwsem);
3035 	} else {
3036 		update_nat_bits(sbi, start_nid, page);
3037 		f2fs_put_page(page, 1);
3038 	}
3039 
3040 	/* Allow dirty nats by node block allocation in write_begin */
3041 	if (!set->entry_cnt) {
3042 		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3043 		kmem_cache_free(nat_entry_set_slab, set);
3044 	}
3045 	return 0;
3046 }
3047 
3048 /*
3049  * This function is called during the checkpointing process.
3050  */
3051 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3052 {
3053 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3054 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3055 	struct f2fs_journal *journal = curseg->journal;
3056 	struct nat_entry_set *setvec[SETVEC_SIZE];
3057 	struct nat_entry_set *set, *tmp;
3058 	unsigned int found;
3059 	nid_t set_idx = 0;
3060 	LIST_HEAD(sets);
3061 	int err = 0;
3062 
3063 	/*
3064 	 * during unmount, let's flush nat_bits before checking
3065 	 * nat_cnt[DIRTY_NAT].
3066 	 */
3067 	if (cpc->reason & CP_UMOUNT) {
3068 		down_write(&nm_i->nat_tree_lock);
3069 		remove_nats_in_journal(sbi);
3070 		up_write(&nm_i->nat_tree_lock);
3071 	}
3072 
3073 	if (!nm_i->nat_cnt[DIRTY_NAT])
3074 		return 0;
3075 
3076 	down_write(&nm_i->nat_tree_lock);
3077 
3078 	/*
3079 	 * if there are no enough space in journal to store dirty nat
3080 	 * entries, remove all entries from journal and merge them
3081 	 * into nat entry set.
3082 	 */
3083 	if (cpc->reason & CP_UMOUNT ||
3084 		!__has_cursum_space(journal,
3085 			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3086 		remove_nats_in_journal(sbi);
3087 
3088 	while ((found = __gang_lookup_nat_set(nm_i,
3089 					set_idx, SETVEC_SIZE, setvec))) {
3090 		unsigned idx;
3091 
3092 		set_idx = setvec[found - 1]->set + 1;
3093 		for (idx = 0; idx < found; idx++)
3094 			__adjust_nat_entry_set(setvec[idx], &sets,
3095 						MAX_NAT_JENTRIES(journal));
3096 	}
3097 
3098 	/* flush dirty nats in nat entry set */
3099 	list_for_each_entry_safe(set, tmp, &sets, set_list) {
3100 		err = __flush_nat_entry_set(sbi, set, cpc);
3101 		if (err)
3102 			break;
3103 	}
3104 
3105 	up_write(&nm_i->nat_tree_lock);
3106 	/* Allow dirty nats by node block allocation in write_begin */
3107 
3108 	return err;
3109 }
3110 
3111 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3112 {
3113 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3114 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3115 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3116 	unsigned int i;
3117 	__u64 cp_ver = cur_cp_version(ckpt);
3118 	block_t nat_bits_addr;
3119 
3120 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3121 	nm_i->nat_bits = f2fs_kvzalloc(sbi,
3122 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3123 	if (!nm_i->nat_bits)
3124 		return -ENOMEM;
3125 
3126 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
3127 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3128 
3129 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3130 		return 0;
3131 
3132 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
3133 						nm_i->nat_bits_blocks;
3134 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3135 		struct page *page;
3136 
3137 		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
3138 		if (IS_ERR(page))
3139 			return PTR_ERR(page);
3140 
3141 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3142 					page_address(page), F2FS_BLKSIZE);
3143 		f2fs_put_page(page, 1);
3144 	}
3145 
3146 	cp_ver |= (cur_cp_crc(ckpt) << 32);
3147 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3148 		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
3149 		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
3150 			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
3151 		return 0;
3152 	}
3153 
3154 	f2fs_notice(sbi, "Found nat_bits in checkpoint");
3155 	return 0;
3156 }
3157 
3158 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3159 {
3160 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3161 	unsigned int i = 0;
3162 	nid_t nid, last_nid;
3163 
3164 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3165 		return;
3166 
3167 	for (i = 0; i < nm_i->nat_blocks; i++) {
3168 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3169 		if (i >= nm_i->nat_blocks)
3170 			break;
3171 
3172 		__set_bit_le(i, nm_i->nat_block_bitmap);
3173 
3174 		nid = i * NAT_ENTRY_PER_BLOCK;
3175 		last_nid = nid + NAT_ENTRY_PER_BLOCK;
3176 
3177 		spin_lock(&NM_I(sbi)->nid_list_lock);
3178 		for (; nid < last_nid; nid++)
3179 			update_free_nid_bitmap(sbi, nid, true, true);
3180 		spin_unlock(&NM_I(sbi)->nid_list_lock);
3181 	}
3182 
3183 	for (i = 0; i < nm_i->nat_blocks; i++) {
3184 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3185 		if (i >= nm_i->nat_blocks)
3186 			break;
3187 
3188 		__set_bit_le(i, nm_i->nat_block_bitmap);
3189 	}
3190 }
3191 
3192 static int init_node_manager(struct f2fs_sb_info *sbi)
3193 {
3194 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3195 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3196 	unsigned char *version_bitmap;
3197 	unsigned int nat_segs;
3198 	int err;
3199 
3200 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3201 
3202 	/* segment_count_nat includes pair segment so divide to 2. */
3203 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3204 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3205 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3206 
3207 	/* not used nids: 0, node, meta, (and root counted as valid node) */
3208 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3209 						F2FS_RESERVED_NODE_NUM;
3210 	nm_i->nid_cnt[FREE_NID] = 0;
3211 	nm_i->nid_cnt[PREALLOC_NID] = 0;
3212 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3213 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3214 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3215 
3216 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3217 	INIT_LIST_HEAD(&nm_i->free_nid_list);
3218 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3219 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3220 	INIT_LIST_HEAD(&nm_i->nat_entries);
3221 	spin_lock_init(&nm_i->nat_list_lock);
3222 
3223 	mutex_init(&nm_i->build_lock);
3224 	spin_lock_init(&nm_i->nid_list_lock);
3225 	init_rwsem(&nm_i->nat_tree_lock);
3226 
3227 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3228 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3229 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3230 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3231 					GFP_KERNEL);
3232 	if (!nm_i->nat_bitmap)
3233 		return -ENOMEM;
3234 
3235 	err = __get_nat_bitmaps(sbi);
3236 	if (err)
3237 		return err;
3238 
3239 #ifdef CONFIG_F2FS_CHECK_FS
3240 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3241 					GFP_KERNEL);
3242 	if (!nm_i->nat_bitmap_mir)
3243 		return -ENOMEM;
3244 #endif
3245 
3246 	return 0;
3247 }
3248 
3249 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3250 {
3251 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3252 	int i;
3253 
3254 	nm_i->free_nid_bitmap =
3255 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3256 					      nm_i->nat_blocks),
3257 			      GFP_KERNEL);
3258 	if (!nm_i->free_nid_bitmap)
3259 		return -ENOMEM;
3260 
3261 	for (i = 0; i < nm_i->nat_blocks; i++) {
3262 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3263 			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3264 		if (!nm_i->free_nid_bitmap[i])
3265 			return -ENOMEM;
3266 	}
3267 
3268 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3269 								GFP_KERNEL);
3270 	if (!nm_i->nat_block_bitmap)
3271 		return -ENOMEM;
3272 
3273 	nm_i->free_nid_count =
3274 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3275 					      nm_i->nat_blocks),
3276 			      GFP_KERNEL);
3277 	if (!nm_i->free_nid_count)
3278 		return -ENOMEM;
3279 	return 0;
3280 }
3281 
3282 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3283 {
3284 	int err;
3285 
3286 	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3287 							GFP_KERNEL);
3288 	if (!sbi->nm_info)
3289 		return -ENOMEM;
3290 
3291 	err = init_node_manager(sbi);
3292 	if (err)
3293 		return err;
3294 
3295 	err = init_free_nid_cache(sbi);
3296 	if (err)
3297 		return err;
3298 
3299 	/* load free nid status from nat_bits table */
3300 	load_free_nid_bitmap(sbi);
3301 
3302 	return f2fs_build_free_nids(sbi, true, true);
3303 }
3304 
3305 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3306 {
3307 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3308 	struct free_nid *i, *next_i;
3309 	struct nat_entry *natvec[NATVEC_SIZE];
3310 	struct nat_entry_set *setvec[SETVEC_SIZE];
3311 	nid_t nid = 0;
3312 	unsigned int found;
3313 
3314 	if (!nm_i)
3315 		return;
3316 
3317 	/* destroy free nid list */
3318 	spin_lock(&nm_i->nid_list_lock);
3319 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3320 		__remove_free_nid(sbi, i, FREE_NID);
3321 		spin_unlock(&nm_i->nid_list_lock);
3322 		kmem_cache_free(free_nid_slab, i);
3323 		spin_lock(&nm_i->nid_list_lock);
3324 	}
3325 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3326 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3327 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3328 	spin_unlock(&nm_i->nid_list_lock);
3329 
3330 	/* destroy nat cache */
3331 	down_write(&nm_i->nat_tree_lock);
3332 	while ((found = __gang_lookup_nat_cache(nm_i,
3333 					nid, NATVEC_SIZE, natvec))) {
3334 		unsigned idx;
3335 
3336 		nid = nat_get_nid(natvec[found - 1]) + 1;
3337 		for (idx = 0; idx < found; idx++) {
3338 			spin_lock(&nm_i->nat_list_lock);
3339 			list_del(&natvec[idx]->list);
3340 			spin_unlock(&nm_i->nat_list_lock);
3341 
3342 			__del_from_nat_cache(nm_i, natvec[idx]);
3343 		}
3344 	}
3345 	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3346 
3347 	/* destroy nat set cache */
3348 	nid = 0;
3349 	while ((found = __gang_lookup_nat_set(nm_i,
3350 					nid, SETVEC_SIZE, setvec))) {
3351 		unsigned idx;
3352 
3353 		nid = setvec[found - 1]->set + 1;
3354 		for (idx = 0; idx < found; idx++) {
3355 			/* entry_cnt is not zero, when cp_error was occurred */
3356 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3357 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3358 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3359 		}
3360 	}
3361 	up_write(&nm_i->nat_tree_lock);
3362 
3363 	kvfree(nm_i->nat_block_bitmap);
3364 	if (nm_i->free_nid_bitmap) {
3365 		int i;
3366 
3367 		for (i = 0; i < nm_i->nat_blocks; i++)
3368 			kvfree(nm_i->free_nid_bitmap[i]);
3369 		kvfree(nm_i->free_nid_bitmap);
3370 	}
3371 	kvfree(nm_i->free_nid_count);
3372 
3373 	kvfree(nm_i->nat_bitmap);
3374 	kvfree(nm_i->nat_bits);
3375 #ifdef CONFIG_F2FS_CHECK_FS
3376 	kvfree(nm_i->nat_bitmap_mir);
3377 #endif
3378 	sbi->nm_info = NULL;
3379 	kfree(nm_i);
3380 }
3381 
3382 int __init f2fs_create_node_manager_caches(void)
3383 {
3384 	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3385 			sizeof(struct nat_entry));
3386 	if (!nat_entry_slab)
3387 		goto fail;
3388 
3389 	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3390 			sizeof(struct free_nid));
3391 	if (!free_nid_slab)
3392 		goto destroy_nat_entry;
3393 
3394 	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3395 			sizeof(struct nat_entry_set));
3396 	if (!nat_entry_set_slab)
3397 		goto destroy_free_nid;
3398 
3399 	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3400 			sizeof(struct fsync_node_entry));
3401 	if (!fsync_node_entry_slab)
3402 		goto destroy_nat_entry_set;
3403 	return 0;
3404 
3405 destroy_nat_entry_set:
3406 	kmem_cache_destroy(nat_entry_set_slab);
3407 destroy_free_nid:
3408 	kmem_cache_destroy(free_nid_slab);
3409 destroy_nat_entry:
3410 	kmem_cache_destroy(nat_entry_slab);
3411 fail:
3412 	return -ENOMEM;
3413 }
3414 
3415 void f2fs_destroy_node_manager_caches(void)
3416 {
3417 	kmem_cache_destroy(fsync_node_entry_slab);
3418 	kmem_cache_destroy(nat_entry_set_slab);
3419 	kmem_cache_destroy(free_nid_slab);
3420 	kmem_cache_destroy(nat_entry_slab);
3421 }
3422