xref: /openbmc/linux/fs/f2fs/node.c (revision c31e4961)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/node.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/mpage.h>
11 #include <linux/sched/mm.h>
12 #include <linux/blkdev.h>
13 #include <linux/pagevec.h>
14 #include <linux/swap.h>
15 
16 #include "f2fs.h"
17 #include "node.h"
18 #include "segment.h"
19 #include "xattr.h"
20 #include "iostat.h"
21 #include <trace/events/f2fs.h>
22 
23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
24 
25 static struct kmem_cache *nat_entry_slab;
26 static struct kmem_cache *free_nid_slab;
27 static struct kmem_cache *nat_entry_set_slab;
28 static struct kmem_cache *fsync_node_entry_slab;
29 
30 /*
31  * Check whether the given nid is within node id range.
32  */
33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34 {
35 	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36 		set_sbi_flag(sbi, SBI_NEED_FSCK);
37 		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
38 			  __func__, nid);
39 		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
40 		return -EFSCORRUPTED;
41 	}
42 	return 0;
43 }
44 
45 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
46 {
47 	struct f2fs_nm_info *nm_i = NM_I(sbi);
48 	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
49 	struct sysinfo val;
50 	unsigned long avail_ram;
51 	unsigned long mem_size = 0;
52 	bool res = false;
53 
54 	if (!nm_i)
55 		return true;
56 
57 	si_meminfo(&val);
58 
59 	/* only uses low memory */
60 	avail_ram = val.totalram - val.totalhigh;
61 
62 	/*
63 	 * give 25%, 25%, 50%, 50%, 25%, 25% of memory to each component, respectively
64 	 */
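	/*
	 * Worked example (illustrative, assuming 4KB pages and ram_thresh
	 * set to 10): with avail_ram == 1048576 pages (4GB of low memory),
	 * the FREE_NID budget below is (1048576 * 10 / 100) >> 2 == 26214
	 * pages, i.e. free nids may consume roughly 100MB before this
	 * function reports false and building stops.
	 */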
65 	if (type == FREE_NIDS) {
66 		mem_size = (nm_i->nid_cnt[FREE_NID] *
67 				sizeof(struct free_nid)) >> PAGE_SHIFT;
68 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
69 	} else if (type == NAT_ENTRIES) {
70 		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
71 				sizeof(struct nat_entry)) >> PAGE_SHIFT;
72 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
73 		if (excess_cached_nats(sbi))
74 			res = false;
75 	} else if (type == DIRTY_DENTS) {
76 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
77 			return false;
78 		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
79 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
80 	} else if (type == INO_ENTRIES) {
81 		int i;
82 
83 		for (i = 0; i < MAX_INO_ENTRY; i++)
84 			mem_size += sbi->im[i].ino_num *
85 						sizeof(struct ino_entry);
86 		mem_size >>= PAGE_SHIFT;
87 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
88 	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
89 		enum extent_type etype = type == READ_EXTENT_CACHE ?
90 						EX_READ : EX_BLOCK_AGE;
91 		struct extent_tree_info *eti = &sbi->extent_tree[etype];
92 
93 		mem_size = (atomic_read(&eti->total_ext_tree) *
94 				sizeof(struct extent_tree) +
95 				atomic_read(&eti->total_ext_node) *
96 				sizeof(struct extent_node)) >> PAGE_SHIFT;
97 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
98 	} else if (type == DISCARD_CACHE) {
99 		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
100 				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
101 		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
102 	} else if (type == COMPRESS_PAGE) {
103 #ifdef CONFIG_F2FS_FS_COMPRESSION
104 		unsigned long free_ram = val.freeram;
105 
106 		/*
107 		 * If free memory is lower than the watermark or the cached page
108 		 * count exceeds the threshold, deny caching compressed pages.
109 		 */
110 		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
111 			(COMPRESS_MAPPING(sbi)->nrpages <
112 			 free_ram * sbi->compress_percent / 100);
113 #else
114 		res = false;
115 #endif
116 	} else {
117 		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
118 			return true;
119 	}
120 	return res;
121 }
122 
123 static void clear_node_page_dirty(struct page *page)
124 {
125 	if (PageDirty(page)) {
126 		f2fs_clear_page_cache_dirty_tag(page);
127 		clear_page_dirty_for_io(page);
128 		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
129 	}
130 	ClearPageUptodate(page);
131 }
132 
133 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
134 {
135 	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
136 }
137 
138 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
139 {
140 	struct page *src_page;
141 	struct page *dst_page;
142 	pgoff_t dst_off;
143 	void *src_addr;
144 	void *dst_addr;
145 	struct f2fs_nm_info *nm_i = NM_I(sbi);
146 
147 	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
148 
149 	/* get current nat block page with lock */
150 	src_page = get_current_nat_page(sbi, nid);
151 	if (IS_ERR(src_page))
152 		return src_page;
153 	dst_page = f2fs_grab_meta_page(sbi, dst_off);
154 	f2fs_bug_on(sbi, PageDirty(src_page));
155 
156 	src_addr = page_address(src_page);
157 	dst_addr = page_address(dst_page);
158 	memcpy(dst_addr, src_addr, PAGE_SIZE);
159 	set_page_dirty(dst_page);
160 	f2fs_put_page(src_page, 1);
161 
162 	set_to_next_nat(nm_i, nid);
163 
164 	return dst_page;
165 }
166 
167 static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
168 						nid_t nid, bool no_fail)
169 {
170 	struct nat_entry *new;
171 
172 	new = f2fs_kmem_cache_alloc(nat_entry_slab,
173 					GFP_F2FS_ZERO, no_fail, sbi);
174 	if (new) {
175 		nat_set_nid(new, nid);
176 		nat_reset_flag(new);
177 	}
178 	return new;
179 }
180 
181 static void __free_nat_entry(struct nat_entry *e)
182 {
183 	kmem_cache_free(nat_entry_slab, e);
184 }
185 
186 /* must be locked by nat_tree_lock */
187 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
188 	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
189 {
190 	if (no_fail)
191 		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
192 	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
193 		return NULL;
194 
195 	if (raw_ne)
196 		node_info_from_raw_nat(&ne->ni, raw_ne);
197 
198 	spin_lock(&nm_i->nat_list_lock);
199 	list_add_tail(&ne->list, &nm_i->nat_entries);
200 	spin_unlock(&nm_i->nat_list_lock);
201 
202 	nm_i->nat_cnt[TOTAL_NAT]++;
203 	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
204 	return ne;
205 }
206 
207 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
208 {
209 	struct nat_entry *ne;
210 
211 	ne = radix_tree_lookup(&nm_i->nat_root, n);
212 
213 	/* for a recently accessed nat entry, move it to the tail of the LRU list */
214 	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
215 		spin_lock(&nm_i->nat_list_lock);
216 		if (!list_empty(&ne->list))
217 			list_move_tail(&ne->list, &nm_i->nat_entries);
218 		spin_unlock(&nm_i->nat_list_lock);
219 	}
220 
221 	return ne;
222 }
223 
224 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
225 		nid_t start, unsigned int nr, struct nat_entry **ep)
226 {
227 	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
228 }
229 
230 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
231 {
232 	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
233 	nm_i->nat_cnt[TOTAL_NAT]--;
234 	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
235 	__free_nat_entry(e);
236 }
237 
238 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
239 							struct nat_entry *ne)
240 {
241 	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
242 	struct nat_entry_set *head;
243 
244 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
245 	if (!head) {
246 		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
247 						GFP_NOFS, true, NULL);
248 
249 		INIT_LIST_HEAD(&head->entry_list);
250 		INIT_LIST_HEAD(&head->set_list);
251 		head->set = set;
252 		head->entry_cnt = 0;
253 		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
254 	}
255 	return head;
256 }
257 
258 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
259 						struct nat_entry *ne)
260 {
261 	struct nat_entry_set *head;
262 	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
263 
264 	if (!new_ne)
265 		head = __grab_nat_entry_set(nm_i, ne);
266 
267 	/*
268 	 * update entry_cnt under the following conditions:
269 	 * 1. updating NEW_ADDR to a valid block address;
270 	 * 2. updating an old block address to a new one;
271 	 */
272 	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
273 				!get_nat_flag(ne, IS_DIRTY)))
274 		head->entry_cnt++;
275 
276 	set_nat_flag(ne, IS_PREALLOC, new_ne);
277 
278 	if (get_nat_flag(ne, IS_DIRTY))
279 		goto refresh_list;
280 
281 	nm_i->nat_cnt[DIRTY_NAT]++;
282 	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
283 	set_nat_flag(ne, IS_DIRTY, true);
284 refresh_list:
285 	spin_lock(&nm_i->nat_list_lock);
286 	if (new_ne)
287 		list_del_init(&ne->list);
288 	else
289 		list_move_tail(&ne->list, &head->entry_list);
290 	spin_unlock(&nm_i->nat_list_lock);
291 }
292 
293 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
294 		struct nat_entry_set *set, struct nat_entry *ne)
295 {
296 	spin_lock(&nm_i->nat_list_lock);
297 	list_move_tail(&ne->list, &nm_i->nat_entries);
298 	spin_unlock(&nm_i->nat_list_lock);
299 
300 	set_nat_flag(ne, IS_DIRTY, false);
301 	set->entry_cnt--;
302 	nm_i->nat_cnt[DIRTY_NAT]--;
303 	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
304 }
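
/*
 * Note, derivable from the counter updates above and in
 * __init_nat_entry()/__del_from_nat_cache(): every cached entry is either
 * dirty (pinned until the next checkpoint) or reclaimable, so
 * nat_cnt[TOTAL_NAT] == nat_cnt[DIRTY_NAT] + nat_cnt[RECLAIMABLE_NAT]
 * holds throughout.
 */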
305 
306 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
307 		nid_t start, unsigned int nr, struct nat_entry_set **ep)
308 {
309 	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
310 							start, nr);
311 }
312 
313 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
314 {
315 	return NODE_MAPPING(sbi) == page->mapping &&
316 			IS_DNODE(page) && is_cold_node(page);
317 }
318 
319 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
320 {
321 	spin_lock_init(&sbi->fsync_node_lock);
322 	INIT_LIST_HEAD(&sbi->fsync_node_list);
323 	sbi->fsync_seg_id = 0;
324 	sbi->fsync_node_num = 0;
325 }
326 
327 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
328 							struct page *page)
329 {
330 	struct fsync_node_entry *fn;
331 	unsigned long flags;
332 	unsigned int seq_id;
333 
334 	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
335 					GFP_NOFS, true, NULL);
336 
337 	get_page(page);
338 	fn->page = page;
339 	INIT_LIST_HEAD(&fn->list);
340 
341 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
342 	list_add_tail(&fn->list, &sbi->fsync_node_list);
343 	fn->seq_id = sbi->fsync_seg_id++;
344 	seq_id = fn->seq_id;
345 	sbi->fsync_node_num++;
346 	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
347 
348 	return seq_id;
349 }
350 
351 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
352 {
353 	struct fsync_node_entry *fn;
354 	unsigned long flags;
355 
356 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
357 	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
358 		if (fn->page == page) {
359 			list_del(&fn->list);
360 			sbi->fsync_node_num--;
361 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
362 			kmem_cache_free(fsync_node_entry_slab, fn);
363 			put_page(page);
364 			return;
365 		}
366 	}
367 	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
368 	f2fs_bug_on(sbi, 1);
369 }
370 
371 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
372 {
373 	unsigned long flags;
374 
375 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
376 	sbi->fsync_seg_id = 0;
377 	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
378 }
379 
380 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
381 {
382 	struct f2fs_nm_info *nm_i = NM_I(sbi);
383 	struct nat_entry *e;
384 	bool need = false;
385 
386 	f2fs_down_read(&nm_i->nat_tree_lock);
387 	e = __lookup_nat_cache(nm_i, nid);
388 	if (e) {
389 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
390 				!get_nat_flag(e, HAS_FSYNCED_INODE))
391 			need = true;
392 	}
393 	f2fs_up_read(&nm_i->nat_tree_lock);
394 	return need;
395 }
396 
397 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
398 {
399 	struct f2fs_nm_info *nm_i = NM_I(sbi);
400 	struct nat_entry *e;
401 	bool is_cp = true;
402 
403 	f2fs_down_read(&nm_i->nat_tree_lock);
404 	e = __lookup_nat_cache(nm_i, nid);
405 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
406 		is_cp = false;
407 	f2fs_up_read(&nm_i->nat_tree_lock);
408 	return is_cp;
409 }
410 
411 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
412 {
413 	struct f2fs_nm_info *nm_i = NM_I(sbi);
414 	struct nat_entry *e;
415 	bool need_update = true;
416 
417 	f2fs_down_read(&nm_i->nat_tree_lock);
418 	e = __lookup_nat_cache(nm_i, ino);
419 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
420 			(get_nat_flag(e, IS_CHECKPOINTED) ||
421 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
422 		need_update = false;
423 	f2fs_up_read(&nm_i->nat_tree_lock);
424 	return need_update;
425 }
426 
427 /* must be locked by nat_tree_lock */
428 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
429 						struct f2fs_nat_entry *ne)
430 {
431 	struct f2fs_nm_info *nm_i = NM_I(sbi);
432 	struct nat_entry *new, *e;
433 
434 	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
435 	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
436 		return;
437 
438 	new = __alloc_nat_entry(sbi, nid, false);
439 	if (!new)
440 		return;
441 
442 	f2fs_down_write(&nm_i->nat_tree_lock);
443 	e = __lookup_nat_cache(nm_i, nid);
444 	if (!e)
445 		e = __init_nat_entry(nm_i, new, ne, false);
446 	else
447 		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
448 				nat_get_blkaddr(e) !=
449 					le32_to_cpu(ne->block_addr) ||
450 				nat_get_version(e) != ne->version);
451 	f2fs_up_write(&nm_i->nat_tree_lock);
452 	if (e != new)
453 		__free_nat_entry(new);
454 }
455 
456 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
457 			block_t new_blkaddr, bool fsync_done)
458 {
459 	struct f2fs_nm_info *nm_i = NM_I(sbi);
460 	struct nat_entry *e;
461 	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
462 
463 	f2fs_down_write(&nm_i->nat_tree_lock);
464 	e = __lookup_nat_cache(nm_i, ni->nid);
465 	if (!e) {
466 		e = __init_nat_entry(nm_i, new, NULL, true);
467 		copy_node_info(&e->ni, ni);
468 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
469 	} else if (new_blkaddr == NEW_ADDR) {
470 		/*
471 		 * when a nid is reallocated, the previous nat entry can
472 		 * remain in the nat cache, so reinitialize it with the
473 		 * new information.
474 		 */
475 		copy_node_info(&e->ni, ni);
476 		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
477 	}
478 	/* let's free early to reduce memory consumption */
479 	if (e != new)
480 		__free_nat_entry(new);
481 
482 	/* sanity check */
483 	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
484 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
485 			new_blkaddr == NULL_ADDR);
486 	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
487 			new_blkaddr == NEW_ADDR);
488 	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
489 			new_blkaddr == NEW_ADDR);
490 
491 	/* increment the version number as the node is removed */
492 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
493 		unsigned char version = nat_get_version(e);
494 
495 		nat_set_version(e, inc_node_version(version));
496 	}
497 
498 	/* change address */
499 	nat_set_blkaddr(e, new_blkaddr);
500 	if (!__is_valid_data_blkaddr(new_blkaddr))
501 		set_nat_flag(e, IS_CHECKPOINTED, false);
502 	__set_nat_cache_dirty(nm_i, e);
503 
504 	/* update fsync_mark if its inode nat entry is still alive */
505 	if (ni->nid != ni->ino)
506 		e = __lookup_nat_cache(nm_i, ni->ino);
507 	if (e) {
508 		if (fsync_done && ni->nid == ni->ino)
509 			set_nat_flag(e, HAS_FSYNCED_INODE, true);
510 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
511 	}
512 	f2fs_up_write(&nm_i->nat_tree_lock);
513 }
514 
515 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
516 {
517 	struct f2fs_nm_info *nm_i = NM_I(sbi);
518 	int nr = nr_shrink;
519 
520 	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
521 		return 0;
522 
523 	spin_lock(&nm_i->nat_list_lock);
524 	while (nr_shrink) {
525 		struct nat_entry *ne;
526 
527 		if (list_empty(&nm_i->nat_entries))
528 			break;
529 
530 		ne = list_first_entry(&nm_i->nat_entries,
531 					struct nat_entry, list);
532 		list_del(&ne->list);
533 		spin_unlock(&nm_i->nat_list_lock);
534 
535 		__del_from_nat_cache(nm_i, ne);
536 		nr_shrink--;
537 
538 		spin_lock(&nm_i->nat_list_lock);
539 	}
540 	spin_unlock(&nm_i->nat_list_lock);
541 
542 	f2fs_up_write(&nm_i->nat_tree_lock);
543 	return nr - nr_shrink;
544 }
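
/*
 * Caller sketch (illustrative): the slab shrinker asks to drop at most
 * nr_shrink clean entries and learns how many were actually freed:
 *
 *	freed = f2fs_try_to_free_nats(sbi, nr_shrink);
 */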
545 
546 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
547 				struct node_info *ni, bool checkpoint_context)
548 {
549 	struct f2fs_nm_info *nm_i = NM_I(sbi);
550 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
551 	struct f2fs_journal *journal = curseg->journal;
552 	nid_t start_nid = START_NID(nid);
553 	struct f2fs_nat_block *nat_blk;
554 	struct page *page = NULL;
555 	struct f2fs_nat_entry ne;
556 	struct nat_entry *e;
557 	pgoff_t index;
558 	block_t blkaddr;
559 	int i;
560 
561 	ni->nid = nid;
562 retry:
563 	/* Check nat cache */
564 	f2fs_down_read(&nm_i->nat_tree_lock);
565 	e = __lookup_nat_cache(nm_i, nid);
566 	if (e) {
567 		ni->ino = nat_get_ino(e);
568 		ni->blk_addr = nat_get_blkaddr(e);
569 		ni->version = nat_get_version(e);
570 		f2fs_up_read(&nm_i->nat_tree_lock);
571 		return 0;
572 	}
573 
574 	/*
575 	 * Check the current segment summary by trying to grab journal_rwsem first.
576 	 * This rwsem is on the checkpoint's critical path, which also takes the
577 	 * above nat_tree_lock. Therefore, if we fail to grab it here, retry
578 	 * instead of stalling the checkpoint.
579 	 */
580 	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
581 		down_read(&curseg->journal_rwsem);
582 	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
583 				!down_read_trylock(&curseg->journal_rwsem)) {
584 		f2fs_up_read(&nm_i->nat_tree_lock);
585 		goto retry;
586 	}
587 
588 	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
589 	if (i >= 0) {
590 		ne = nat_in_journal(journal, i);
591 		node_info_from_raw_nat(ni, &ne);
592 	}
593 	up_read(&curseg->journal_rwsem);
594 	if (i >= 0) {
595 		f2fs_up_read(&nm_i->nat_tree_lock);
596 		goto cache;
597 	}
598 
599 	/* Fill node_info from nat page */
600 	index = current_nat_addr(sbi, nid);
601 	f2fs_up_read(&nm_i->nat_tree_lock);
602 
603 	page = f2fs_get_meta_page(sbi, index);
604 	if (IS_ERR(page))
605 		return PTR_ERR(page);
606 
607 	nat_blk = (struct f2fs_nat_block *)page_address(page);
608 	ne = nat_blk->entries[nid - start_nid];
609 	node_info_from_raw_nat(ni, &ne);
610 	f2fs_put_page(page, 1);
611 cache:
612 	blkaddr = le32_to_cpu(ne.block_addr);
613 	if (__is_valid_data_blkaddr(blkaddr) &&
614 		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
615 		return -EFAULT;
616 
617 	/* cache nat entry */
618 	cache_nat_entry(sbi, nid, &ne);
619 	return 0;
620 }
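
/*
 * Usage sketch (illustrative): resolve a node's on-disk address before
 * issuing I/O against it.
 *
 *	struct node_info ni;
 *	int err;
 *
 *	err = f2fs_get_node_info(sbi, nid, &ni, false);
 *	if (err)
 *		return err;
 *	blkaddr = ni.blk_addr;
 */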
621 
622 /*
623  * Readahead MAX_RA_NODE node pages.
624  */
625 static void f2fs_ra_node_pages(struct page *parent, int start, int n)
626 {
627 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
628 	struct blk_plug plug;
629 	int i, end;
630 	nid_t nid;
631 
632 	blk_start_plug(&plug);
633 
634 	/* try readahead for the siblings of the desired node */
635 	end = start + n;
636 	end = min(end, NIDS_PER_BLOCK);
637 	for (i = start; i < end; i++) {
638 		nid = get_nid(parent, i, false);
639 		f2fs_ra_node_page(sbi, nid);
640 	}
641 
642 	blk_finish_plug(&plug);
643 }
644 
645 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
646 {
647 	const long direct_index = ADDRS_PER_INODE(dn->inode);
648 	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
649 	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
650 	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
651 	int cur_level = dn->cur_level;
652 	int max_level = dn->max_level;
653 	pgoff_t base = 0;
654 
655 	if (!dn->max_level)
656 		return pgofs + 1;
657 
658 	while (max_level-- > cur_level)
659 		skipped_unit *= NIDS_PER_BLOCK;
660 
661 	switch (dn->max_level) {
662 	case 3:
663 		base += 2 * indirect_blks;
664 		fallthrough;
665 	case 2:
666 		base += 2 * direct_blks;
667 		fallthrough;
668 	case 1:
669 		base += direct_index;
670 		break;
671 	default:
672 		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
673 	}
674 
675 	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
676 }
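
/*
 * Example (assuming the default geometry, ADDRS_PER_BLOCK ==
 * NIDS_PER_BLOCK == 1018 and ADDRS_PER_INODE == 923): with
 * cur_level == max_level == 1 and pgofs == 2000, skipped_unit stays
 * 1018 and base == 923, so the next candidate offset is
 * ((2000 - 923) / 1018 + 1) * 1018 + 923 == 2959, the first block
 * covered by the next node page.
 */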
677 
678 /*
679  * The maximum depth is four.
680  * offset[0] will hold the offset within the raw inode.
681  */
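/*
 * Worked example (assuming the default geometry: ADDRS_PER_INODE == 923,
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): for block == 5000,
 * 5000 - 923 - 1018 - 1018 == 2041 lands in the first indirect region,
 * so the result is level == 2 with
 * offset[] == { NODE_IND1_BLOCK, 2041 / 1018, 2041 % 1018 }
 *          == { NODE_IND1_BLOCK, 2, 5 }.
 */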
682 static int get_node_path(struct inode *inode, long block,
683 				int offset[4], unsigned int noffset[4])
684 {
685 	const long direct_index = ADDRS_PER_INODE(inode);
686 	const long direct_blks = ADDRS_PER_BLOCK(inode);
687 	const long dptrs_per_blk = NIDS_PER_BLOCK;
688 	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
689 	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
690 	int n = 0;
691 	int level = 0;
692 
693 	noffset[0] = 0;
694 
695 	if (block < direct_index) {
696 		offset[n] = block;
697 		goto got;
698 	}
699 	block -= direct_index;
700 	if (block < direct_blks) {
701 		offset[n++] = NODE_DIR1_BLOCK;
702 		noffset[n] = 1;
703 		offset[n] = block;
704 		level = 1;
705 		goto got;
706 	}
707 	block -= direct_blks;
708 	if (block < direct_blks) {
709 		offset[n++] = NODE_DIR2_BLOCK;
710 		noffset[n] = 2;
711 		offset[n] = block;
712 		level = 1;
713 		goto got;
714 	}
715 	block -= direct_blks;
716 	if (block < indirect_blks) {
717 		offset[n++] = NODE_IND1_BLOCK;
718 		noffset[n] = 3;
719 		offset[n++] = block / direct_blks;
720 		noffset[n] = 4 + offset[n - 1];
721 		offset[n] = block % direct_blks;
722 		level = 2;
723 		goto got;
724 	}
725 	block -= indirect_blks;
726 	if (block < indirect_blks) {
727 		offset[n++] = NODE_IND2_BLOCK;
728 		noffset[n] = 4 + dptrs_per_blk;
729 		offset[n++] = block / direct_blks;
730 		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
731 		offset[n] = block % direct_blks;
732 		level = 2;
733 		goto got;
734 	}
735 	block -= indirect_blks;
736 	if (block < dindirect_blks) {
737 		offset[n++] = NODE_DIND_BLOCK;
738 		noffset[n] = 5 + (dptrs_per_blk * 2);
739 		offset[n++] = block / indirect_blks;
740 		noffset[n] = 6 + (dptrs_per_blk * 2) +
741 			      offset[n - 1] * (dptrs_per_blk + 1);
742 		offset[n++] = (block / direct_blks) % dptrs_per_blk;
743 		noffset[n] = 7 + (dptrs_per_blk * 2) +
744 			      offset[n - 2] * (dptrs_per_blk + 1) +
745 			      offset[n - 1];
746 		offset[n] = block % direct_blks;
747 		level = 3;
748 		goto got;
749 	} else {
750 		return -E2BIG;
751 	}
752 got:
753 	return level;
754 }
755 
756 /* Caller should call f2fs_put_dnode(dn).
757  * Caller should call f2fs_put_dnode(dn).
758  * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
759  * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
760  */
761 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
762 {
763 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
764 	struct page *npage[4];
765 	struct page *parent = NULL;
766 	int offset[4];
767 	unsigned int noffset[4];
768 	nid_t nids[4];
769 	int level, i = 0;
770 	int err = 0;
771 
772 	level = get_node_path(dn->inode, index, offset, noffset);
773 	if (level < 0)
774 		return level;
775 
776 	nids[0] = dn->inode->i_ino;
777 	npage[0] = dn->inode_page;
778 
779 	if (!npage[0]) {
780 		npage[0] = f2fs_get_node_page(sbi, nids[0]);
781 		if (IS_ERR(npage[0]))
782 			return PTR_ERR(npage[0]);
783 	}
784 
785 	/* if inline_data is set, should not report any block indices */
786 	if (f2fs_has_inline_data(dn->inode) && index) {
787 		err = -ENOENT;
788 		f2fs_put_page(npage[0], 1);
789 		goto release_out;
790 	}
791 
792 	parent = npage[0];
793 	if (level != 0)
794 		nids[1] = get_nid(parent, offset[0], true);
795 	dn->inode_page = npage[0];
796 	dn->inode_page_locked = true;
797 
798 	/* get indirect or direct nodes */
799 	for (i = 1; i <= level; i++) {
800 		bool done = false;
801 
802 		if (!nids[i] && mode == ALLOC_NODE) {
803 			/* alloc new node */
804 			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
805 				err = -ENOSPC;
806 				goto release_pages;
807 			}
808 
809 			dn->nid = nids[i];
810 			npage[i] = f2fs_new_node_page(dn, noffset[i]);
811 			if (IS_ERR(npage[i])) {
812 				f2fs_alloc_nid_failed(sbi, nids[i]);
813 				err = PTR_ERR(npage[i]);
814 				goto release_pages;
815 			}
816 
817 			set_nid(parent, offset[i - 1], nids[i], i == 1);
818 			f2fs_alloc_nid_done(sbi, nids[i]);
819 			done = true;
820 		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
821 			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
822 			if (IS_ERR(npage[i])) {
823 				err = PTR_ERR(npage[i]);
824 				goto release_pages;
825 			}
826 			done = true;
827 		}
828 		if (i == 1) {
829 			dn->inode_page_locked = false;
830 			unlock_page(parent);
831 		} else {
832 			f2fs_put_page(parent, 1);
833 		}
834 
835 		if (!done) {
836 			npage[i] = f2fs_get_node_page(sbi, nids[i]);
837 			if (IS_ERR(npage[i])) {
838 				err = PTR_ERR(npage[i]);
839 				f2fs_put_page(npage[0], 0);
840 				goto release_out;
841 			}
842 		}
843 		if (i < level) {
844 			parent = npage[i];
845 			nids[i + 1] = get_nid(parent, offset[i], false);
846 		}
847 	}
848 	dn->nid = nids[level];
849 	dn->ofs_in_node = offset[level];
850 	dn->node_page = npage[level];
851 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
852 
853 	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
854 					f2fs_sb_has_readonly(sbi)) {
855 		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
856 		block_t blkaddr;
857 
858 		if (!c_len)
859 			goto out;
860 
861 		blkaddr = f2fs_data_blkaddr(dn);
862 		if (blkaddr == COMPRESS_ADDR)
863 			blkaddr = data_blkaddr(dn->inode, dn->node_page,
864 						dn->ofs_in_node + 1);
865 
866 		f2fs_update_read_extent_tree_range_compressed(dn->inode,
867 					index, blkaddr,
868 					F2FS_I(dn->inode)->i_cluster_size,
869 					c_len);
870 	}
871 out:
872 	return 0;
873 
874 release_pages:
875 	f2fs_put_page(parent, 1);
876 	if (i > 1)
877 		f2fs_put_page(npage[0], 0);
878 release_out:
879 	dn->inode_page = NULL;
880 	dn->node_page = NULL;
881 	if (err == -ENOENT) {
882 		dn->cur_level = i;
883 		dn->max_level = level;
884 		dn->ofs_in_node = offset[level];
885 	}
886 	return err;
887 }
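
/*
 * Usage sketch (illustrative): translate file offset @index into a data
 * block address, allocating any missing node blocks on the way down.
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	f2fs_lock_op(sbi);
 *	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 *	f2fs_unlock_op(sbi);
 */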
888 
889 static int truncate_node(struct dnode_of_data *dn)
890 {
891 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
892 	struct node_info ni;
893 	int err;
894 	pgoff_t index;
895 
896 	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
897 	if (err)
898 		return err;
899 
900 	/* Deallocate node address */
901 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
902 	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
903 	set_node_addr(sbi, &ni, NULL_ADDR, false);
904 
905 	if (dn->nid == dn->inode->i_ino) {
906 		f2fs_remove_orphan_inode(sbi, dn->nid);
907 		dec_valid_inode_count(sbi);
908 		f2fs_inode_synced(dn->inode);
909 	}
910 
911 	clear_node_page_dirty(dn->node_page);
912 	set_sbi_flag(sbi, SBI_IS_DIRTY);
913 
914 	index = dn->node_page->index;
915 	f2fs_put_page(dn->node_page, 1);
916 
917 	invalidate_mapping_pages(NODE_MAPPING(sbi),
918 			index, index);
919 
920 	dn->node_page = NULL;
921 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
922 
923 	return 0;
924 }
925 
926 static int truncate_dnode(struct dnode_of_data *dn)
927 {
928 	struct page *page;
929 	int err;
930 
931 	if (dn->nid == 0)
932 		return 1;
933 
934 	/* get direct node */
935 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
936 	if (PTR_ERR(page) == -ENOENT)
937 		return 1;
938 	else if (IS_ERR(page))
939 		return PTR_ERR(page);
940 
941 	/* Make dnode_of_data for parameter */
942 	dn->node_page = page;
943 	dn->ofs_in_node = 0;
944 	f2fs_truncate_data_blocks(dn);
945 	err = truncate_node(dn);
946 	if (err) {
947 		f2fs_put_page(page, 1);
948 		return err;
949 	}
950 
951 	return 1;
952 }
953 
954 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
955 						int ofs, int depth)
956 {
957 	struct dnode_of_data rdn = *dn;
958 	struct page *page;
959 	struct f2fs_node *rn;
960 	nid_t child_nid;
961 	unsigned int child_nofs;
962 	int freed = 0;
963 	int i, ret;
964 
965 	if (dn->nid == 0)
966 		return NIDS_PER_BLOCK + 1;
967 
968 	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
969 
970 	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
971 	if (IS_ERR(page)) {
972 		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
973 		return PTR_ERR(page);
974 	}
975 
976 	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
977 
978 	rn = F2FS_NODE(page);
979 	if (depth < 3) {
980 		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
981 			child_nid = le32_to_cpu(rn->in.nid[i]);
982 			if (child_nid == 0)
983 				continue;
984 			rdn.nid = child_nid;
985 			ret = truncate_dnode(&rdn);
986 			if (ret < 0)
987 				goto out_err;
988 			if (set_nid(page, i, 0, false))
989 				dn->node_changed = true;
990 		}
991 	} else {
992 		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
993 		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
994 			child_nid = le32_to_cpu(rn->in.nid[i]);
995 			if (child_nid == 0) {
996 				child_nofs += NIDS_PER_BLOCK + 1;
997 				continue;
998 			}
999 			rdn.nid = child_nid;
1000 			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
1001 			if (ret == (NIDS_PER_BLOCK + 1)) {
1002 				if (set_nid(page, i, 0, false))
1003 					dn->node_changed = true;
1004 				child_nofs += ret;
1005 			} else if (ret < 0 && ret != -ENOENT) {
1006 				goto out_err;
1007 			}
1008 		}
1009 		freed = child_nofs;
1010 	}
1011 
1012 	if (!ofs) {
1013 		/* remove current indirect node */
1014 		dn->node_page = page;
1015 		ret = truncate_node(dn);
1016 		if (ret)
1017 			goto out_err;
1018 		freed++;
1019 	} else {
1020 		f2fs_put_page(page, 1);
1021 	}
1022 	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
1023 	return freed;
1024 
1025 out_err:
1026 	f2fs_put_page(page, 1);
1027 	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
1028 	return ret;
1029 }
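
/*
 * Return-value example, derivable from the loop above: a depth-2 call
 * with ofs == 0 counts all NIDS_PER_BLOCK child slots (freed == 1018)
 * and then frees the indirect node itself, returning
 * 1019 == NIDS_PER_BLOCK + 1, which is exactly the value the depth-3
 * caller tests before clearing the parent's nid slot.
 */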
1030 
1031 static int truncate_partial_nodes(struct dnode_of_data *dn,
1032 			struct f2fs_inode *ri, int *offset, int depth)
1033 {
1034 	struct page *pages[2];
1035 	nid_t nid[3];
1036 	nid_t child_nid;
1037 	int err = 0;
1038 	int i;
1039 	int idx = depth - 2;
1040 
1041 	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1042 	if (!nid[0])
1043 		return 0;
1044 
1045 	/* get indirect nodes in the path */
1046 	for (i = 0; i < idx + 1; i++) {
1047 		/* the reference count will be increased */
1048 		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
1049 		if (IS_ERR(pages[i])) {
1050 			err = PTR_ERR(pages[i]);
1051 			idx = i - 1;
1052 			goto fail;
1053 		}
1054 		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
1055 	}
1056 
1057 	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
1058 
1059 	/* free direct nodes linked to a partial indirect node */
1060 	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
1061 		child_nid = get_nid(pages[idx], i, false);
1062 		if (!child_nid)
1063 			continue;
1064 		dn->nid = child_nid;
1065 		err = truncate_dnode(dn);
1066 		if (err < 0)
1067 			goto fail;
1068 		if (set_nid(pages[idx], i, 0, false))
1069 			dn->node_changed = true;
1070 	}
1071 
1072 	if (offset[idx + 1] == 0) {
1073 		dn->node_page = pages[idx];
1074 		dn->nid = nid[idx];
1075 		err = truncate_node(dn);
1076 		if (err)
1077 			goto fail;
1078 	} else {
1079 		f2fs_put_page(pages[idx], 1);
1080 	}
1081 	offset[idx]++;
1082 	offset[idx + 1] = 0;
1083 	idx--;
1084 fail:
1085 	for (i = idx; i >= 0; i--)
1086 		f2fs_put_page(pages[i], 1);
1087 
1088 	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
1089 
1090 	return err;
1091 }
1092 
1093 /*
1094  * All the block addresses of data and nodes should be nullified.
1095  */
1096 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
1097 {
1098 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1099 	int err = 0, cont = 1;
1100 	int level, offset[4], noffset[4];
1101 	unsigned int nofs = 0;
1102 	struct f2fs_inode *ri;
1103 	struct dnode_of_data dn;
1104 	struct page *page;
1105 
1106 	trace_f2fs_truncate_inode_blocks_enter(inode, from);
1107 
1108 	level = get_node_path(inode, from, offset, noffset);
1109 	if (level < 0) {
1110 		trace_f2fs_truncate_inode_blocks_exit(inode, level);
1111 		return level;
1112 	}
1113 
1114 	page = f2fs_get_node_page(sbi, inode->i_ino);
1115 	if (IS_ERR(page)) {
1116 		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
1117 		return PTR_ERR(page);
1118 	}
1119 
1120 	set_new_dnode(&dn, inode, page, NULL, 0);
1121 	unlock_page(page);
1122 
1123 	ri = F2FS_INODE(page);
1124 	switch (level) {
1125 	case 0:
1126 	case 1:
1127 		nofs = noffset[1];
1128 		break;
1129 	case 2:
1130 		nofs = noffset[1];
1131 		if (!offset[level - 1])
1132 			goto skip_partial;
1133 		err = truncate_partial_nodes(&dn, ri, offset, level);
1134 		if (err < 0 && err != -ENOENT)
1135 			goto fail;
1136 		nofs += 1 + NIDS_PER_BLOCK;
1137 		break;
1138 	case 3:
1139 		nofs = 5 + 2 * NIDS_PER_BLOCK;
1140 		if (!offset[level - 1])
1141 			goto skip_partial;
1142 		err = truncate_partial_nodes(&dn, ri, offset, level);
1143 		if (err < 0 && err != -ENOENT)
1144 			goto fail;
1145 		break;
1146 	default:
1147 		BUG();
1148 	}
1149 
1150 skip_partial:
1151 	while (cont) {
1152 		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1153 		switch (offset[0]) {
1154 		case NODE_DIR1_BLOCK:
1155 		case NODE_DIR2_BLOCK:
1156 			err = truncate_dnode(&dn);
1157 			break;
1158 
1159 		case NODE_IND1_BLOCK:
1160 		case NODE_IND2_BLOCK:
1161 			err = truncate_nodes(&dn, nofs, offset[1], 2);
1162 			break;
1163 
1164 		case NODE_DIND_BLOCK:
1165 			err = truncate_nodes(&dn, nofs, offset[1], 3);
1166 			cont = 0;
1167 			break;
1168 
1169 		default:
1170 			BUG();
1171 		}
1172 		if (err < 0 && err != -ENOENT)
1173 			goto fail;
1174 		if (offset[1] == 0 &&
1175 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
1176 			lock_page(page);
1177 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
1178 			f2fs_wait_on_page_writeback(page, NODE, true, true);
1179 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
1180 			set_page_dirty(page);
1181 			unlock_page(page);
1182 		}
1183 		offset[1] = 0;
1184 		offset[0]++;
1185 		nofs += err;
1186 	}
1187 fail:
1188 	f2fs_put_page(page, 0);
1189 	trace_f2fs_truncate_inode_blocks_exit(inode, err);
1190 	return err > 0 ? 0 : err;
1191 }
1192 
1193 /* caller must lock inode page */
1194 int f2fs_truncate_xattr_node(struct inode *inode)
1195 {
1196 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1197 	nid_t nid = F2FS_I(inode)->i_xattr_nid;
1198 	struct dnode_of_data dn;
1199 	struct page *npage;
1200 	int err;
1201 
1202 	if (!nid)
1203 		return 0;
1204 
1205 	npage = f2fs_get_node_page(sbi, nid);
1206 	if (IS_ERR(npage))
1207 		return PTR_ERR(npage);
1208 
1209 	set_new_dnode(&dn, inode, NULL, npage, nid);
1210 	err = truncate_node(&dn);
1211 	if (err) {
1212 		f2fs_put_page(npage, 1);
1213 		return err;
1214 	}
1215 
1216 	f2fs_i_xnid_write(inode, 0);
1217 
1218 	return 0;
1219 }
1220 
1221 /*
1222  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1223  * f2fs_unlock_op().
1224  */
1225 int f2fs_remove_inode_page(struct inode *inode)
1226 {
1227 	struct dnode_of_data dn;
1228 	int err;
1229 
1230 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1231 	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
1232 	if (err)
1233 		return err;
1234 
1235 	err = f2fs_truncate_xattr_node(inode);
1236 	if (err) {
1237 		f2fs_put_dnode(&dn);
1238 		return err;
1239 	}
1240 
1241 	/* remove potential inline_data blocks */
1242 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1243 				S_ISLNK(inode->i_mode))
1244 		f2fs_truncate_data_blocks_range(&dn, 1);
1245 
1246 	/* i_blocks == 0 is possible after f2fs_new_inode() has failed */
1247 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
1248 		f2fs_put_dnode(&dn);
1249 		return -EIO;
1250 	}
1251 
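	/*
	 * Note: i_blocks counts 512-byte sectors, so a bare inode holding
	 * no data blocks accounts for exactly 8 sectors (its own 4KB node
	 * block); 0 and 8 are therefore the only consistent values below.
	 */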
1252 	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
1253 		f2fs_warn(F2FS_I_SB(inode),
1254 			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
1255 			inode->i_ino, (unsigned long long)inode->i_blocks);
1256 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1257 	}
1258 
1259 	/* will put inode & node pages */
1260 	err = truncate_node(&dn);
1261 	if (err) {
1262 		f2fs_put_dnode(&dn);
1263 		return err;
1264 	}
1265 	return 0;
1266 }
1267 
1268 struct page *f2fs_new_inode_page(struct inode *inode)
1269 {
1270 	struct dnode_of_data dn;
1271 
1272 	/* allocate inode page for new inode */
1273 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1274 
1275 	/* caller should f2fs_put_page(page, 1); */
1276 	return f2fs_new_node_page(&dn, 0);
1277 }
1278 
1279 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1280 {
1281 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1282 	struct node_info new_ni;
1283 	struct page *page;
1284 	int err;
1285 
1286 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1287 		return ERR_PTR(-EPERM);
1288 
1289 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1290 	if (!page)
1291 		return ERR_PTR(-ENOMEM);
1292 
1293 	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1294 		goto fail;
1295 
1296 #ifdef CONFIG_F2FS_CHECK_FS
1297 	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
1298 	if (err) {
1299 		dec_valid_node_count(sbi, dn->inode, !ofs);
1300 		goto fail;
1301 	}
1302 	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
1303 		err = -EFSCORRUPTED;
1304 		set_sbi_flag(sbi, SBI_NEED_FSCK);
1305 		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1306 		goto fail;
1307 	}
1308 #endif
1309 	new_ni.nid = dn->nid;
1310 	new_ni.ino = dn->inode->i_ino;
1311 	new_ni.blk_addr = NULL_ADDR;
1312 	new_ni.flag = 0;
1313 	new_ni.version = 0;
1314 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1315 
1316 	f2fs_wait_on_page_writeback(page, NODE, true, true);
1317 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1318 	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1319 	if (!PageUptodate(page))
1320 		SetPageUptodate(page);
1321 	if (set_page_dirty(page))
1322 		dn->node_changed = true;
1323 
1324 	if (f2fs_has_xattr_block(ofs))
1325 		f2fs_i_xnid_write(dn->inode, dn->nid);
1326 
1327 	if (ofs == 0)
1328 		inc_valid_inode_count(sbi);
1329 	return page;
1330 
1331 fail:
1332 	clear_node_page_dirty(page);
1333 	f2fs_put_page(page, 1);
1334 	return ERR_PTR(err);
1335 }
1336 
1337 /*
1338  * The caller should clean up based on the return value:
1339  * 0: f2fs_put_page(page, 0)
1340  * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1341  */
1342 static int read_node_page(struct page *page, blk_opf_t op_flags)
1343 {
1344 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1345 	struct node_info ni;
1346 	struct f2fs_io_info fio = {
1347 		.sbi = sbi,
1348 		.type = NODE,
1349 		.op = REQ_OP_READ,
1350 		.op_flags = op_flags,
1351 		.page = page,
1352 		.encrypted_page = NULL,
1353 	};
1354 	int err;
1355 
1356 	if (PageUptodate(page)) {
1357 		if (!f2fs_inode_chksum_verify(sbi, page)) {
1358 			ClearPageUptodate(page);
1359 			return -EFSBADCRC;
1360 		}
1361 		return LOCKED_PAGE;
1362 	}
1363 
1364 	err = f2fs_get_node_info(sbi, page->index, &ni, false);
1365 	if (err)
1366 		return err;
1367 
1368 	/* NEW_ADDR can be seen after cp_error drops some dirty node pages */
1369 	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
1370 		ClearPageUptodate(page);
1371 		return -ENOENT;
1372 	}
1373 
1374 	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1375 
1376 	err = f2fs_submit_page_bio(&fio);
1377 
1378 	if (!err)
1379 		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);
1380 
1381 	return err;
1382 }
1383 
1384 /*
1385  * Readahead a node page
1386  */
1387 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1388 {
1389 	struct page *apage;
1390 	int err;
1391 
1392 	if (!nid)
1393 		return;
1394 	if (f2fs_check_nid_range(sbi, nid))
1395 		return;
1396 
1397 	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1398 	if (apage)
1399 		return;
1400 
1401 	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1402 	if (!apage)
1403 		return;
1404 
1405 	err = read_node_page(apage, REQ_RAHEAD);
1406 	f2fs_put_page(apage, err ? 1 : 0);
1407 }
1408 
1409 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1410 					struct page *parent, int start)
1411 {
1412 	struct page *page;
1413 	int err;
1414 
1415 	if (!nid)
1416 		return ERR_PTR(-ENOENT);
1417 	if (f2fs_check_nid_range(sbi, nid))
1418 		return ERR_PTR(-EINVAL);
1419 repeat:
1420 	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1421 	if (!page)
1422 		return ERR_PTR(-ENOMEM);
1423 
1424 	err = read_node_page(page, 0);
1425 	if (err < 0) {
1426 		goto out_put_err;
1427 	} else if (err == LOCKED_PAGE) {
1428 		err = 0;
1429 		goto page_hit;
1430 	}
1431 
1432 	if (parent)
1433 		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1434 
1435 	lock_page(page);
1436 
1437 	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1438 		f2fs_put_page(page, 1);
1439 		goto repeat;
1440 	}
1441 
1442 	if (unlikely(!PageUptodate(page))) {
1443 		err = -EIO;
1444 		goto out_err;
1445 	}
1446 
1447 	if (!f2fs_inode_chksum_verify(sbi, page)) {
1448 		err = -EFSBADCRC;
1449 		goto out_err;
1450 	}
1451 page_hit:
1452 	if (likely(nid == nid_of_node(page)))
1453 		return page;
1454 
1455 	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1456 			  nid, nid_of_node(page), ino_of_node(page),
1457 			  ofs_of_node(page), cpver_of_node(page),
1458 			  next_blkaddr_of_node(page));
1459 	set_sbi_flag(sbi, SBI_NEED_FSCK);
1460 	err = -EINVAL;
1461 out_err:
1462 	ClearPageUptodate(page);
1463 out_put_err:
1464 	/* -ENOENT comes from read_node_page and is not an error. */
1465 	if (err != -ENOENT)
1466 		f2fs_handle_page_eio(sbi, page->index, NODE);
1467 	f2fs_put_page(page, 1);
1468 	return ERR_PTR(err);
1469 }
1470 
1471 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1472 {
1473 	return __get_node_page(sbi, nid, NULL, 0);
1474 }
1475 
1476 struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1477 {
1478 	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1479 	nid_t nid = get_nid(parent, start, false);
1480 
1481 	return __get_node_page(sbi, nid, parent, start);
1482 }
1483 
1484 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1485 {
1486 	struct inode *inode;
1487 	struct page *page;
1488 	int ret;
1489 
1490 	/* should flush inline_data before evict_inode */
1491 	inode = ilookup(sbi->sb, ino);
1492 	if (!inode)
1493 		return;
1494 
1495 	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1496 					FGP_LOCK|FGP_NOWAIT, 0);
1497 	if (!page)
1498 		goto iput_out;
1499 
1500 	if (!PageUptodate(page))
1501 		goto page_out;
1502 
1503 	if (!PageDirty(page))
1504 		goto page_out;
1505 
1506 	if (!clear_page_dirty_for_io(page))
1507 		goto page_out;
1508 
1509 	ret = f2fs_write_inline_data(inode, page);
1510 	inode_dec_dirty_pages(inode);
1511 	f2fs_remove_dirty_inode(inode);
1512 	if (ret)
1513 		set_page_dirty(page);
1514 page_out:
1515 	f2fs_put_page(page, 1);
1516 iput_out:
1517 	iput(inode);
1518 }
1519 
1520 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1521 {
1522 	pgoff_t index;
1523 	struct folio_batch fbatch;
1524 	struct page *last_page = NULL;
1525 	int nr_folios;
1526 
1527 	folio_batch_init(&fbatch);
1528 	index = 0;
1529 
1530 	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1531 					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1532 					&fbatch))) {
1533 		int i;
1534 
1535 		for (i = 0; i < nr_folios; i++) {
1536 			struct page *page = &fbatch.folios[i]->page;
1537 
1538 			if (unlikely(f2fs_cp_error(sbi))) {
1539 				f2fs_put_page(last_page, 0);
1540 				folio_batch_release(&fbatch);
1541 				return ERR_PTR(-EIO);
1542 			}
1543 
1544 			if (!IS_DNODE(page) || !is_cold_node(page))
1545 				continue;
1546 			if (ino_of_node(page) != ino)
1547 				continue;
1548 
1549 			lock_page(page);
1550 
1551 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1552 continue_unlock:
1553 				unlock_page(page);
1554 				continue;
1555 			}
1556 			if (ino_of_node(page) != ino)
1557 				goto continue_unlock;
1558 
1559 			if (!PageDirty(page)) {
1560 				/* someone wrote it for us */
1561 				goto continue_unlock;
1562 			}
1563 
1564 			if (last_page)
1565 				f2fs_put_page(last_page, 0);
1566 
1567 			get_page(page);
1568 			last_page = page;
1569 			unlock_page(page);
1570 		}
1571 		folio_batch_release(&fbatch);
1572 		cond_resched();
1573 	}
1574 	return last_page;
1575 }
1576 
1577 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1578 				struct writeback_control *wbc, bool do_balance,
1579 				enum iostat_type io_type, unsigned int *seq_id)
1580 {
1581 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1582 	nid_t nid;
1583 	struct node_info ni;
1584 	struct f2fs_io_info fio = {
1585 		.sbi = sbi,
1586 		.ino = ino_of_node(page),
1587 		.type = NODE,
1588 		.op = REQ_OP_WRITE,
1589 		.op_flags = wbc_to_write_flags(wbc),
1590 		.page = page,
1591 		.encrypted_page = NULL,
1592 		.submitted = 0,
1593 		.io_type = io_type,
1594 		.io_wbc = wbc,
1595 	};
1596 	unsigned int seq;
1597 
1598 	trace_f2fs_writepage(page, NODE);
1599 
1600 	if (unlikely(f2fs_cp_error(sbi))) {
1601 		/* keep node pages in remount-ro mode */
1602 		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
1603 			goto redirty_out;
1604 		ClearPageUptodate(page);
1605 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1606 		unlock_page(page);
1607 		return 0;
1608 	}
1609 
1610 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1611 		goto redirty_out;
1612 
1613 	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1614 			wbc->sync_mode == WB_SYNC_NONE &&
1615 			IS_DNODE(page) && is_cold_node(page))
1616 		goto redirty_out;
1617 
1618 	/* get old block addr of this node page */
1619 	nid = nid_of_node(page);
1620 	f2fs_bug_on(sbi, page->index != nid);
1621 
1622 	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
1623 		goto redirty_out;
1624 
1625 	if (wbc->for_reclaim) {
1626 		if (!f2fs_down_read_trylock(&sbi->node_write))
1627 			goto redirty_out;
1628 	} else {
1629 		f2fs_down_read(&sbi->node_write);
1630 	}
1631 
1632 	/* This page is already truncated */
1633 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
1634 		ClearPageUptodate(page);
1635 		dec_page_count(sbi, F2FS_DIRTY_NODES);
1636 		f2fs_up_read(&sbi->node_write);
1637 		unlock_page(page);
1638 		return 0;
1639 	}
1640 
1641 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
1642 		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1643 					DATA_GENERIC_ENHANCE)) {
1644 		f2fs_up_read(&sbi->node_write);
1645 		goto redirty_out;
1646 	}
1647 
1648 	if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
1649 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1650 
1651 	/* should add to global list before clearing PAGECACHE status */
1652 	if (f2fs_in_warm_node_list(sbi, page)) {
1653 		seq = f2fs_add_fsync_node_entry(sbi, page);
1654 		if (seq_id)
1655 			*seq_id = seq;
1656 	}
1657 
1658 	set_page_writeback(page);
1659 
1660 	fio.old_blkaddr = ni.blk_addr;
1661 	f2fs_do_write_node_page(nid, &fio);
1662 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1663 	dec_page_count(sbi, F2FS_DIRTY_NODES);
1664 	f2fs_up_read(&sbi->node_write);
1665 
1666 	if (wbc->for_reclaim) {
1667 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
1668 		submitted = NULL;
1669 	}
1670 
1671 	unlock_page(page);
1672 
1673 	if (unlikely(f2fs_cp_error(sbi))) {
1674 		f2fs_submit_merged_write(sbi, NODE);
1675 		submitted = NULL;
1676 	}
1677 	if (submitted)
1678 		*submitted = fio.submitted;
1679 
1680 	if (do_balance)
1681 		f2fs_balance_fs(sbi, false);
1682 	return 0;
1683 
1684 redirty_out:
1685 	redirty_page_for_writepage(wbc, page);
1686 	return AOP_WRITEPAGE_ACTIVATE;
1687 }
1688 
1689 int f2fs_move_node_page(struct page *node_page, int gc_type)
1690 {
1691 	int err = 0;
1692 
1693 	if (gc_type == FG_GC) {
1694 		struct writeback_control wbc = {
1695 			.sync_mode = WB_SYNC_ALL,
1696 			.nr_to_write = 1,
1697 			.for_reclaim = 0,
1698 		};
1699 
1700 		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
1701 
1702 		set_page_dirty(node_page);
1703 
1704 		if (!clear_page_dirty_for_io(node_page)) {
1705 			err = -EAGAIN;
1706 			goto out_page;
1707 		}
1708 
1709 		if (__write_node_page(node_page, false, NULL,
1710 					&wbc, false, FS_GC_NODE_IO, NULL)) {
1711 			err = -EAGAIN;
1712 			unlock_page(node_page);
1713 		}
1714 		goto release_page;
1715 	} else {
1716 		/* set page dirty and write it */
1717 		if (!PageWriteback(node_page))
1718 			set_page_dirty(node_page);
1719 	}
1720 out_page:
1721 	unlock_page(node_page);
1722 release_page:
1723 	f2fs_put_page(node_page, 0);
1724 	return err;
1725 }
1726 
1727 static int f2fs_write_node_page(struct page *page,
1728 				struct writeback_control *wbc)
1729 {
1730 	return __write_node_page(page, false, NULL, wbc, false,
1731 						FS_NODE_IO, NULL);
1732 }
1733 
1734 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1735 			struct writeback_control *wbc, bool atomic,
1736 			unsigned int *seq_id)
1737 {
1738 	pgoff_t index;
1739 	struct folio_batch fbatch;
1740 	int ret = 0;
1741 	struct page *last_page = NULL;
1742 	bool marked = false;
1743 	nid_t ino = inode->i_ino;
1744 	int nr_folios;
1745 	int nwritten = 0;
1746 
1747 	if (atomic) {
1748 		last_page = last_fsync_dnode(sbi, ino);
1749 		if (IS_ERR_OR_NULL(last_page))
1750 			return PTR_ERR_OR_ZERO(last_page);
1751 	}
1752 retry:
1753 	folio_batch_init(&fbatch);
1754 	index = 0;
1755 
1756 	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1757 					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1758 					&fbatch))) {
1759 		int i;
1760 
1761 		for (i = 0; i < nr_folios; i++) {
1762 			struct page *page = &fbatch.folios[i]->page;
1763 			bool submitted = false;
1764 
1765 			if (unlikely(f2fs_cp_error(sbi))) {
1766 				f2fs_put_page(last_page, 0);
1767 				folio_batch_release(&fbatch);
1768 				ret = -EIO;
1769 				goto out;
1770 			}
1771 
1772 			if (!IS_DNODE(page) || !is_cold_node(page))
1773 				continue;
1774 			if (ino_of_node(page) != ino)
1775 				continue;
1776 
1777 			lock_page(page);
1778 
1779 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1780 continue_unlock:
1781 				unlock_page(page);
1782 				continue;
1783 			}
1784 			if (ino_of_node(page) != ino)
1785 				goto continue_unlock;
1786 
1787 			if (!PageDirty(page) && page != last_page) {
1788 				/* someone wrote it for us */
1789 				goto continue_unlock;
1790 			}
1791 
1792 			f2fs_wait_on_page_writeback(page, NODE, true, true);
1793 
1794 			set_fsync_mark(page, 0);
1795 			set_dentry_mark(page, 0);
1796 
1797 			if (!atomic || page == last_page) {
1798 				set_fsync_mark(page, 1);
1799 				percpu_counter_inc(&sbi->rf_node_block_count);
1800 				if (IS_INODE(page)) {
1801 					if (is_inode_flag_set(inode,
1802 								FI_DIRTY_INODE))
1803 						f2fs_update_inode(inode, page);
1804 					set_dentry_mark(page,
1805 						f2fs_need_dentry_mark(sbi, ino));
1806 				}
1807 				/* may have been written by another thread */
1808 				if (!PageDirty(page))
1809 					set_page_dirty(page);
1810 			}
1811 
1812 			if (!clear_page_dirty_for_io(page))
1813 				goto continue_unlock;
1814 
1815 			ret = __write_node_page(page, atomic &&
1816 						page == last_page,
1817 						&submitted, wbc, true,
1818 						FS_NODE_IO, seq_id);
1819 			if (ret) {
1820 				unlock_page(page);
1821 				f2fs_put_page(last_page, 0);
1822 				break;
1823 			} else if (submitted) {
1824 				nwritten++;
1825 			}
1826 
1827 			if (page == last_page) {
1828 				f2fs_put_page(page, 0);
1829 				marked = true;
1830 				break;
1831 			}
1832 		}
1833 		folio_batch_release(&fbatch);
1834 		cond_resched();
1835 
1836 		if (ret || marked)
1837 			break;
1838 	}
1839 	if (!ret && atomic && !marked) {
1840 		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1841 			   ino, last_page->index);
1842 		lock_page(last_page);
1843 		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1844 		set_page_dirty(last_page);
1845 		unlock_page(last_page);
1846 		goto retry;
1847 	}
1848 out:
1849 	if (nwritten)
1850 		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1851 	return ret ? -EIO : 0;
1852 }
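
/*
 * Fsync flow sketch (illustrative): a caller first writes the dirty
 * dnodes, then waits on the last sequence id handed out by
 * f2fs_add_fsync_node_entry():
 *
 *	unsigned int seq_id = 0;
 *	int err;
 *
 *	err = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
 *	if (!err)
 *		err = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
 */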
1853 
1854 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1855 {
1856 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1857 	bool clean;
1858 
1859 	if (inode->i_ino != ino)
1860 		return 0;
1861 
1862 	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1863 		return 0;
1864 
1865 	spin_lock(&sbi->inode_lock[DIRTY_META]);
1866 	clean = list_empty(&F2FS_I(inode)->gdirty_list);
1867 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1868 
1869 	if (clean)
1870 		return 0;
1871 
1872 	inode = igrab(inode);
1873 	if (!inode)
1874 		return 0;
1875 	return 1;
1876 }
1877 
1878 static bool flush_dirty_inode(struct page *page)
1879 {
1880 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1881 	struct inode *inode;
1882 	nid_t ino = ino_of_node(page);
1883 
1884 	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1885 	if (!inode)
1886 		return false;
1887 
1888 	f2fs_update_inode(inode, page);
1889 	unlock_page(page);
1890 
1891 	iput(inode);
1892 	return true;
1893 }
1894 
1895 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1896 {
1897 	pgoff_t index = 0;
1898 	struct folio_batch fbatch;
1899 	int nr_folios;
1900 
1901 	folio_batch_init(&fbatch);
1902 
1903 	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1904 					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1905 					&fbatch))) {
1906 		int i;
1907 
1908 		for (i = 0; i < nr_folios; i++) {
1909 			struct page *page = &fbatch.folios[i]->page;
1910 
1911 			if (!IS_DNODE(page))
1912 				continue;
1913 
1914 			lock_page(page);
1915 
1916 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1917 continue_unlock:
1918 				unlock_page(page);
1919 				continue;
1920 			}
1921 
1922 			if (!PageDirty(page)) {
1923 				/* someone wrote it for us */
1924 				goto continue_unlock;
1925 			}
1926 
1927 			/* flush inline_data if in an async context. */
1928 			if (page_private_inline(page)) {
1929 				clear_page_private_inline(page);
1930 				unlock_page(page);
1931 				flush_inline_data(sbi, ino_of_node(page));
1932 				continue;
1933 			}
1934 			unlock_page(page);
1935 		}
1936 		folio_batch_release(&fbatch);
1937 		cond_resched();
1938 	}
1939 }
1940 
1941 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1942 				struct writeback_control *wbc,
1943 				bool do_balance, enum iostat_type io_type)
1944 {
1945 	pgoff_t index;
1946 	struct folio_batch fbatch;
1947 	int step = 0;
1948 	int nwritten = 0;
1949 	int ret = 0;
1950 	int nr_folios, done = 0;
1951 
1952 	folio_batch_init(&fbatch);
1953 
1954 next_step:
1955 	index = 0;
1956 
1957 	while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
1958 				&index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1959 				&fbatch))) {
1960 		int i;
1961 
1962 		for (i = 0; i < nr_folios; i++) {
1963 			struct page *page = &fbatch.folios[i]->page;
1964 			bool submitted = false;
1965 
1966 			/* give priority to WB_SYNC threads */
1967 			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1968 					wbc->sync_mode == WB_SYNC_NONE) {
1969 				done = 1;
1970 				break;
1971 			}
1972 
1973 			/*
1974 			 * flushing sequence with step:
1975 			 * 0. indirect nodes
1976 			 * 1. dentry dnodes
1977 			 * 2. file dnodes
1978 			 */
1979 			if (step == 0 && IS_DNODE(page))
1980 				continue;
1981 			if (step == 1 && (!IS_DNODE(page) ||
1982 						is_cold_node(page)))
1983 				continue;
1984 			if (step == 2 && (!IS_DNODE(page) ||
1985 						!is_cold_node(page)))
1986 				continue;
1987 lock_node:
1988 			if (wbc->sync_mode == WB_SYNC_ALL)
1989 				lock_page(page);
1990 			else if (!trylock_page(page))
1991 				continue;
1992 
1993 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1994 continue_unlock:
1995 				unlock_page(page);
1996 				continue;
1997 			}
1998 
1999 			if (!PageDirty(page)) {
2000 				/* someone wrote it for us */
2001 				goto continue_unlock;
2002 			}
2003 
2004 			/* flush inline_data/inode if in an async context. */
2005 			if (!do_balance)
2006 				goto write_node;
2007 
2008 			/* flush inline_data */
2009 			if (page_private_inline(page)) {
2010 				clear_page_private_inline(page);
2011 				unlock_page(page);
2012 				flush_inline_data(sbi, ino_of_node(page));
2013 				goto lock_node;
2014 			}
2015 
2016 			/* flush dirty inode */
2017 			if (IS_INODE(page) && flush_dirty_inode(page))
2018 				goto lock_node;
2019 write_node:
2020 			f2fs_wait_on_page_writeback(page, NODE, true, true);
2021 
2022 			if (!clear_page_dirty_for_io(page))
2023 				goto continue_unlock;
2024 
2025 			set_fsync_mark(page, 0);
2026 			set_dentry_mark(page, 0);
2027 
2028 			ret = __write_node_page(page, false, &submitted,
2029 						wbc, do_balance, io_type, NULL);
2030 			if (ret)
2031 				unlock_page(page);
2032 			else if (submitted)
2033 				nwritten++;
2034 
2035 			if (--wbc->nr_to_write == 0)
2036 				break;
2037 		}
2038 		folio_batch_release(&fbatch);
2039 		cond_resched();
2040 
2041 		if (wbc->nr_to_write == 0) {
2042 			step = 2;
2043 			break;
2044 		}
2045 	}
2046 
2047 	if (step < 2) {
2048 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2049 				wbc->sync_mode == WB_SYNC_NONE && step == 1)
2050 			goto out;
2051 		step++;
2052 		goto next_step;
2053 	}
2054 out:
2055 	if (nwritten)
2056 		f2fs_submit_merged_write(sbi, NODE);
2057 
2058 	if (unlikely(f2fs_cp_error(sbi)))
2059 		return -EIO;
2060 	return ret;
2061 }
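
/*
 * Editorial sketch (not part of the original source): a minimal,
 * hypothetical caller of f2fs_sync_node_pages() that syncs every dirty
 * node page; the writeback_control values are assumptions chosen to
 * exercise the WB_SYNC_ALL path.
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.for_reclaim	= 0,
 *	};
 *	int err = f2fs_sync_node_pages(sbi, &wbc, false, FS_NODE_IO);
 *
 * The step loop above writes indirect nodes before dentry dnodes, and
 * dentry dnodes before file (cold) dnodes.
 */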
2062 
2063 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2064 						unsigned int seq_id)
2065 {
2066 	struct fsync_node_entry *fn;
2067 	struct page *page;
2068 	struct list_head *head = &sbi->fsync_node_list;
2069 	unsigned long flags;
2070 	unsigned int cur_seq_id = 0;
2071 
2072 	while (seq_id && cur_seq_id < seq_id) {
2073 		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2074 		if (list_empty(head)) {
2075 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2076 			break;
2077 		}
2078 		fn = list_first_entry(head, struct fsync_node_entry, list);
2079 		if (fn->seq_id > seq_id) {
2080 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2081 			break;
2082 		}
2083 		cur_seq_id = fn->seq_id;
2084 		page = fn->page;
2085 		get_page(page);
2086 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2087 
2088 		f2fs_wait_on_page_writeback(page, NODE, true, false);
2089 
2090 		put_page(page);
2091 	}
2092 
2093 	return filemap_check_errors(NODE_MAPPING(sbi));
2094 }
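
/*
 * Editorial note: fsync_node_list entries are appended in seq_id order,
 * so the loop above stops at the first entry whose seq_id exceeds the
 * caller's target; e.g. an fsync that queued node pages with seq_ids
 * 5..8 passes seq_id == 8 and returns once all four finish writeback.
 */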
2095 
2096 static int f2fs_write_node_pages(struct address_space *mapping,
2097 			    struct writeback_control *wbc)
2098 {
2099 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2100 	struct blk_plug plug;
2101 	long diff;
2102 
2103 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2104 		goto skip_write;
2105 
2106 	/* balance f2fs's metadata in the background */
2107 	f2fs_balance_fs_bg(sbi, true);
2108 
2109 	/* collect a number of dirty node pages and write them together */
2110 	if (wbc->sync_mode != WB_SYNC_ALL &&
2111 			get_pages(sbi, F2FS_DIRTY_NODES) <
2112 					nr_pages_to_skip(sbi, NODE))
2113 		goto skip_write;
2114 
2115 	if (wbc->sync_mode == WB_SYNC_ALL)
2116 		atomic_inc(&sbi->wb_sync_req[NODE]);
2117 	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2118 		/* to avoid potential deadlock */
2119 		if (current->plug)
2120 			blk_finish_plug(current->plug);
2121 		goto skip_write;
2122 	}
2123 
2124 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2125 
2126 	diff = nr_pages_to_write(sbi, NODE, wbc);
2127 	blk_start_plug(&plug);
2128 	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2129 	blk_finish_plug(&plug);
2130 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2131 
2132 	if (wbc->sync_mode == WB_SYNC_ALL)
2133 		atomic_dec(&sbi->wb_sync_req[NODE]);
2134 	return 0;
2135 
2136 skip_write:
2137 	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2138 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2139 	return 0;
2140 }
2141 
2142 static bool f2fs_dirty_node_folio(struct address_space *mapping,
2143 		struct folio *folio)
2144 {
2145 	trace_f2fs_set_page_dirty(&folio->page, NODE);
2146 
2147 	if (!folio_test_uptodate(folio))
2148 		folio_mark_uptodate(folio);
2149 #ifdef CONFIG_F2FS_CHECK_FS
2150 	if (IS_INODE(&folio->page))
2151 		f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
2152 #endif
2153 	if (filemap_dirty_folio(mapping, folio)) {
2154 		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2155 		set_page_private_reference(&folio->page);
2156 		return true;
2157 	}
2158 	return false;
2159 }
2160 
2161 /*
2162  * Structure of the f2fs node operations
2163  */
2164 const struct address_space_operations f2fs_node_aops = {
2165 	.writepage	= f2fs_write_node_page,
2166 	.writepages	= f2fs_write_node_pages,
2167 	.dirty_folio	= f2fs_dirty_node_folio,
2168 	.invalidate_folio = f2fs_invalidate_folio,
2169 	.release_folio	= f2fs_release_folio,
2170 	.migrate_folio	= filemap_migrate_folio,
2171 };
2172 
2173 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2174 						nid_t n)
2175 {
2176 	return radix_tree_lookup(&nm_i->free_nid_root, n);
2177 }
2178 
2179 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2180 				struct free_nid *i)
2181 {
2182 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2183 	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2184 
2185 	if (err)
2186 		return err;
2187 
2188 	nm_i->nid_cnt[FREE_NID]++;
2189 	list_add_tail(&i->list, &nm_i->free_nid_list);
2190 	return 0;
2191 }
2192 
2193 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2194 			struct free_nid *i, enum nid_state state)
2195 {
2196 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2197 
2198 	f2fs_bug_on(sbi, state != i->state);
2199 	nm_i->nid_cnt[state]--;
2200 	if (state == FREE_NID)
2201 		list_del(&i->list);
2202 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
2203 }
2204 
2205 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2206 			enum nid_state org_state, enum nid_state dst_state)
2207 {
2208 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2209 
2210 	f2fs_bug_on(sbi, org_state != i->state);
2211 	i->state = dst_state;
2212 	nm_i->nid_cnt[org_state]--;
2213 	nm_i->nid_cnt[dst_state]++;
2214 
2215 	switch (dst_state) {
2216 	case PREALLOC_NID:
2217 		list_del(&i->list);
2218 		break;
2219 	case FREE_NID:
2220 		list_add_tail(&i->list, &nm_i->free_nid_list);
2221 		break;
2222 	default:
2223 		BUG_ON(1);
2224 	}
2225 }
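
/*
 * Editorial sketch of the free nid state machine implemented by the
 * three helpers above (all states stay indexed in free_nid_root):
 *
 *	__insert_free_nid:  (new)         -> FREE_NID, on free_nid_list
 *	__move_free_nid:    FREE_NID     <-> PREALLOC_NID (only FREE_NID
 *	                                     entries stay on the list)
 *	__remove_free_nid:  either state  -> deleted from the radix tree
 */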
2226 
2227 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
2228 {
2229 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2230 	unsigned int i;
2231 	bool ret = true;
2232 
2233 	f2fs_down_read(&nm_i->nat_tree_lock);
2234 	for (i = 0; i < nm_i->nat_blocks; i++) {
2235 		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2236 			ret = false;
2237 			break;
2238 		}
2239 	}
2240 	f2fs_up_read(&nm_i->nat_tree_lock);
2241 
2242 	return ret;
2243 }
2244 
2245 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2246 							bool set, bool build)
2247 {
2248 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2249 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2250 	unsigned int nid_ofs = nid - START_NID(nid);
2251 
2252 	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2253 		return;
2254 
2255 	if (set) {
2256 		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2257 			return;
2258 		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2259 		nm_i->free_nid_count[nat_ofs]++;
2260 	} else {
2261 		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2262 			return;
2263 		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2264 		if (!build)
2265 			nm_i->free_nid_count[nat_ofs]--;
2266 	}
2267 }
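
/*
 * Editorial example of the offset math above, assuming 4KiB blocks
 * where NAT_ENTRY_PER_BLOCK == 455: for nid == 1000,
 * NAT_BLOCK_OFFSET(1000) == 2 and 1000 - START_NID(1000) == 90, so the
 * helper toggles bit 90 of free_nid_bitmap[2] and adjusts
 * free_nid_count[2] to match.
 */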
2268 
2269 /* return whether the nid is recognized as free */
2270 static bool add_free_nid(struct f2fs_sb_info *sbi,
2271 				nid_t nid, bool build, bool update)
2272 {
2273 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2274 	struct free_nid *i, *e;
2275 	struct nat_entry *ne;
2276 	int err = -EINVAL;
2277 	bool ret = false;
2278 
2279 	/* 0 nid should not be used */
2280 	if (unlikely(nid == 0))
2281 		return false;
2282 
2283 	if (unlikely(f2fs_check_nid_range(sbi, nid)))
2284 		return false;
2285 
2286 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2287 	i->nid = nid;
2288 	i->state = FREE_NID;
2289 
2290 	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2291 
2292 	spin_lock(&nm_i->nid_list_lock);
2293 
2294 	if (build) {
2295 		/*
2296 		 *   Thread A             Thread B
2297 		 *  - f2fs_create
2298 		 *   - f2fs_new_inode
2299 		 *    - f2fs_alloc_nid
2300 		 *     - __insert_nid_to_list(PREALLOC_NID)
2301 		 *                     - f2fs_balance_fs_bg
2302 		 *                      - f2fs_build_free_nids
2303 		 *                       - __f2fs_build_free_nids
2304 		 *                        - scan_nat_page
2305 		 *                         - add_free_nid
2306 		 *                          - __lookup_nat_cache
2307 		 *  - f2fs_add_link
2308 		 *   - f2fs_init_inode_metadata
2309 		 *    - f2fs_new_inode_page
2310 		 *     - f2fs_new_node_page
2311 		 *      - set_node_addr
2312 		 *  - f2fs_alloc_nid_done
2313 		 *   - __remove_nid_from_list(PREALLOC_NID)
2314 		 *                         - __insert_nid_to_list(FREE_NID)
2315 		 */
2316 		ne = __lookup_nat_cache(nm_i, nid);
2317 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2318 				nat_get_blkaddr(ne) != NULL_ADDR))
2319 			goto err_out;
2320 
2321 		e = __lookup_free_nid_list(nm_i, nid);
2322 		if (e) {
2323 			if (e->state == FREE_NID)
2324 				ret = true;
2325 			goto err_out;
2326 		}
2327 	}
2328 	ret = true;
2329 	err = __insert_free_nid(sbi, i);
2330 err_out:
2331 	if (update) {
2332 		update_free_nid_bitmap(sbi, nid, ret, build);
2333 		if (!build)
2334 			nm_i->available_nids++;
2335 	}
2336 	spin_unlock(&nm_i->nid_list_lock);
2337 	radix_tree_preload_end();
2338 
2339 	if (err)
2340 		kmem_cache_free(free_nid_slab, i);
2341 	return ret;
2342 }
2343 
2344 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2345 {
2346 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2347 	struct free_nid *i;
2348 	bool need_free = false;
2349 
2350 	spin_lock(&nm_i->nid_list_lock);
2351 	i = __lookup_free_nid_list(nm_i, nid);
2352 	if (i && i->state == FREE_NID) {
2353 		__remove_free_nid(sbi, i, FREE_NID);
2354 		need_free = true;
2355 	}
2356 	spin_unlock(&nm_i->nid_list_lock);
2357 
2358 	if (need_free)
2359 		kmem_cache_free(free_nid_slab, i);
2360 }
2361 
2362 static int scan_nat_page(struct f2fs_sb_info *sbi,
2363 			struct page *nat_page, nid_t start_nid)
2364 {
2365 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2366 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
2367 	block_t blk_addr;
2368 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2369 	int i;
2370 
2371 	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2372 
2373 	i = start_nid % NAT_ENTRY_PER_BLOCK;
2374 
2375 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2376 		if (unlikely(start_nid >= nm_i->max_nid))
2377 			break;
2378 
2379 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2380 
2381 		if (blk_addr == NEW_ADDR)
2382 			return -EINVAL;
2383 
2384 		if (blk_addr == NULL_ADDR) {
2385 			add_free_nid(sbi, start_nid, true, true);
2386 		} else {
2387 			spin_lock(&NM_I(sbi)->nid_list_lock);
2388 			update_free_nid_bitmap(sbi, start_nid, false, true);
2389 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2390 		}
2391 	}
2392 
2393 	return 0;
2394 }
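
/*
 * Editorial summary of the per-entry disposition in scan_nat_page():
 *
 *	NULL_ADDR -> the nid is free: cache it via add_free_nid()
 *	NEW_ADDR  -> never valid on disk: the NAT block is corrupted and
 *	             the scan aborts with -EINVAL
 *	otherwise -> the nid is in use: clear its free_nid_bitmap bit
 */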
2395 
2396 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2397 {
2398 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2399 	struct f2fs_journal *journal = curseg->journal;
2400 	int i;
2401 
2402 	down_read(&curseg->journal_rwsem);
2403 	for (i = 0; i < nats_in_cursum(journal); i++) {
2404 		block_t addr;
2405 		nid_t nid;
2406 
2407 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2408 		nid = le32_to_cpu(nid_in_journal(journal, i));
2409 		if (addr == NULL_ADDR)
2410 			add_free_nid(sbi, nid, true, false);
2411 		else
2412 			remove_free_nid(sbi, nid);
2413 	}
2414 	up_read(&curseg->journal_rwsem);
2415 }
2416 
2417 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2418 {
2419 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2420 	unsigned int i, idx;
2421 	nid_t nid;
2422 
2423 	f2fs_down_read(&nm_i->nat_tree_lock);
2424 
2425 	for (i = 0; i < nm_i->nat_blocks; i++) {
2426 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2427 			continue;
2428 		if (!nm_i->free_nid_count[i])
2429 			continue;
2430 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2431 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2432 						NAT_ENTRY_PER_BLOCK, idx);
2433 			if (idx >= NAT_ENTRY_PER_BLOCK)
2434 				break;
2435 
2436 			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2437 			add_free_nid(sbi, nid, true, false);
2438 
2439 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2440 				goto out;
2441 		}
2442 	}
2443 out:
2444 	scan_curseg_cache(sbi);
2445 
2446 	f2fs_up_read(&nm_i->nat_tree_lock);
2447 }
2448 
2449 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2450 						bool sync, bool mount)
2451 {
2452 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2453 	int i = 0, ret;
2454 	nid_t nid = nm_i->next_scan_nid;
2455 
2456 	if (unlikely(nid >= nm_i->max_nid))
2457 		nid = 0;
2458 
2459 	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2460 		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2461 
2462 	/* Enough entries */
2463 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2464 		return 0;
2465 
2466 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2467 		return 0;
2468 
2469 	if (!mount) {
2470 		/* try to find free nids in free_nid_bitmap */
2471 		scan_free_nid_bits(sbi);
2472 
2473 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2474 			return 0;
2475 	}
2476 
2477 	/* readahead nat pages to be scanned */
2478 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2479 							META_NAT, true);
2480 
2481 	f2fs_down_read(&nm_i->nat_tree_lock);
2482 
2483 	while (1) {
2484 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2485 						nm_i->nat_block_bitmap)) {
2486 			struct page *page = get_current_nat_page(sbi, nid);
2487 
2488 			if (IS_ERR(page)) {
2489 				ret = PTR_ERR(page);
2490 			} else {
2491 				ret = scan_nat_page(sbi, page, nid);
2492 				f2fs_put_page(page, 1);
2493 			}
2494 
2495 			if (ret) {
2496 				f2fs_up_read(&nm_i->nat_tree_lock);
2497 				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2498 				return ret;
2499 			}
2500 		}
2501 
2502 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2503 		if (unlikely(nid >= nm_i->max_nid))
2504 			nid = 0;
2505 
2506 		if (++i >= FREE_NID_PAGES)
2507 			break;
2508 	}
2509 
2510 	/* resume the next scan from the following NAT page to find free nids abundantly */
2511 	nm_i->next_scan_nid = nid;
2512 
2513 	/* find free nids from current sum_pages */
2514 	scan_curseg_cache(sbi);
2515 
2516 	f2fs_up_read(&nm_i->nat_tree_lock);
2517 
2518 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2519 					nm_i->ra_nid_pages, META_NAT, false);
2520 
2521 	return 0;
2522 }
2523 
2524 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2525 {
2526 	int ret;
2527 
2528 	mutex_lock(&NM_I(sbi)->build_lock);
2529 	ret = __f2fs_build_free_nids(sbi, sync, mount);
2530 	mutex_unlock(&NM_I(sbi)->build_lock);
2531 
2532 	return ret;
2533 }
2534 
2535 /*
2536  * If this function returns success, the caller can obtain a new nid
2537  * from the second parameter of this function.
2538  * The returned nid can be used as an ino as well as a nid when an inode is created.
2539  */
2540 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2541 {
2542 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2543 	struct free_nid *i = NULL;
2544 retry:
2545 	if (time_to_inject(sbi, FAULT_ALLOC_NID))
2546 		return false;
2547 
2548 	spin_lock(&nm_i->nid_list_lock);
2549 
2550 	if (unlikely(nm_i->available_nids == 0)) {
2551 		spin_unlock(&nm_i->nid_list_lock);
2552 		return false;
2553 	}
2554 
2555 	/* We should not use stale free nids created by f2fs_build_free_nids */
2556 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2557 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2558 		i = list_first_entry(&nm_i->free_nid_list,
2559 					struct free_nid, list);
2560 		*nid = i->nid;
2561 
2562 		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2563 		nm_i->available_nids--;
2564 
2565 		update_free_nid_bitmap(sbi, *nid, false, false);
2566 
2567 		spin_unlock(&nm_i->nid_list_lock);
2568 		return true;
2569 	}
2570 	spin_unlock(&nm_i->nid_list_lock);
2571 
2572 	/* Let's scan NAT pages and their caches to get free nids */
2573 	if (!f2fs_build_free_nids(sbi, true, false))
2574 		goto retry;
2575 	return false;
2576 }
2577 
2578 /*
2579  * f2fs_alloc_nid() should be called prior to this function.
2580  */
2581 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2582 {
2583 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2584 	struct free_nid *i;
2585 
2586 	spin_lock(&nm_i->nid_list_lock);
2587 	i = __lookup_free_nid_list(nm_i, nid);
2588 	f2fs_bug_on(sbi, !i);
2589 	__remove_free_nid(sbi, i, PREALLOC_NID);
2590 	spin_unlock(&nm_i->nid_list_lock);
2591 
2592 	kmem_cache_free(free_nid_slab, i);
2593 }
2594 
2595 /*
2596  * f2fs_alloc_nid() should be called prior to this function.
2597  */
2598 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2599 {
2600 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2601 	struct free_nid *i;
2602 	bool need_free = false;
2603 
2604 	if (!nid)
2605 		return;
2606 
2607 	spin_lock(&nm_i->nid_list_lock);
2608 	i = __lookup_free_nid_list(nm_i, nid);
2609 	f2fs_bug_on(sbi, !i);
2610 
2611 	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2612 		__remove_free_nid(sbi, i, PREALLOC_NID);
2613 		need_free = true;
2614 	} else {
2615 		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2616 	}
2617 
2618 	nm_i->available_nids++;
2619 
2620 	update_free_nid_bitmap(sbi, nid, true, false);
2621 
2622 	spin_unlock(&nm_i->nid_list_lock);
2623 
2624 	if (need_free)
2625 		kmem_cache_free(free_nid_slab, i);
2626 }
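
/*
 * Editorial sketch of the nid allocation lifecycle implied by the three
 * functions above; use_nid_for_inode() is a hypothetical consumer:
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))		// FREE_NID -> PREALLOC_NID
 *		return -ENOSPC;
 *	if (use_nid_for_inode(sbi, nid))	// hypothetical, may fail
 *		f2fs_alloc_nid_failed(sbi, nid);// back to FREE_NID (or freed)
 *	else
 *		f2fs_alloc_nid_done(sbi, nid);	// drops the PREALLOC_NID entry
 */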
2627 
2628 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2629 {
2630 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2631 	int nr = nr_shrink;
2632 
2633 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2634 		return 0;
2635 
2636 	if (!mutex_trylock(&nm_i->build_lock))
2637 		return 0;
2638 
2639 	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2640 		struct free_nid *i, *next;
2641 		unsigned int batch = SHRINK_NID_BATCH_SIZE;
2642 
2643 		spin_lock(&nm_i->nid_list_lock);
2644 		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2645 			if (!nr_shrink || !batch ||
2646 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2647 				break;
2648 			__remove_free_nid(sbi, i, FREE_NID);
2649 			kmem_cache_free(free_nid_slab, i);
2650 			nr_shrink--;
2651 			batch--;
2652 		}
2653 		spin_unlock(&nm_i->nid_list_lock);
2654 	}
2655 
2656 	mutex_unlock(&nm_i->build_lock);
2657 
2658 	return nr - nr_shrink;
2659 }
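
/*
 * Editorial note: f2fs_try_to_free_nids() only trims the surplus above
 * MAX_FREE_NIDS, releasing at most SHRINK_NID_BATCH_SIZE nids per lock
 * hold, and returns how many of the requested nr_shrink nids were
 * actually freed.
 */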
2660 
2661 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2662 {
2663 	void *src_addr, *dst_addr;
2664 	size_t inline_size;
2665 	struct page *ipage;
2666 	struct f2fs_inode *ri;
2667 
2668 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2669 	if (IS_ERR(ipage))
2670 		return PTR_ERR(ipage);
2671 
2672 	ri = F2FS_INODE(page);
2673 	if (ri->i_inline & F2FS_INLINE_XATTR) {
2674 		if (!f2fs_has_inline_xattr(inode)) {
2675 			set_inode_flag(inode, FI_INLINE_XATTR);
2676 			stat_inc_inline_xattr(inode);
2677 		}
2678 	} else {
2679 		if (f2fs_has_inline_xattr(inode)) {
2680 			stat_dec_inline_xattr(inode);
2681 			clear_inode_flag(inode, FI_INLINE_XATTR);
2682 		}
2683 		goto update_inode;
2684 	}
2685 
2686 	dst_addr = inline_xattr_addr(inode, ipage);
2687 	src_addr = inline_xattr_addr(inode, page);
2688 	inline_size = inline_xattr_size(inode);
2689 
2690 	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2691 	memcpy(dst_addr, src_addr, inline_size);
2692 update_inode:
2693 	f2fs_update_inode(inode, ipage);
2694 	f2fs_put_page(ipage, 1);
2695 	return 0;
2696 }
2697 
2698 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2699 {
2700 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2701 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2702 	nid_t new_xnid;
2703 	struct dnode_of_data dn;
2704 	struct node_info ni;
2705 	struct page *xpage;
2706 	int err;
2707 
2708 	if (!prev_xnid)
2709 		goto recover_xnid;
2710 
2711 	/* 1: invalidate the previous xattr nid */
2712 	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2713 	if (err)
2714 		return err;
2715 
2716 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2717 	dec_valid_node_count(sbi, inode, false);
2718 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2719 
2720 recover_xnid:
2721 	/* 2: update xattr nid in inode */
2722 	if (!f2fs_alloc_nid(sbi, &new_xnid))
2723 		return -ENOSPC;
2724 
2725 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2726 	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2727 	if (IS_ERR(xpage)) {
2728 		f2fs_alloc_nid_failed(sbi, new_xnid);
2729 		return PTR_ERR(xpage);
2730 	}
2731 
2732 	f2fs_alloc_nid_done(sbi, new_xnid);
2733 	f2fs_update_inode_page(inode);
2734 
2735 	/* 3: update and set xattr node page dirty */
2736 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2737 
2738 	set_page_dirty(xpage);
2739 	f2fs_put_page(xpage, 1);
2740 
2741 	return 0;
2742 }
2743 
2744 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2745 {
2746 	struct f2fs_inode *src, *dst;
2747 	nid_t ino = ino_of_node(page);
2748 	struct node_info old_ni, new_ni;
2749 	struct page *ipage;
2750 	int err;
2751 
2752 	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2753 	if (err)
2754 		return err;
2755 
2756 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2757 		return -EINVAL;
2758 retry:
2759 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2760 	if (!ipage) {
2761 		memalloc_retry_wait(GFP_NOFS);
2762 		goto retry;
2763 	}
2764 
2765 	/* Make sure this ino is not reused: drop it from the free nid list */
2766 	remove_free_nid(sbi, ino);
2767 
2768 	if (!PageUptodate(ipage))
2769 		SetPageUptodate(ipage);
2770 	fill_node_footer(ipage, ino, ino, 0, true);
2771 	set_cold_node(ipage, false);
2772 
2773 	src = F2FS_INODE(page);
2774 	dst = F2FS_INODE(ipage);
2775 
2776 	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2777 	dst->i_size = 0;
2778 	dst->i_blocks = cpu_to_le64(1);
2779 	dst->i_links = cpu_to_le32(1);
2780 	dst->i_xattr_nid = 0;
2781 	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2782 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2783 		dst->i_extra_isize = src->i_extra_isize;
2784 
2785 		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2786 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2787 							i_inline_xattr_size))
2788 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
2789 
2790 		if (f2fs_sb_has_project_quota(sbi) &&
2791 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2792 								i_projid))
2793 			dst->i_projid = src->i_projid;
2794 
2795 		if (f2fs_sb_has_inode_crtime(sbi) &&
2796 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2797 							i_crtime_nsec)) {
2798 			dst->i_crtime = src->i_crtime;
2799 			dst->i_crtime_nsec = src->i_crtime_nsec;
2800 		}
2801 	}
2802 
2803 	new_ni = old_ni;
2804 	new_ni.ino = ino;
2805 
2806 	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2807 		WARN_ON(1);
2808 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2809 	inc_valid_inode_count(sbi);
2810 	set_page_dirty(ipage);
2811 	f2fs_put_page(ipage, 1);
2812 	return 0;
2813 }
2814 
2815 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2816 			unsigned int segno, struct f2fs_summary_block *sum)
2817 {
2818 	struct f2fs_node *rn;
2819 	struct f2fs_summary *sum_entry;
2820 	block_t addr;
2821 	int i, idx, last_offset, nrpages;
2822 
2823 	/* scan the node segment */
2824 	last_offset = sbi->blocks_per_seg;
2825 	addr = START_BLOCK(sbi, segno);
2826 	sum_entry = &sum->entries[0];
2827 
2828 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2829 		nrpages = bio_max_segs(last_offset - i);
2830 
2831 		/* readahead node pages */
2832 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2833 
2834 		for (idx = addr; idx < addr + nrpages; idx++) {
2835 			struct page *page = f2fs_get_tmp_page(sbi, idx);
2836 
2837 			if (IS_ERR(page))
2838 				return PTR_ERR(page);
2839 
2840 			rn = F2FS_NODE(page);
2841 			sum_entry->nid = rn->footer.nid;
2842 			sum_entry->version = 0;
2843 			sum_entry->ofs_in_node = 0;
2844 			sum_entry++;
2845 			f2fs_put_page(page, 1);
2846 		}
2847 
2848 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2849 							addr + nrpages);
2850 	}
2851 	return 0;
2852 }
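
/*
 * Editorial note: only the owning nid survives into each restored
 * summary entry; version and ofs_in_node are zeroed since they are not
 * needed for node segment summaries.
 */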
2853 
2854 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2855 {
2856 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2857 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2858 	struct f2fs_journal *journal = curseg->journal;
2859 	int i;
2860 
2861 	down_write(&curseg->journal_rwsem);
2862 	for (i = 0; i < nats_in_cursum(journal); i++) {
2863 		struct nat_entry *ne;
2864 		struct f2fs_nat_entry raw_ne;
2865 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2866 
2867 		if (f2fs_check_nid_range(sbi, nid))
2868 			continue;
2869 
2870 		raw_ne = nat_in_journal(journal, i);
2871 
2872 		ne = __lookup_nat_cache(nm_i, nid);
2873 		if (!ne) {
2874 			ne = __alloc_nat_entry(sbi, nid, true);
2875 			__init_nat_entry(nm_i, ne, &raw_ne, true);
2876 		}
2877 
2878 		/*
2879 		 * if a free nat in the journal has not been used since the
2880 		 * last checkpoint, we should remove it from the available
2881 		 * nids, since we will add it again later.
2882 		 */
2883 		if (!get_nat_flag(ne, IS_DIRTY) &&
2884 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2885 			spin_lock(&nm_i->nid_list_lock);
2886 			nm_i->available_nids--;
2887 			spin_unlock(&nm_i->nid_list_lock);
2888 		}
2889 
2890 		__set_nat_cache_dirty(nm_i, ne);
2891 	}
2892 	update_nats_in_cursum(journal, -i);
2893 	up_write(&curseg->journal_rwsem);
2894 }
2895 
2896 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2897 						struct list_head *head, int max)
2898 {
2899 	struct nat_entry_set *cur;
2900 
2901 	if (nes->entry_cnt >= max)
2902 		goto add_out;
2903 
2904 	list_for_each_entry(cur, head, set_list) {
2905 		if (cur->entry_cnt >= nes->entry_cnt) {
2906 			list_add(&nes->set_list, cur->set_list.prev);
2907 			return;
2908 		}
2909 	}
2910 add_out:
2911 	list_add_tail(&nes->set_list, head);
2912 }
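
/*
 * Editorial note: the "sets" list is kept sorted by ascending entry_cnt,
 * with any set at or above "max" (the remaining journal capacity)
 * appended to the tail, so small sets get first claim on journal space
 * when the list is flushed in order.
 */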
2913 
2914 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
2915 							unsigned int valid)
2916 {
2917 	if (valid == 0) {
2918 		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
2919 		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2920 		return;
2921 	}
2922 
2923 	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
2924 	if (valid == NAT_ENTRY_PER_BLOCK)
2925 		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
2926 	else
2927 		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2928 }
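
/*
 * Editorial truth table for __update_nat_bits():
 *
 *	valid == 0                       -> empty bit set,   full bit clear
 *	0 < valid < NAT_ENTRY_PER_BLOCK  -> both bits clear
 *	valid == NAT_ENTRY_PER_BLOCK     -> empty bit clear, full bit set
 */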
2929 
2930 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2931 						struct page *page)
2932 {
2933 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2934 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2935 	struct f2fs_nat_block *nat_blk = page_address(page);
2936 	int valid = 0;
2937 	int i = 0;
2938 
2939 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
2940 		return;
2941 
2942 	if (nat_index == 0) {
2943 		valid = 1;
2944 		i = 1;
2945 	}
2946 	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2947 		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2948 			valid++;
2949 	}
2950 
2951 	__update_nat_bits(nm_i, nat_index, valid);
2952 }
2953 
2954 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
2955 {
2956 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2957 	unsigned int nat_ofs;
2958 
2959 	f2fs_down_read(&nm_i->nat_tree_lock);
2960 
2961 	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
2962 		unsigned int valid = 0, nid_ofs = 0;
2963 
2964 		/* handle nid zero separately, since it should never be used */
2965 		if (unlikely(nat_ofs == 0)) {
2966 			valid = 1;
2967 			nid_ofs = 1;
2968 		}
2969 
2970 		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
2971 			if (!test_bit_le(nid_ofs,
2972 					nm_i->free_nid_bitmap[nat_ofs]))
2973 				valid++;
2974 		}
2975 
2976 		__update_nat_bits(nm_i, nat_ofs, valid);
2977 	}
2978 
2979 	f2fs_up_read(&nm_i->nat_tree_lock);
2980 }
2981 
2982 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2983 		struct nat_entry_set *set, struct cp_control *cpc)
2984 {
2985 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2986 	struct f2fs_journal *journal = curseg->journal;
2987 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2988 	bool to_journal = true;
2989 	struct f2fs_nat_block *nat_blk;
2990 	struct nat_entry *ne, *cur;
2991 	struct page *page = NULL;
2992 
2993 	/*
2994 	 * there are two ways to flush nat entries:
2995 	 * #1, flush them to the journal in the current hot data summary block.
2996 	 * #2, flush them to the nat page directly.
2997 	 */
2998 	if ((cpc->reason & CP_UMOUNT) ||
2999 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
3000 		to_journal = false;
3001 
3002 	if (to_journal) {
3003 		down_write(&curseg->journal_rwsem);
3004 	} else {
3005 		page = get_next_nat_page(sbi, start_nid);
3006 		if (IS_ERR(page))
3007 			return PTR_ERR(page);
3008 
3009 		nat_blk = page_address(page);
3010 		f2fs_bug_on(sbi, !nat_blk);
3011 	}
3012 
3013 	/* flush dirty nats in nat entry set */
3014 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
3015 		struct f2fs_nat_entry *raw_ne;
3016 		nid_t nid = nat_get_nid(ne);
3017 		int offset;
3018 
3019 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
3020 
3021 		if (to_journal) {
3022 			offset = f2fs_lookup_journal_in_cursum(journal,
3023 							NAT_JOURNAL, nid, 1);
3024 			f2fs_bug_on(sbi, offset < 0);
3025 			raw_ne = &nat_in_journal(journal, offset);
3026 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
3027 		} else {
3028 			raw_ne = &nat_blk->entries[nid - start_nid];
3029 		}
3030 		raw_nat_from_node_info(raw_ne, &ne->ni);
3031 		nat_reset_flag(ne);
3032 		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
3033 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
3034 			add_free_nid(sbi, nid, false, true);
3035 		} else {
3036 			spin_lock(&NM_I(sbi)->nid_list_lock);
3037 			update_free_nid_bitmap(sbi, nid, false, false);
3038 			spin_unlock(&NM_I(sbi)->nid_list_lock);
3039 		}
3040 	}
3041 
3042 	if (to_journal) {
3043 		up_write(&curseg->journal_rwsem);
3044 	} else {
3045 		update_nat_bits(sbi, start_nid, page);
3046 		f2fs_put_page(page, 1);
3047 	}
3048 
3049 	/* Allow dirty nats by node block allocation in write_begin */
3050 	if (!set->entry_cnt) {
3051 		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3052 		kmem_cache_free(nat_entry_set_slab, set);
3053 	}
3054 	return 0;
3055 }
3056 
3057 /*
3058  * This function is called during the checkpointing process.
3059  */
3060 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3061 {
3062 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3063 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3064 	struct f2fs_journal *journal = curseg->journal;
3065 	struct nat_entry_set *setvec[NAT_VEC_SIZE];
3066 	struct nat_entry_set *set, *tmp;
3067 	unsigned int found;
3068 	nid_t set_idx = 0;
3069 	LIST_HEAD(sets);
3070 	int err = 0;
3071 
3072 	/*
3073 	 * during unmount, let's flush nat_bits before checking
3074 	 * nat_cnt[DIRTY_NAT].
3075 	 */
3076 	if (cpc->reason & CP_UMOUNT) {
3077 		f2fs_down_write(&nm_i->nat_tree_lock);
3078 		remove_nats_in_journal(sbi);
3079 		f2fs_up_write(&nm_i->nat_tree_lock);
3080 	}
3081 
3082 	if (!nm_i->nat_cnt[DIRTY_NAT])
3083 		return 0;
3084 
3085 	f2fs_down_write(&nm_i->nat_tree_lock);
3086 
3087 	/*
3088 	 * if there is not enough space in the journal to store the dirty
3089 	 * nat entries, remove all entries from the journal and merge them
3090 	 * into the nat entry sets.
3091 	 */
3092 	if (cpc->reason & CP_UMOUNT ||
3093 		!__has_cursum_space(journal,
3094 			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3095 		remove_nats_in_journal(sbi);
3096 
3097 	while ((found = __gang_lookup_nat_set(nm_i,
3098 					set_idx, NAT_VEC_SIZE, setvec))) {
3099 		unsigned idx;
3100 
3101 		set_idx = setvec[found - 1]->set + 1;
3102 		for (idx = 0; idx < found; idx++)
3103 			__adjust_nat_entry_set(setvec[idx], &sets,
3104 						MAX_NAT_JENTRIES(journal));
3105 	}
3106 
3107 	/* flush dirty nats in nat entry set */
3108 	list_for_each_entry_safe(set, tmp, &sets, set_list) {
3109 		err = __flush_nat_entry_set(sbi, set, cpc);
3110 		if (err)
3111 			break;
3112 	}
3113 
3114 	f2fs_up_write(&nm_i->nat_tree_lock);
3115 	/* Allow dirty nats by node block allocation in write_begin */
3116 
3117 	return err;
3118 }
3119 
3120 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3121 {
3122 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3123 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3124 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3125 	unsigned int i;
3126 	__u64 cp_ver = cur_cp_version(ckpt);
3127 	block_t nat_bits_addr;
3128 
3129 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3130 	nm_i->nat_bits = f2fs_kvzalloc(sbi,
3131 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3132 	if (!nm_i->nat_bits)
3133 		return -ENOMEM;
3134 
3135 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
3136 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3137 
3138 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3139 		return 0;
3140 
3141 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
3142 						nm_i->nat_bits_blocks;
3143 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3144 		struct page *page;
3145 
3146 		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
3147 		if (IS_ERR(page))
3148 			return PTR_ERR(page);
3149 
3150 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3151 					page_address(page), F2FS_BLKSIZE);
3152 		f2fs_put_page(page, 1);
3153 	}
3154 
3155 	cp_ver |= (cur_cp_crc(ckpt) << 32);
3156 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3157 		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
3158 		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
3159 			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
3160 		return 0;
3161 	}
3162 
3163 	f2fs_notice(sbi, "Found nat_bits in checkpoint");
3164 	return 0;
3165 }
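
/*
 * Editorial sketch of the nat_bits area parsed above, derived from the
 * offsets used in this function (sizes in bytes):
 *
 *	+------------+----------------------+----------------------+
 *	| cp_ver (8) | full_nat_bits        | empty_nat_bits       |
 *	|            | (nat_blocks / 8)     | (nat_blocks / 8)     |
 *	+------------+----------------------+----------------------+
 *
 * The area is rounded up to nat_bits_blocks on-disk blocks at the tail
 * of the checkpoint segment; a cp_ver/crc mismatch merely disables the
 * nat_bits feature instead of failing the mount.
 */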
3166 
3167 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3168 {
3169 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3170 	unsigned int i = 0;
3171 	nid_t nid, last_nid;
3172 
3173 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3174 		return;
3175 
3176 	for (i = 0; i < nm_i->nat_blocks; i++) {
3177 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3178 		if (i >= nm_i->nat_blocks)
3179 			break;
3180 
3181 		__set_bit_le(i, nm_i->nat_block_bitmap);
3182 
3183 		nid = i * NAT_ENTRY_PER_BLOCK;
3184 		last_nid = nid + NAT_ENTRY_PER_BLOCK;
3185 
3186 		spin_lock(&NM_I(sbi)->nid_list_lock);
3187 		for (; nid < last_nid; nid++)
3188 			update_free_nid_bitmap(sbi, nid, true, true);
3189 		spin_unlock(&NM_I(sbi)->nid_list_lock);
3190 	}
3191 
3192 	for (i = 0; i < nm_i->nat_blocks; i++) {
3193 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3194 		if (i >= nm_i->nat_blocks)
3195 			break;
3196 
3197 		__set_bit_le(i, nm_i->nat_block_bitmap);
3198 	}
3199 }
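
/*
 * Editorial note: NAT blocks flagged empty get every nid marked free in
 * one pass, blocks flagged full are only marked as scanned (no free
 * nids), and partial blocks are left for scan_nat_page() to read from
 * disk later.
 */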
3200 
3201 static int init_node_manager(struct f2fs_sb_info *sbi)
3202 {
3203 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3204 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3205 	unsigned char *version_bitmap;
3206 	unsigned int nat_segs;
3207 	int err;
3208 
3209 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3210 
3211 	/* segment_count_nat includes pair segments, so divide by 2. */
3212 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3213 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3214 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3215 
3216 	/* unused nids: 0, node, meta (and root, which is counted as a valid node) */
3217 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3218 						F2FS_RESERVED_NODE_NUM;
3219 	nm_i->nid_cnt[FREE_NID] = 0;
3220 	nm_i->nid_cnt[PREALLOC_NID] = 0;
3221 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3222 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3223 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3224 	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
3225 
3226 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3227 	INIT_LIST_HEAD(&nm_i->free_nid_list);
3228 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3229 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3230 	INIT_LIST_HEAD(&nm_i->nat_entries);
3231 	spin_lock_init(&nm_i->nat_list_lock);
3232 
3233 	mutex_init(&nm_i->build_lock);
3234 	spin_lock_init(&nm_i->nid_list_lock);
3235 	init_f2fs_rwsem(&nm_i->nat_tree_lock);
3236 
3237 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3238 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3239 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3240 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3241 					GFP_KERNEL);
3242 	if (!nm_i->nat_bitmap)
3243 		return -ENOMEM;
3244 
3245 	err = __get_nat_bitmaps(sbi);
3246 	if (err)
3247 		return err;
3248 
3249 #ifdef CONFIG_F2FS_CHECK_FS
3250 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3251 					GFP_KERNEL);
3252 	if (!nm_i->nat_bitmap_mir)
3253 		return -ENOMEM;
3254 #endif
3255 
3256 	return 0;
3257 }
3258 
3259 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3260 {
3261 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3262 	int i;
3263 
3264 	nm_i->free_nid_bitmap =
3265 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3266 					      nm_i->nat_blocks),
3267 			      GFP_KERNEL);
3268 	if (!nm_i->free_nid_bitmap)
3269 		return -ENOMEM;
3270 
3271 	for (i = 0; i < nm_i->nat_blocks; i++) {
3272 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3273 			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3274 		if (!nm_i->free_nid_bitmap[i])
3275 			return -ENOMEM;
3276 	}
3277 
3278 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3279 								GFP_KERNEL);
3280 	if (!nm_i->nat_block_bitmap)
3281 		return -ENOMEM;
3282 
3283 	nm_i->free_nid_count =
3284 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3285 					      nm_i->nat_blocks),
3286 			      GFP_KERNEL);
3287 	if (!nm_i->free_nid_count)
3288 		return -ENOMEM;
3289 	return 0;
3290 }
3291 
3292 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3293 {
3294 	int err;
3295 
3296 	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3297 							GFP_KERNEL);
3298 	if (!sbi->nm_info)
3299 		return -ENOMEM;
3300 
3301 	err = init_node_manager(sbi);
3302 	if (err)
3303 		return err;
3304 
3305 	err = init_free_nid_cache(sbi);
3306 	if (err)
3307 		return err;
3308 
3309 	/* load free nid status from nat_bits table */
3310 	load_free_nid_bitmap(sbi);
3311 
3312 	return f2fs_build_free_nids(sbi, true, true);
3313 }
3314 
3315 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3316 {
3317 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3318 	struct free_nid *i, *next_i;
3319 	void *vec[NAT_VEC_SIZE];
3320 	struct nat_entry **natvec = (struct nat_entry **)vec;
3321 	struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
3322 	nid_t nid = 0;
3323 	unsigned int found;
3324 
3325 	if (!nm_i)
3326 		return;
3327 
3328 	/* destroy free nid list */
3329 	spin_lock(&nm_i->nid_list_lock);
3330 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3331 		__remove_free_nid(sbi, i, FREE_NID);
3332 		spin_unlock(&nm_i->nid_list_lock);
3333 		kmem_cache_free(free_nid_slab, i);
3334 		spin_lock(&nm_i->nid_list_lock);
3335 	}
3336 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3337 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3338 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3339 	spin_unlock(&nm_i->nid_list_lock);
3340 
3341 	/* destroy nat cache */
3342 	f2fs_down_write(&nm_i->nat_tree_lock);
3343 	while ((found = __gang_lookup_nat_cache(nm_i,
3344 					nid, NAT_VEC_SIZE, natvec))) {
3345 		unsigned idx;
3346 
3347 		nid = nat_get_nid(natvec[found - 1]) + 1;
3348 		for (idx = 0; idx < found; idx++) {
3349 			spin_lock(&nm_i->nat_list_lock);
3350 			list_del(&natvec[idx]->list);
3351 			spin_unlock(&nm_i->nat_list_lock);
3352 
3353 			__del_from_nat_cache(nm_i, natvec[idx]);
3354 		}
3355 	}
3356 	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3357 
3358 	/* destroy nat set cache */
3359 	nid = 0;
3360 	memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
3361 	while ((found = __gang_lookup_nat_set(nm_i,
3362 					nid, NAT_VEC_SIZE, setvec))) {
3363 		unsigned idx;
3364 
3365 		nid = setvec[found - 1]->set + 1;
3366 		for (idx = 0; idx < found; idx++) {
3367 			/* entry_cnt is not zero, when cp_error was occurred */
3368 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3369 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3370 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3371 		}
3372 	}
3373 	f2fs_up_write(&nm_i->nat_tree_lock);
3374 
3375 	kvfree(nm_i->nat_block_bitmap);
3376 	if (nm_i->free_nid_bitmap) {
3377 		int i;
3378 
3379 		for (i = 0; i < nm_i->nat_blocks; i++)
3380 			kvfree(nm_i->free_nid_bitmap[i]);
3381 		kvfree(nm_i->free_nid_bitmap);
3382 	}
3383 	kvfree(nm_i->free_nid_count);
3384 
3385 	kvfree(nm_i->nat_bitmap);
3386 	kvfree(nm_i->nat_bits);
3387 #ifdef CONFIG_F2FS_CHECK_FS
3388 	kvfree(nm_i->nat_bitmap_mir);
3389 #endif
3390 	sbi->nm_info = NULL;
3391 	kfree(nm_i);
3392 }
3393 
3394 int __init f2fs_create_node_manager_caches(void)
3395 {
3396 	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3397 			sizeof(struct nat_entry));
3398 	if (!nat_entry_slab)
3399 		goto fail;
3400 
3401 	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3402 			sizeof(struct free_nid));
3403 	if (!free_nid_slab)
3404 		goto destroy_nat_entry;
3405 
3406 	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3407 			sizeof(struct nat_entry_set));
3408 	if (!nat_entry_set_slab)
3409 		goto destroy_free_nid;
3410 
3411 	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3412 			sizeof(struct fsync_node_entry));
3413 	if (!fsync_node_entry_slab)
3414 		goto destroy_nat_entry_set;
3415 	return 0;
3416 
3417 destroy_nat_entry_set:
3418 	kmem_cache_destroy(nat_entry_set_slab);
3419 destroy_free_nid:
3420 	kmem_cache_destroy(free_nid_slab);
3421 destroy_nat_entry:
3422 	kmem_cache_destroy(nat_entry_slab);
3423 fail:
3424 	return -ENOMEM;
3425 }
3426 
3427 void f2fs_destroy_node_manager_caches(void)
3428 {
3429 	kmem_cache_destroy(fsync_node_entry_slab);
3430 	kmem_cache_destroy(nat_entry_set_slab);
3431 	kmem_cache_destroy(free_nid_slab);
3432 	kmem_cache_destroy(nat_entry_slab);
3433 }
3434