xref: /openbmc/linux/fs/f2fs/recovery.c (revision b60a5b8d)
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

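/*
 * Check that the blocks allocated since the last checkpoint still fit
 * inside the user-visible block count, i.e. that roll-forward recovery
 * has room to replay the fsynced data.
 */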
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

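/* Look up @ino in the list of inodes collected from fsynced dnodes. */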
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

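/*
 * Grab the in-memory inode for @ino and queue it on @head for replay.
 * @quota_inode is set by the caller when the inode was just re-created
 * from a dentry-marked node page, in which case the new inode is also
 * charged against quota.
 */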
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

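/*
 * Unhook an entry from the fsync list. When @drop is set the inode
 * must not be written back, so it is marked as already synced before
 * the final iput().
 */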
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

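/*
 * Re-link a recovered inode into its parent directory using the name
 * stored in the raw inode. If the name currently points at a different
 * inode, that stale inode is unlinked (as an orphan) first and the
 * lookup is retried; -ENOMEM is retried as well.
 */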
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

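/*
 * When the uid/gid recorded in the node page differ from the in-memory
 * inode, move the quota charges over to the recorded owner before
 * recover_inode() overwrites the ids.
 */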
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

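/*
 * Apply the metadata kept in an fsynced inode page: mode, uid/gid
 * (with quota transfer), project id, i_size, timestamps, flags and
 * inline flags. This handles scenario #1 above, where the latest
 * inode(x) would otherwise be lost.
 */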
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
	return 0;
}

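/*
 * Pass #1: walk the warm node chain that starts right after the last
 * checkpoint and collect every inode owning an fsync-marked dnode.
 * The walk stops at the first non-recoverable dnode and returns
 * -EINVAL if the chain loops back on itself.
 */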
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			/* don't feed an ERR_PTR to f2fs_put_page() below */
			page = NULL;
			break;
		}

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_msg(sbi->sb, KERN_NOTICE,
				"%s: detect looped node chain, "
				"blkaddr:%u, next:%u",
				__func__, blkaddr, next_blkaddr_of_node(page));
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}

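/*
 * @blkaddr is about to be reused for recovered data. If an older node
 * (found through the segment summary) still maps this block, truncate
 * the stale mapping so that two files never own the block at once.
 */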
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; its
	 * reference count keeps it alive while we work on another dnode.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

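/*
 * Replay a single fsynced node page: restore xattrs and inline data,
 * then walk the block addresses in the page and bring the on-disk
 * dnode in line, invalidating, reserving or replacing blocks as needed.
 */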
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}

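/*
 * Pass #2: walk the node chain again and, for each block belonging to
 * a collected inode, replay inode metadata, dentries and data in order.
 * An entry moves to @tmp_inode_list once its last fsynced block has
 * been replayed.
 */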
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				/* don't leak the locked node page on error */
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

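/*
 * Entry point for roll-forward recovery. Returns 1 if recoverable data
 * was found and @check_only is set, 0 on success or when there is
 * nothing to do, and a negative errno on failure. On successful
 * recovery a CP_RECOVERY checkpoint is written to make the replayed
 * data durable.
 */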
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err) {
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	} else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	} else {
		clear_sbi_flag(sbi, SBI_POR_DOING);
	}
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}
797