xref: /openbmc/linux/fs/f2fs/recovery.c (revision 393ff91f)
10a8165d7SJaegeuk Kim /*
2d624c96fSJaegeuk Kim  * fs/f2fs/recovery.c
3d624c96fSJaegeuk Kim  *
4d624c96fSJaegeuk Kim  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5d624c96fSJaegeuk Kim  *             http://www.samsung.com/
6d624c96fSJaegeuk Kim  *
7d624c96fSJaegeuk Kim  * This program is free software; you can redistribute it and/or modify
8d624c96fSJaegeuk Kim  * it under the terms of the GNU General Public License version 2 as
9d624c96fSJaegeuk Kim  * published by the Free Software Foundation.
10d624c96fSJaegeuk Kim  */
11d624c96fSJaegeuk Kim #include <linux/fs.h>
12d624c96fSJaegeuk Kim #include <linux/f2fs_fs.h>
13d624c96fSJaegeuk Kim #include "f2fs.h"
14d624c96fSJaegeuk Kim #include "node.h"
15d624c96fSJaegeuk Kim #include "segment.h"
16d624c96fSJaegeuk Kim 
17d624c96fSJaegeuk Kim static struct kmem_cache *fsync_entry_slab;
18d624c96fSJaegeuk Kim 
19d624c96fSJaegeuk Kim bool space_for_roll_forward(struct f2fs_sb_info *sbi)
20d624c96fSJaegeuk Kim {
21d624c96fSJaegeuk Kim 	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
22d624c96fSJaegeuk Kim 			> sbi->user_block_count)
23d624c96fSJaegeuk Kim 		return false;
24d624c96fSJaegeuk Kim 	return true;
25d624c96fSJaegeuk Kim }
26d624c96fSJaegeuk Kim 
27d624c96fSJaegeuk Kim static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
28d624c96fSJaegeuk Kim 								nid_t ino)
29d624c96fSJaegeuk Kim {
30d624c96fSJaegeuk Kim 	struct list_head *this;
31d624c96fSJaegeuk Kim 	struct fsync_inode_entry *entry;
32d624c96fSJaegeuk Kim 
33d624c96fSJaegeuk Kim 	list_for_each(this, head) {
34d624c96fSJaegeuk Kim 		entry = list_entry(this, struct fsync_inode_entry, list);
35d624c96fSJaegeuk Kim 		if (entry->inode->i_ino == ino)
36d624c96fSJaegeuk Kim 			return entry;
37d624c96fSJaegeuk Kim 	}
38d624c96fSJaegeuk Kim 	return NULL;
39d624c96fSJaegeuk Kim }
40d624c96fSJaegeuk Kim 
41d624c96fSJaegeuk Kim static int recover_dentry(struct page *ipage, struct inode *inode)
42d624c96fSJaegeuk Kim {
43d624c96fSJaegeuk Kim 	struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
44d624c96fSJaegeuk Kim 	struct f2fs_inode *raw_inode = &(raw_node->i);
45b7f7a5e0SAl Viro 	struct qstr name;
46d624c96fSJaegeuk Kim 	struct f2fs_dir_entry *de;
47d624c96fSJaegeuk Kim 	struct page *page;
48d624c96fSJaegeuk Kim 	struct inode *dir;
49d624c96fSJaegeuk Kim 	int err = 0;
50d624c96fSJaegeuk Kim 
51d624c96fSJaegeuk Kim 	if (!is_dent_dnode(ipage))
52d624c96fSJaegeuk Kim 		goto out;
53d624c96fSJaegeuk Kim 
54d624c96fSJaegeuk Kim 	dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
55d624c96fSJaegeuk Kim 	if (IS_ERR(dir)) {
56d624c96fSJaegeuk Kim 		err = -EINVAL;
57d624c96fSJaegeuk Kim 		goto out;
58d624c96fSJaegeuk Kim 	}
59d624c96fSJaegeuk Kim 
60b7f7a5e0SAl Viro 	name.len = le32_to_cpu(raw_inode->i_namelen);
61b7f7a5e0SAl Viro 	name.name = raw_inode->i_name;
62d624c96fSJaegeuk Kim 
63b7f7a5e0SAl Viro 	de = f2fs_find_entry(dir, &name, &page);
64d624c96fSJaegeuk Kim 	if (de) {
65d624c96fSJaegeuk Kim 		kunmap(page);
66d624c96fSJaegeuk Kim 		f2fs_put_page(page, 0);
67d624c96fSJaegeuk Kim 	} else {
6890b2fc64SJaegeuk Kim 		err = __f2fs_add_link(dir, &name, inode);
69d624c96fSJaegeuk Kim 	}
70d624c96fSJaegeuk Kim 	iput(dir);
71d624c96fSJaegeuk Kim out:
72d624c96fSJaegeuk Kim 	kunmap(ipage);
73d624c96fSJaegeuk Kim 	return err;
74d624c96fSJaegeuk Kim }
75d624c96fSJaegeuk Kim 
76d624c96fSJaegeuk Kim static int recover_inode(struct inode *inode, struct page *node_page)
77d624c96fSJaegeuk Kim {
78d624c96fSJaegeuk Kim 	void *kaddr = page_address(node_page);
79d624c96fSJaegeuk Kim 	struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
80d624c96fSJaegeuk Kim 	struct f2fs_inode *raw_inode = &(raw_node->i);
81d624c96fSJaegeuk Kim 
8225ca923bSJaegeuk Kim 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
83d624c96fSJaegeuk Kim 	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
84d624c96fSJaegeuk Kim 	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
85d624c96fSJaegeuk Kim 	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
86d624c96fSJaegeuk Kim 	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
87d624c96fSJaegeuk Kim 	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
88d624c96fSJaegeuk Kim 	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
89d624c96fSJaegeuk Kim 	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
90d624c96fSJaegeuk Kim 
91d624c96fSJaegeuk Kim 	return recover_dentry(node_page, inode);
92d624c96fSJaegeuk Kim }
93d624c96fSJaegeuk Kim 
94d624c96fSJaegeuk Kim static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
95d624c96fSJaegeuk Kim {
96d624c96fSJaegeuk Kim 	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
97d624c96fSJaegeuk Kim 	struct curseg_info *curseg;
98d624c96fSJaegeuk Kim 	struct page *page;
99d624c96fSJaegeuk Kim 	block_t blkaddr;
100d624c96fSJaegeuk Kim 	int err = 0;
101d624c96fSJaegeuk Kim 
102d624c96fSJaegeuk Kim 	/* get node pages in the current segment */
103d624c96fSJaegeuk Kim 	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
104d624c96fSJaegeuk Kim 	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;
105d624c96fSJaegeuk Kim 
106d624c96fSJaegeuk Kim 	/* read node page */
107d624c96fSJaegeuk Kim 	page = alloc_page(GFP_F2FS_ZERO);
108d624c96fSJaegeuk Kim 	if (IS_ERR(page))
109d624c96fSJaegeuk Kim 		return PTR_ERR(page);
110d624c96fSJaegeuk Kim 	lock_page(page);
111d624c96fSJaegeuk Kim 
112d624c96fSJaegeuk Kim 	while (1) {
113d624c96fSJaegeuk Kim 		struct fsync_inode_entry *entry;
114d624c96fSJaegeuk Kim 
115393ff91fSJaegeuk Kim 		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
116393ff91fSJaegeuk Kim 		if (err)
117d624c96fSJaegeuk Kim 			goto out;
118d624c96fSJaegeuk Kim 
119393ff91fSJaegeuk Kim 		lock_page(page);
120393ff91fSJaegeuk Kim 
121393ff91fSJaegeuk Kim 		if (cp_ver != cpver_of_node(page)) {
122393ff91fSJaegeuk Kim 			err = -EINVAL;
123393ff91fSJaegeuk Kim 			goto unlock_out;
124393ff91fSJaegeuk Kim 		}
125d624c96fSJaegeuk Kim 
126d624c96fSJaegeuk Kim 		if (!is_fsync_dnode(page))
127d624c96fSJaegeuk Kim 			goto next;
128d624c96fSJaegeuk Kim 
129d624c96fSJaegeuk Kim 		entry = get_fsync_inode(head, ino_of_node(page));
130d624c96fSJaegeuk Kim 		if (entry) {
131d624c96fSJaegeuk Kim 			entry->blkaddr = blkaddr;
132d624c96fSJaegeuk Kim 			if (IS_INODE(page) && is_dent_dnode(page))
133d624c96fSJaegeuk Kim 				set_inode_flag(F2FS_I(entry->inode),
134d624c96fSJaegeuk Kim 							FI_INC_LINK);
135d624c96fSJaegeuk Kim 		} else {
136d624c96fSJaegeuk Kim 			if (IS_INODE(page) && is_dent_dnode(page)) {
137d624c96fSJaegeuk Kim 				if (recover_inode_page(sbi, page)) {
138d624c96fSJaegeuk Kim 					err = -ENOMEM;
139393ff91fSJaegeuk Kim 					goto unlock_out;
140d624c96fSJaegeuk Kim 				}
141d624c96fSJaegeuk Kim 			}
142d624c96fSJaegeuk Kim 
143d624c96fSJaegeuk Kim 			/* add this fsync inode to the list */
144d624c96fSJaegeuk Kim 			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
145d624c96fSJaegeuk Kim 			if (!entry) {
146d624c96fSJaegeuk Kim 				err = -ENOMEM;
147393ff91fSJaegeuk Kim 				goto unlock_out;
148d624c96fSJaegeuk Kim 			}
149d624c96fSJaegeuk Kim 
150d624c96fSJaegeuk Kim 			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
151d624c96fSJaegeuk Kim 			if (IS_ERR(entry->inode)) {
152d624c96fSJaegeuk Kim 				err = PTR_ERR(entry->inode);
153fd8bb65fSNamjae Jeon 				kmem_cache_free(fsync_entry_slab, entry);
154393ff91fSJaegeuk Kim 				goto unlock_out;
155d624c96fSJaegeuk Kim 			}
156fd8bb65fSNamjae Jeon 
157fd8bb65fSNamjae Jeon 			list_add_tail(&entry->list, head);
158d624c96fSJaegeuk Kim 			entry->blkaddr = blkaddr;
159d624c96fSJaegeuk Kim 		}
160d624c96fSJaegeuk Kim 		if (IS_INODE(page)) {
161d624c96fSJaegeuk Kim 			err = recover_inode(entry->inode, page);
162d624c96fSJaegeuk Kim 			if (err)
163393ff91fSJaegeuk Kim 				goto unlock_out;
164d624c96fSJaegeuk Kim 		}
165d624c96fSJaegeuk Kim next:
166d624c96fSJaegeuk Kim 		/* check next segment */
167d624c96fSJaegeuk Kim 		blkaddr = next_blkaddr_of_node(page);
168d624c96fSJaegeuk Kim 	}
169393ff91fSJaegeuk Kim unlock_out:
170d624c96fSJaegeuk Kim 	unlock_page(page);
171393ff91fSJaegeuk Kim out:
172d624c96fSJaegeuk Kim 	__free_pages(page, 0);
173d624c96fSJaegeuk Kim 	return err;
174d624c96fSJaegeuk Kim }
175d624c96fSJaegeuk Kim 
176d624c96fSJaegeuk Kim static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
177d624c96fSJaegeuk Kim 					struct list_head *head)
178d624c96fSJaegeuk Kim {
179d8b79b2fSDan Carpenter 	struct fsync_inode_entry *entry, *tmp;
180d8b79b2fSDan Carpenter 
181d8b79b2fSDan Carpenter 	list_for_each_entry_safe(entry, tmp, head, list) {
182d624c96fSJaegeuk Kim 		iput(entry->inode);
183d624c96fSJaegeuk Kim 		list_del(&entry->list);
184d624c96fSJaegeuk Kim 		kmem_cache_free(fsync_entry_slab, entry);
185d624c96fSJaegeuk Kim 	}
186d624c96fSJaegeuk Kim }
187d624c96fSJaegeuk Kim 
188d624c96fSJaegeuk Kim static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
189d624c96fSJaegeuk Kim 						block_t blkaddr)
190d624c96fSJaegeuk Kim {
191d624c96fSJaegeuk Kim 	struct seg_entry *sentry;
192d624c96fSJaegeuk Kim 	unsigned int segno = GET_SEGNO(sbi, blkaddr);
193d624c96fSJaegeuk Kim 	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
194d624c96fSJaegeuk Kim 					(sbi->blocks_per_seg - 1);
195d624c96fSJaegeuk Kim 	struct f2fs_summary sum;
196d624c96fSJaegeuk Kim 	nid_t ino;
197d624c96fSJaegeuk Kim 	void *kaddr;
198d624c96fSJaegeuk Kim 	struct inode *inode;
199d624c96fSJaegeuk Kim 	struct page *node_page;
200d624c96fSJaegeuk Kim 	block_t bidx;
201d624c96fSJaegeuk Kim 	int i;
202d624c96fSJaegeuk Kim 
203d624c96fSJaegeuk Kim 	sentry = get_seg_entry(sbi, segno);
204d624c96fSJaegeuk Kim 	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
205d624c96fSJaegeuk Kim 		return;
206d624c96fSJaegeuk Kim 
207d624c96fSJaegeuk Kim 	/* Get the previous summary */
208d624c96fSJaegeuk Kim 	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
209d624c96fSJaegeuk Kim 		struct curseg_info *curseg = CURSEG_I(sbi, i);
210d624c96fSJaegeuk Kim 		if (curseg->segno == segno) {
211d624c96fSJaegeuk Kim 			sum = curseg->sum_blk->entries[blkoff];
212d624c96fSJaegeuk Kim 			break;
213d624c96fSJaegeuk Kim 		}
214d624c96fSJaegeuk Kim 	}
215d624c96fSJaegeuk Kim 	if (i > CURSEG_COLD_DATA) {
216d624c96fSJaegeuk Kim 		struct page *sum_page = get_sum_page(sbi, segno);
217d624c96fSJaegeuk Kim 		struct f2fs_summary_block *sum_node;
218d624c96fSJaegeuk Kim 		kaddr = page_address(sum_page);
219d624c96fSJaegeuk Kim 		sum_node = (struct f2fs_summary_block *)kaddr;
220d624c96fSJaegeuk Kim 		sum = sum_node->entries[blkoff];
221d624c96fSJaegeuk Kim 		f2fs_put_page(sum_page, 1);
222d624c96fSJaegeuk Kim 	}
223d624c96fSJaegeuk Kim 
224d624c96fSJaegeuk Kim 	/* Get the node page */
225d624c96fSJaegeuk Kim 	node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
226d624c96fSJaegeuk Kim 	bidx = start_bidx_of_node(ofs_of_node(node_page)) +
227d624c96fSJaegeuk Kim 				le16_to_cpu(sum.ofs_in_node);
228d624c96fSJaegeuk Kim 	ino = ino_of_node(node_page);
229d624c96fSJaegeuk Kim 	f2fs_put_page(node_page, 1);
230d624c96fSJaegeuk Kim 
231d624c96fSJaegeuk Kim 	/* Deallocate previous index in the node page */
232d4686d56SJaegeuk Kim 	inode = f2fs_iget(sbi->sb, ino);
23306025f4dSNamjae Jeon 	if (IS_ERR(inode))
23406025f4dSNamjae Jeon 		return;
23506025f4dSNamjae Jeon 
236d624c96fSJaegeuk Kim 	truncate_hole(inode, bidx, bidx + 1);
237d624c96fSJaegeuk Kim 	iput(inode);
238d624c96fSJaegeuk Kim }
239d624c96fSJaegeuk Kim 
/*
 * Re-apply the data block addresses recorded in the fsync'ed node page
 * @page to the live inode @inode, then rewrite the node page in place
 * at @blkaddr.  Called for every logged dnode of a collected inode.
 */
static void do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;

	/* index range covered by this node page */
	start = start_bidx_of_node(ofs_of_node(page));
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE;
	else
		end = start + ADDRS_PER_BLOCK;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, start, ALLOC_NODE))
		return;

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	/* the live node must describe the same inode/offset as the log */
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* only re-apply real, changed addresses from the log */
		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			check_index_in_prev_nodes(sbi, dest);

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
	f2fs_put_dnode(&dn);
}
302d624c96fSJaegeuk Kim 
303d624c96fSJaegeuk Kim static void recover_data(struct f2fs_sb_info *sbi,
304d624c96fSJaegeuk Kim 				struct list_head *head, int type)
305d624c96fSJaegeuk Kim {
306d624c96fSJaegeuk Kim 	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
307d624c96fSJaegeuk Kim 	struct curseg_info *curseg;
308d624c96fSJaegeuk Kim 	struct page *page;
309d624c96fSJaegeuk Kim 	block_t blkaddr;
310d624c96fSJaegeuk Kim 
311d624c96fSJaegeuk Kim 	/* get node pages in the current segment */
312d624c96fSJaegeuk Kim 	curseg = CURSEG_I(sbi, type);
313d624c96fSJaegeuk Kim 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
314d624c96fSJaegeuk Kim 
315d624c96fSJaegeuk Kim 	/* read node page */
316d624c96fSJaegeuk Kim 	page = alloc_page(GFP_NOFS | __GFP_ZERO);
317d624c96fSJaegeuk Kim 	if (IS_ERR(page))
318d624c96fSJaegeuk Kim 		return;
319d624c96fSJaegeuk Kim 	lock_page(page);
320d624c96fSJaegeuk Kim 
321d624c96fSJaegeuk Kim 	while (1) {
322d624c96fSJaegeuk Kim 		struct fsync_inode_entry *entry;
323d624c96fSJaegeuk Kim 
324d624c96fSJaegeuk Kim 		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
325d624c96fSJaegeuk Kim 			goto out;
326d624c96fSJaegeuk Kim 
327393ff91fSJaegeuk Kim 		lock_page(page);
328393ff91fSJaegeuk Kim 
329d624c96fSJaegeuk Kim 		if (cp_ver != cpver_of_node(page))
330393ff91fSJaegeuk Kim 			goto unlock_out;
331d624c96fSJaegeuk Kim 
332d624c96fSJaegeuk Kim 		entry = get_fsync_inode(head, ino_of_node(page));
333d624c96fSJaegeuk Kim 		if (!entry)
334d624c96fSJaegeuk Kim 			goto next;
335d624c96fSJaegeuk Kim 
336d624c96fSJaegeuk Kim 		do_recover_data(sbi, entry->inode, page, blkaddr);
337d624c96fSJaegeuk Kim 
338d624c96fSJaegeuk Kim 		if (entry->blkaddr == blkaddr) {
339d624c96fSJaegeuk Kim 			iput(entry->inode);
340d624c96fSJaegeuk Kim 			list_del(&entry->list);
341d624c96fSJaegeuk Kim 			kmem_cache_free(fsync_entry_slab, entry);
342d624c96fSJaegeuk Kim 		}
343d624c96fSJaegeuk Kim next:
344d624c96fSJaegeuk Kim 		/* check next segment */
345d624c96fSJaegeuk Kim 		blkaddr = next_blkaddr_of_node(page);
346d624c96fSJaegeuk Kim 	}
347393ff91fSJaegeuk Kim unlock_out:
348d624c96fSJaegeuk Kim 	unlock_page(page);
349393ff91fSJaegeuk Kim out:
350d624c96fSJaegeuk Kim 	__free_pages(page, 0);
351d624c96fSJaegeuk Kim 
352d624c96fSJaegeuk Kim 	allocate_new_segments(sbi);
353d624c96fSJaegeuk Kim }
354d624c96fSJaegeuk Kim 
/*
 * Entry point of roll-forward recovery, called at mount time: find all
 * inodes with fsync'ed dnodes written after the last checkpoint, replay
 * their data block updates, then write a fresh checkpoint.
 */
void recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	/* recovery is best-effort; silently skip it on OOM */
	if (unlikely(!fsync_entry_slab))
		return;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	if (find_fsync_dnodes(sbi, &inode_list))
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	/* por_doing flags that power-off recovery is in progress */
	sbi->por_doing = 1;
	recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	sbi->por_doing = 0;
	/* recover_data() must have consumed every collected entry */
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(sbi, &inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	/* persist the recovered state before normal operation resumes */
	write_checkpoint(sbi, false);
}
382d624c96fSJaegeuk Kim }
383