/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

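/*
 * Check whether there is still room for roll forward recovery: the blocks
 * valid at the last checkpoint plus those allocated since then must not
 * exceed user_block_count.
 *
 * Hedged usage sketch (the real call site is outside this file, e.g. the
 * fsync path in fs/f2fs/file.c, and may differ in detail):
 *
 *	if (!space_for_roll_forward(sbi))
 *		need_cp = true;		(fall back to a full checkpoint)
 */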
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

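/* Find the fsync_inode_entry for @ino in the list built during recovery. */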
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

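/*
 * Re-link a recovered inode into its parent directory, using the parent ino
 * (i_pino) and the name stored in the raw inode image of @ipage.  If the
 * parent already holds an entry with that name pointing to another inode,
 * the stale inode is made an orphan, its entry is deleted, and the lookup
 * is retried before adding the new link.
 */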
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
		goto out_unmap_put;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

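/*
 * Restore the basic attributes (mode, size and timestamps) of @inode from
 * the raw inode image stored in @page.
 */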
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), F2FS_INODE(page)->i_name);
}

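/*
 * Step #1 of recovery: scan the warm node chain written after the last
 * checkpoint and build the list of inodes that have fsync'ed dnodes.
 *
 * The chain starts at NEXT_FREE_BLKADDR(CURSEG_WARM_NODE) and is linked by
 * next_blkaddr_of_node():
 *
 *	dnode -> dnode -> dnode -> ...	(stop when cpver_of_node() differs
 *					 from the current checkpoint version)
 *
 * Every inode found gets a fsync_inode_entry on @head that remembers the
 * block address of its last fsync'ed dnode, last inode block and last
 * dentry mark.
 */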
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			return 0;

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT)
					goto next;
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

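/*
 * The destination block @blkaddr being replayed may still be indexed by an
 * older node.  Look up its previous owner through the segment summary and
 * truncate that stale index, so the block has a single owner after
 * recovery.  The locked dnode page in @dn is reused when the owner turns
 * out to be the inode currently being recovered.
 */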
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	if (ino != dn->inode->i_ino) {
		truncate_hole(inode, bidx, bidx + 1);
		iput(inode);
	} else {
		struct dnode_of_data tdn;
		set_new_dnode(&tdn, inode, dn->inode_page, NULL, 0);
		if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
			return 0;
		if (tdn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&tdn, 1);
		f2fs_put_page(tdn.node_page, 1);
	}
	return 0;
}

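/*
 * Replay one fsync'ed dnode for @inode: recover its xattr or inline data
 * if present, then copy every newer data block address from @page into the
 * corresponding in-place dnode and mark that node page dirty.
 */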
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

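/*
 * Step #2 of recovery: walk the node chain again and, for each dnode whose
 * inode was collected by find_fsync_dnodes(), replay the inode attributes,
 * the directory entry and the data block addresses it carries.
 */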
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

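/*
 * Entry point of roll forward recovery: step #1 collects the fsync'ed
 * inodes written after the last checkpoint, step #2 replays their dnodes,
 * and a checkpoint is written afterwards when anything was recovered.
 * Returns 0 on success or a negative errno.
 *
 * Hedged sketch of the expected mount-time caller (the real one lives in
 * fs/f2fs/super.c and its condition may differ):
 *
 *	if (previous shutdown was not clean)
 *		err = recover_fsync_data(sbi);
 */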
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = true;

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	sbi->por_doing = false;
	if (err) {
		discard_next_dnode(sbi, blkaddr);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);
		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_SYNC,
		};
		mutex_unlock(&sbi->cp_mutex);
		write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}
	return err;
}