xref: /openbmc/linux/fs/f2fs/recovery.c (revision 39936837)
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

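/*
 * Return true if the blocks already in use plus those currently reserved
 * for allocation still fit within the user-visible block count, i.e. there
 * is room left for roll-forward recovery to write new blocks.
 */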
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

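/* Look up an inode number in the fsync inode list built by find_fsync_dnodes(). */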
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

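/*
 * If the fsynced inode carries dentry information, re-link it into its
 * parent directory unless the entry already exists there.
 */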
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
	struct f2fs_inode *raw_inode = &(raw_node->i);
	struct qstr name;
	struct f2fs_dir_entry *de;
	struct page *page;
	struct inode *dir;
	int err = 0;

	if (!is_dent_dnode(ipage))
		goto out;

	dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
	if (IS_ERR(dir)) {
		/* propagate the real error from f2fs_iget() instead of -EINVAL */
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	de = f2fs_find_entry(dir, &name, &page);
	if (de) {
		kunmap(page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &name, inode);
	}
	iput(dir);
out:
	kunmap(ipage);
	return err;
}

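/*
 * Restore the basic inode metadata (mode, size, timestamps) recorded in the
 * fsynced node page, then try to recover its directory entry.
 */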
static int recover_inode(struct inode *inode, struct page *node_page)
{
	void *kaddr = page_address(node_page);
	struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
	struct f2fs_inode *raw_inode = &(raw_node->i);

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	return recover_dentry(node_page, inode);
}

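/*
 * Step #1 of roll-forward recovery: walk the chain of warm node blocks
 * written after the last checkpoint and collect every inode that was
 * fsynced, recovering inode metadata and dentries along the way.
 */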
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)	/* alloc_page() returns NULL on failure, not an ERR_PTR */
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			goto unlock_out;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			entry->blkaddr = blkaddr;
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					goto unlock_out;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				goto unlock_out;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				goto unlock_out;
			}

			list_add_tail(&entry->list, head);
			entry->blkaddr = blkaddr;
		}
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err)
				goto unlock_out;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
unlock_out:
	unlock_page(page);
out:
	__free_pages(page, 0);
	return err;
}

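/* Release every entry (and its inode reference) left on the fsync inode list. */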
static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
					struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

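/*
 * A recovered data block may already be referenced by an older node written
 * before the crash.  Find the inode that owns the block at @blkaddr via the
 * segment summary and punch that stale index out of it.
 */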
static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Get the node page */
	node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
	if (IS_ERR(node_page))	/* nothing to deallocate if the node page is unreadable */
		return;
	bidx = start_bidx_of_node(ofs_of_node(node_page)) +
				le16_to_cpu(sum.ofs_in_node);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return;

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
}

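/*
 * Replay one fsynced node page: for every data index whose block address
 * changed since the last checkpoint, adopt the logged block address (evicting
 * any stale owner of that block first), then rewrite the node page itself.
 */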
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0;
	int ilock;

	start = start_bidx_of_node(ofs_of_node(page));
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE;
	else
		end = start + ADDRS_PER_BLOCK;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		return err;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			check_index_in_prev_nodes(sbi, dest);

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);
	return 0;
}

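/*
 * Step #2 of roll-forward recovery: walk the same warm node chain again and
 * replay the data blocks of every inode collected in step #1.
 */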
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)	/* alloc_page() returns NULL on failure, not an ERR_PTR */
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			goto unlock_out;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			goto unlock_out;	/* the page is still locked here */

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
unlock_out:
	unlock_page(page);
out:
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

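/*
 * Entry point for roll-forward recovery after an unclean shutdown: find the
 * fsynced inodes, replay their data, and write a checkpoint so the recovered
 * state becomes persistent.
 */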
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (unlikely(!fsync_entry_slab))
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	sbi->por_doing = 1;
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	sbi->por_doing = 0;
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(sbi, &inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	write_checkpoint(sbi, false);
	return err;
}