xref: /openbmc/linux/fs/f2fs/recovery.c (revision 32981ea5)
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

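/*
 * Return true if the blocks allocated since the last checkpoint still fit
 * within the user block count, i.e. roll-forward recovery has room to
 * replay them.
 */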
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

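/* Look up an fsync_inode_entry by inode number in the given list. */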
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

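/*
 * Allocate an fsync_inode_entry for @inode and append it to @head.  On
 * success the entry takes over the caller's reference on @inode, which is
 * dropped later in del_fsync_inode().
 */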
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
							struct inode *inode)
{
	struct fsync_inode_entry *entry;

	entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (!entry)
		return NULL;

	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}

static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

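/*
 * Re-link @inode into its parent directory using the name recorded in the
 * fsynced inode block (@ipage).  Parent directory inodes are cached in
 * @dir_list so each directory is looked up only once per recovery.  A stale
 * entry holding the same name but a different inode is deleted first.
 * Dentry recovery is skipped for encrypted file names.
 */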
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}

		entry = add_fsync_inode(dir_list, dir);
		if (!entry) {
			err = -ENOMEM;
			iput(dir);
			goto out;
		}
	}

	dir = entry->inode;

	if (file_enc_name(inode))
		return 0;

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

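/*
 * Restore the in-core inode's mode, size and timestamps from the fsynced
 * inode block found in the warm node chain.
 */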
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

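/*
 * Compare the on-disk inode in @ipage against the in-core inode.  If the
 * in-core inode already carries newer timestamps, the node page describes a
 * different (older) incarnation of the inode and its updates are skipped.
 */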
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}

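/*
 * First recovery pass: walk the warm node chain starting right after the
 * last checkpoint and collect every inode that has an fsync-marked dnode
 * into @head.  The walk stops at the first block whose checkpoint version
 * no longer matches the current one.
 */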
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct inode *inode;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(inode)) {
				err = PTR_ERR(inode);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}

			/* add this fsync inode to the list */
			entry = add_fsync_inode(head, inode);
			if (!entry) {
				err = -ENOMEM;
				iput(inode);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

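/* Drop every entry (and its inode reference) from the fsync inode list. */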
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

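/*
 * @blkaddr is about to be reused for recovered data, but it may still be
 * referenced by an older node.  Find the owning node and offset via the
 * segment summary and truncate that stale reference first.
 */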
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; the reference
	 * we hold on it keeps the page alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

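/*
 * Replay one fsynced node page: for every data index it covers, make the
 * current dnode point at the block address recorded in @page, truncating,
 * reserving or relocating blocks as needed.
 */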
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from the cold log.
		 * But we keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * dest is a reserved block, so invalidate src block
		 * and then reserve one new block in the dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is a valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

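/*
 * Second recovery pass: walk the warm node chain again and, for every node
 * page that belongs to an inode collected in @inode_list, recover its inode
 * metadata, directory entry and data indices.
 */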
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

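/*
 * Entry point for roll-forward recovery, called from the mount path when
 * fsynced data may exist beyond the last checkpoint.  When @check_only is
 * set, only the first pass runs and the return value reports whether there
 * is anything to recover (1) or not (0); otherwise the recovered state is
 * made persistent with a CP_RECOVERY checkpoint.
 */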
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (discard_next_dnode(sbi, blkaddr))
			invalidate = true;

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}