xref: /openbmc/linux/fs/f2fs/recovery.c (revision ddc141e5)
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

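/*
 * Check whether the blocks allocated since the last checkpoint still fit
 * within the user-visible block count, i.e. whether there is room left to
 * roll the fsynced data forward at recovery time.
 */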
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}

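/* Look up an already-collected fsync inode entry by inode number. */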
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

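/*
 * Grab a reference to the inode @ino, initialize (and, for a newly created
 * inode, charge) its quota, and append a new fsync_inode_entry for it to
 * @head.  Returns the new entry or an ERR_PTR.
 */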
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

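/* Drop the inode reference and free one fsync_inode_entry. */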
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

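/*
 * Re-link the recovered inode into its parent directory using the name
 * recorded in the on-disk inode block.  If a stale entry with the same name
 * already points at a different inode, that entry is deleted first (after
 * reserving an orphan slot in case its inode loses its last link) and the
 * link is retried.
 */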
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}

		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_do_add_link(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

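/*
 * Propagate the pin-file and inline-data state from the raw inode, and clear
 * the inline-dots flag if the raw inode no longer carries it.
 */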
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
	if (!(ri->i_inline & F2FS_INLINE_DOTS))
		clear_inode_flag(inode, FI_INLINE_DOTS);
}

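/*
 * Bring the in-memory inode up to date with the metadata (mode, size,
 * timestamps, advise and inline flags) found in a newer on-disk inode block.
 */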
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;

	recover_inline_flags(inode, raw);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
}

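/*
 * First pass: walk the warm node chain written after the last checkpoint and
 * collect the inodes that have fsynced dnodes to recover.  When @check_only
 * is set, no inode blocks are recreated; the walk only determines whether
 * any recoverable data exists.
 */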
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* follow the chain to the next dnode block */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

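/* Release every entry (and its inode reference) left on a recovery list. */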
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}

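/*
 * @blkaddr is about to be reused for recovered data, but an older node may
 * still reference it.  Find that node through the segment summary, locate
 * the owning inode and block index, and truncate the stale mapping so the
 * block is not claimed twice.
 */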
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily, but keep its
	 * reference count alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

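/*
 * Replay one fsynced dnode block: restore xattr/inline data if present, then
 * walk every data index it covers and make the on-disk mapping match the
 * block addresses recorded at fsync time.
 */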
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}

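/*
 * Second pass over the warm node chain: for every block that belongs to an
 * inode collected in @inode_list, recover its inode metadata, directory
 * entry and data indices.  An entry is dropped from the list once its last
 * recorded dnode has been replayed.
 */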
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* follow the chain to the next dnode block */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

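/*
 * Entry point for roll-forward recovery.  It collects the fsynced inodes,
 * replays their dnodes, and writes a checkpoint when anything was recovered.
 * With @check_only set, it only reports (by returning 1) whether there is
 * data that would need recovery.
 */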
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
skip:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (!err && need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		err = write_checkpoint(sbi, &cpc);
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}