xref: /openbmc/linux/fs/f2fs/super.c (revision 6ead114232f786e3ef7a034c8617f2a4df8e5226)
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/proc_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include "xattr.h"

static struct kmem_cache *f2fs_inode_cachep;

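/*
 * Mount options understood by parse_options() below.  Each Opt_* token
 * pairs with a pattern in f2fs_tokens; only active_logs takes a numeric
 * argument.  As an illustrative example (device and mount point are
 * hypothetical), a mount line such as
 *
 *	mount -t f2fs -o discard,noacl,active_logs=4 /dev/sdb1 /mnt/f2fs
 *
 * would set DISCARD, clear POSIX_ACL (when ACL support is built in) and
 * run with four active logs.
 */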
enum {
	Opt_gc_background_off,
	Opt_disable_roll_forward,
	Opt_discard,
	Opt_noheap,
	Opt_nouser_xattr,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background_off, "background_gc_off"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_err, NULL},
};

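/*
 * printk() front end used throughout this file: every message is
 * prefixed with "F2FS-fs (<device>):" so log output can be attributed
 * to the block device backing this superblock.
 */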
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

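/*
 * ->alloc_inode: carve an f2fs_inode_info out of the private slab cache
 * and hand the embedded VFS inode back to the core.  The zeroed
 * allocation (GFP_NOFS | __GFP_ZERO) keeps the f2fs-private fields in a
 * known state before the explicit initialization below.
 */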
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_dents, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	rwlock_init(&fi->ext.ext_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	return &fi->vfs_inode;
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	f2fs_destroy_stats(sbi);
	stop_gc_thread(sbi);

	write_checkpoint(sbi, true);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	brelse(sbi->raw_super_buf);
	kfree(sbi);
}

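/*
 * ->sync_fs: nothing to do when the superblock is clean and there are
 * no dirty node pages.  A synchronous request writes a checkpoint,
 * while an asynchronous one only calls f2fs_balance_fs(), which may
 * trigger garbage collection when free space is running low.
 */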
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
		return 0;

	if (sync)
		write_checkpoint(sbi, false);
	else
		f2fs_balance_fs(sbi);

	return 0;
}

static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

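/*
 * ->statfs: translate on-disk geometry into struct kstatfs.  Total
 * blocks exclude everything below segment 0's start address, free
 * blocks additionally exclude valid user blocks and the overprovision
 * area, and the inode figures are derived from the NAT-backed node
 * count versus the number of valid inodes.
 */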
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count;
	buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (test_opt(sbi, BG_GC))
		seq_puts(seq, ",background_gc_on");
	else
		seq_puts(seq, ",background_gc_off");
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");

	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}

static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
};

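/*
 * NFS export support: file handles are decoded by the generic helpers,
 * which call back into f2fs_nfs_get_inode() to turn an (ino, generation)
 * pair into an inode.  Handles that point below the root ino or whose
 * generation no longer matches are reported as stale.
 */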
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (ino < F2FS_ROOT_INO(sbi))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

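/*
 * Walk the comma-separated option string with the match_token() parser.
 * Unknown options are fatal, options compiled out (xattr/ACL support)
 * are merely reported, and active_logs is validated against the
 * supported log counts (2, 4 or NR_CURSEG_TYPE).
 */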
static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
				char *options)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background_off:
			clear_opt(sbi, BG_GC);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
#else
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}

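/*
 * The maximum file size follows from the block-pointer layout: direct
 * pointers in the inode, two direct node blocks, two indirect node
 * blocks and one double-indirect node block.  As a rough worked example
 * (assuming the usual 4KB-block constants ADDRS_PER_INODE = 923 and
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018, which are defined elsewhere
 * and not checked here):
 *
 *	923 + 2*1018 + 2*1018*1018 + 1018*1018*1018
 *		= 1,057,053,439 blocks, i.e. roughly 3.9TiB.
 */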
static loff_t max_file_size(unsigned bits)
{
	loff_t result = ADDRS_PER_INODE;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	result <<= bits;
	return result;
}

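/*
 * First-pass validation of a raw superblock read from disk: the magic
 * number, the 4KB block size (which must also match the page cache
 * size) and the logged sector-size fields all have to agree with what
 * this implementation supports.  A non-zero return means "reject this
 * superblock copy".
 */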
static int sanity_check_raw_super(struct super_block *sb,
			struct f2fs_super_block *raw_super)
{
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_CACHE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	if (le32_to_cpu(raw_super->log_sectorsize) !=
					F2FS_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) !=
					F2FS_LOG_SECTORS_PER_BLOCK) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
		return 1;
	}
	return 0;
}

static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (fsmeta >= total)
		return 1;

	if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}

static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);
}

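/*
 * F2FS keeps two copies of the superblock.  validate_superblock() reads
 * the copy at the given block, maps the f2fs_super_block structure at
 * F2FS_SUPER_OFFSET within the buffer, and runs the sanity checks
 * above; f2fs_fill_super() falls back to the second copy when the
 * first one fails.
 */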
static int validate_superblock(struct super_block *sb,
		struct f2fs_super_block **raw_super,
		struct buffer_head **raw_super_buf, sector_t block)
{
	const char *super = (block == 0 ? "first" : "second");

	/* read f2fs raw super block */
	*raw_super_buf = sb_bread(sb, block);
	if (!*raw_super_buf) {
		f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
				super);
		return -EIO;
	}

	*raw_super = (struct f2fs_super_block *)
		((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);

	/* sanity checking of raw super */
	if (!sanity_check_raw_super(sb, *raw_super))
		return 0;

	f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
				"in %s superblock", super);
	return -EINVAL;
}

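/*
 * The mount path, in order: pick up a valid superblock copy, apply the
 * default and user-supplied mount options, instantiate the meta inode,
 * load a valid checkpoint, bring up the segment and node managers, read
 * the node and root inodes, recover orphan inodes, replay fsynced data
 * unless roll-forward recovery is disabled, and finally start the
 * background GC thread and the debugfs statistics.
 */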
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
	if (err) {
		brelse(raw_super_buf);
		/* check secondary superblock when primary failed */
		err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
		if (err)
			goto free_sb_buf;
	}
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sb, sbi, (char *)data))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan inodes, free them */
	err = -EINVAL;
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);
		if (err) {
			f2fs_msg(sb, KERN_ERR, "Failed to recover fsync data");
			goto free_root_inode;
		}
	}

	/* After POR, we can run background GC thread */
	err = start_gc_thread(sbi);
	if (err)
		goto fail;

	err = f2fs_build_stats(sbi);
	if (err)
		goto fail;

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
	}

	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

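/*
 * The inode cache backs f2fs_alloc_inode() above; it must exist before
 * the filesystem is registered, and it is torn down only after an RCU
 * barrier so that any delayed f2fs_i_callback() frees have completed.
 */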
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), NULL);
	if (f2fs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

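/*
 * Module init wires up the slab caches (inode, node manager, GC and
 * checkpoint) before registering the filesystem; module exit unwinds
 * them in reverse order.  Note that the failure path here only returns
 * the error and does not free caches that were created earlier.
 */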
static int __init init_f2fs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto fail;
	err = create_gc_caches();
	if (err)
		goto fail;
	err = create_checkpoint_caches();
	if (err)
		goto fail;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto fail;
	f2fs_create_root_stats();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	destroy_checkpoint_caches();
	destroy_gc_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");