xref: /openbmc/linux/fs/erofs/super.c (revision ab92184ff8f12979f3d3dd5ed601ed85770d81ba)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017-2018 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  */
6 #include <linux/module.h>
7 #include <linux/buffer_head.h>
8 #include <linux/statfs.h>
9 #include <linux/parser.h>
10 #include <linux/seq_file.h>
11 #include <linux/crc32c.h>
12 #include <linux/fs_context.h>
13 #include <linux/fs_parser.h>
14 #include <linux/dax.h>
15 #include "xattr.h"
16 
17 #define CREATE_TRACE_POINTS
18 #include <trace/events/erofs.h>
19 
20 static struct kmem_cache *erofs_inode_cachep __read_mostly;
21 
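/*
 * Printk backends for the erofs_err()/erofs_info() wrapper macros (see
 * internal.h); messages are prefixed with the device name so they can be
 * attributed to a specific mount, and errors also carry the caller name.
 */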
22 void _erofs_err(struct super_block *sb, const char *function,
23 		const char *fmt, ...)
24 {
25 	struct va_format vaf;
26 	va_list args;
27 
28 	va_start(args, fmt);
29 
30 	vaf.fmt = fmt;
31 	vaf.va = &args;
32 
33 	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
34 	va_end(args);
35 }
36 
37 void _erofs_info(struct super_block *sb, const char *function,
38 		 const char *fmt, ...)
39 {
40 	struct va_format vaf;
41 	va_list args;
42 
43 	va_start(args, fmt);
44 
45 	vaf.fmt = fmt;
46 	vaf.va = &args;
47 
48 	pr_info("(device %s): %pV", sb->s_id, &vaf);
49 	va_end(args);
50 }
51 
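/*
 * Verify the superblock checksum: crc32c is computed over the first block
 * from EROFS_SUPER_OFFSET to the end of the block, with the on-disk
 * checksum field itself zeroed out first.
 */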
52 static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
53 {
54 	struct erofs_super_block *dsb;
55 	u32 expected_crc, crc;
56 
57 	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
58 		      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
59 	if (!dsb)
60 		return -ENOMEM;
61 
62 	expected_crc = le32_to_cpu(dsb->checksum);
63 	dsb->checksum = 0;
64 	/* to allow for x86 boot sectors and other oddities. */
65 	crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
66 	kfree(dsb);
67 
68 	if (crc != expected_crc) {
69 		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
70 			  crc, expected_crc);
71 		return -EBADMSG;
72 	}
73 	return 0;
74 }
75 
76 static void erofs_inode_init_once(void *ptr)
77 {
78 	struct erofs_inode *vi = ptr;
79 
80 	inode_init_once(&vi->vfs_inode);
81 }
82 
83 static struct inode *erofs_alloc_inode(struct super_block *sb)
84 {
85 	struct erofs_inode *vi =
86 		kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
87 
88 	if (!vi)
89 		return NULL;
90 
91 	/* zero out everything except vfs_inode */
92 	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
93 	return &vi->vfs_inode;
94 }
95 
96 static void erofs_free_inode(struct inode *inode)
97 {
98 	struct erofs_inode *vi = EROFS_I(inode);
99 
100 	/* be careful of RCU symlink path */
101 	if (inode->i_op == &erofs_fast_symlink_iops)
102 		kfree(inode->i_link);
103 	kfree(vi->xattr_shared_xattrs);
104 
105 	kmem_cache_free(erofs_inode_cachep, vi);
106 }
107 
108 static bool check_layout_compatibility(struct super_block *sb,
109 				       struct erofs_super_block *dsb)
110 {
111 	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
112 
113 	EROFS_SB(sb)->feature_incompat = feature;
114 
115 	/* check if current kernel meets all mandatory requirements */
116 	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
117 		erofs_err(sb,
118 			  "unidentified incompatible feature %x, please upgrade kernel version",
119 			   feature & ~EROFS_ALL_FEATURE_INCOMPAT);
120 		return false;
121 	}
122 	return true;
123 }
124 
125 #ifdef CONFIG_EROFS_FS_ZIP
126 /* read variable-sized metadata, offset will be aligned to a 4-byte boundary */
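/*
 * Each record is 4-byte aligned and starts with a little-endian u16 length
 * (0 encodes a length of 65536), followed by the payload, which may cross
 * block boundaries and is copied out block by block below.
 */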
127 static void *erofs_read_metadata(struct super_block *sb, struct page **pagep,
128 				 erofs_off_t *offset, int *lengthp)
129 {
130 	struct page *page = *pagep;
131 	u8 *buffer, *ptr;
132 	int len, i, cnt;
133 	erofs_blk_t blk;
134 
135 	*offset = round_up(*offset, 4);
136 	blk = erofs_blknr(*offset);
137 
138 	if (!page || page->index != blk) {
139 		if (page) {
140 			unlock_page(page);
141 			put_page(page);
142 		}
143 		page = erofs_get_meta_page(sb, blk);
144 		if (IS_ERR(page))
145 			goto err_nullpage;
146 	}
147 
148 	ptr = kmap(page);
149 	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
150 	if (!len)
151 		len = U16_MAX + 1;
152 	buffer = kmalloc(len, GFP_KERNEL);
153 	if (!buffer) {
154 		buffer = ERR_PTR(-ENOMEM);
155 		goto out;
156 	}
157 	*offset += sizeof(__le16);
158 	*lengthp = len;
159 
160 	for (i = 0; i < len; i += cnt) {
161 		cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
162 		blk = erofs_blknr(*offset);
163 
164 		if (!page || page->index != blk) {
165 			if (page) {
166 				kunmap(page);
167 				unlock_page(page);
168 				put_page(page);
169 			}
170 			page = erofs_get_meta_page(sb, blk);
171 			if (IS_ERR(page)) {
172 				kfree(buffer);
173 				goto err_nullpage;
174 			}
175 			ptr = kmap(page);
176 		}
177 		memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
178 		*offset += cnt;
179 	}
180 out:
181 	kunmap(page);
182 	*pagep = page;
183 	return buffer;
184 err_nullpage:
185 	*pagep = NULL;
186 	return page;
187 }
188 
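/*
 * Load the on-disk compression configurations: walk the bitmap of available
 * algorithms and read one variable-sized config record per set bit,
 * dispatching to the matching per-algorithm parser (LZ4 or LZMA here).
 */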
189 static int erofs_load_compr_cfgs(struct super_block *sb,
190 				 struct erofs_super_block *dsb)
191 {
192 	struct erofs_sb_info *sbi;
193 	struct page *page;
194 	unsigned int algs, alg;
195 	erofs_off_t offset;
196 	int size, ret;
197 
198 	sbi = EROFS_SB(sb);
199 	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
200 
201 	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
202 		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
203 			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
204 		return -EINVAL;
205 	}
206 
207 	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
208 	page = NULL;
209 	alg = 0;
210 	ret = 0;
211 
212 	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
213 		void *data;
214 
215 		if (!(algs & 1))
216 			continue;
217 
218 		data = erofs_read_metadata(sb, &page, &offset, &size);
219 		if (IS_ERR(data)) {
220 			ret = PTR_ERR(data);
221 			goto err;
222 		}
223 
224 		switch (alg) {
225 		case Z_EROFS_COMPRESSION_LZ4:
226 			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
227 			break;
228 		case Z_EROFS_COMPRESSION_LZMA:
229 			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
230 			break;
231 		default:
232 			DBG_BUGON(1);
233 			ret = -EFAULT;
234 		}
235 		kfree(data);
236 		if (ret)
237 			goto err;
238 	}
239 err:
240 	if (page) {
241 		unlock_page(page);
242 		put_page(page);
243 	}
244 	return ret;
245 }
246 #else
247 static int erofs_load_compr_cfgs(struct super_block *sb,
248 				 struct erofs_super_block *dsb)
249 {
250 	if (dsb->u1.available_compr_algs) {
251 		erofs_err(sb, "try to load compressed fs when compression is disabled");
252 		return -EINVAL;
253 	}
254 	return 0;
255 }
256 #endif
257 
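/*
 * Set up the optional multi-device layout: the number of extra devices
 * given via -o device= must match the on-disk device table; each table
 * slot is then opened as a block device and its block count and mapped
 * base block address are recorded for later address translation.
 */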
258 static int erofs_init_devices(struct super_block *sb,
259 			      struct erofs_super_block *dsb)
260 {
261 	struct erofs_sb_info *sbi = EROFS_SB(sb);
262 	unsigned int ondisk_extradevs;
263 	erofs_off_t pos;
264 	struct page *page = NULL;
265 	struct erofs_device_info *dif;
266 	struct erofs_deviceslot *dis;
267 	void *ptr;
268 	int id, err = 0;
269 
270 	sbi->total_blocks = sbi->primarydevice_blocks;
271 	if (!erofs_sb_has_device_table(sbi))
272 		ondisk_extradevs = 0;
273 	else
274 		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
275 
276 	if (ondisk_extradevs != sbi->devs->extra_devices) {
277 		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
278 			  ondisk_extradevs, sbi->devs->extra_devices);
279 		return -EINVAL;
280 	}
281 	if (!ondisk_extradevs)
282 		return 0;
283 
284 	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
285 	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
286 	down_read(&sbi->devs->rwsem);
287 	idr_for_each_entry(&sbi->devs->tree, dif, id) {
288 		erofs_blk_t blk = erofs_blknr(pos);
289 		struct block_device *bdev;
290 
291 		if (!page || page->index != blk) {
292 			if (page) {
293 				kunmap(page);
294 				unlock_page(page);
295 				put_page(page);
296 			}
297 
298 			page = erofs_get_meta_page(sb, blk);
299 			if (IS_ERR(page)) {
300 				up_read(&sbi->devs->rwsem);
301 				return PTR_ERR(page);
302 			}
303 			ptr = kmap(page);
304 		}
305 		dis = ptr + erofs_blkoff(pos);
306 
307 		bdev = blkdev_get_by_path(dif->path,
308 					  FMODE_READ | FMODE_EXCL,
309 					  sb->s_type);
310 		if (IS_ERR(bdev)) {
311 			err = PTR_ERR(bdev);
312 			goto err_out;
313 		}
314 		dif->bdev = bdev;
315 		dif->dax_dev = fs_dax_get_by_bdev(bdev);
316 		dif->blocks = le32_to_cpu(dis->blocks);
317 		dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
318 		sbi->total_blocks += dif->blocks;
319 		pos += EROFS_DEVT_SLOT_SIZE;
320 	}
321 err_out:
322 	up_read(&sbi->devs->rwsem);
323 	if (page) {
324 		kunmap(page);
325 		unlock_page(page);
326 		put_page(page);
327 	}
328 	return err;
329 }
330 
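/*
 * Read and validate the on-disk superblock in block 0: check the magic
 * number, optional checksum, block size and incompatible features, copy
 * the geometry fields into sb_info, then load the compression
 * configurations and initialize any extra devices.
 */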
331 static int erofs_read_superblock(struct super_block *sb)
332 {
333 	struct erofs_sb_info *sbi;
334 	struct page *page;
335 	struct erofs_super_block *dsb;
336 	unsigned int blkszbits;
337 	void *data;
338 	int ret;
339 
340 	page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
341 	if (IS_ERR(page)) {
342 		erofs_err(sb, "cannot read erofs superblock");
343 		return PTR_ERR(page);
344 	}
345 
346 	sbi = EROFS_SB(sb);
347 
348 	data = kmap(page);
349 	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
350 
351 	ret = -EINVAL;
352 	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
353 		erofs_err(sb, "cannot find valid erofs superblock");
354 		goto out;
355 	}
356 
357 	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
358 	if (erofs_sb_has_sb_chksum(sbi)) {
359 		ret = erofs_superblock_csum_verify(sb, data);
360 		if (ret)
361 			goto out;
362 	}
363 
364 	ret = -EINVAL;
365 	blkszbits = dsb->blkszbits;
366 	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
367 	if (blkszbits != LOG_BLOCK_SIZE) {
368 		erofs_err(sb, "blkszbits %u isn't supported on this platform",
369 			  blkszbits);
370 		goto out;
371 	}
372 
373 	if (!check_layout_compatibility(sb, dsb))
374 		goto out;
375 
376 	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
377 	if (sbi->sb_size > EROFS_BLKSIZ) {
378 		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
379 			  sbi->sb_size);
380 		goto out;
381 	}
382 	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
383 	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
384 #ifdef CONFIG_EROFS_FS_XATTR
385 	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
386 #endif
387 	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
388 	sbi->root_nid = le16_to_cpu(dsb->root_nid);
389 	sbi->inos = le64_to_cpu(dsb->inos);
390 
391 	sbi->build_time = le64_to_cpu(dsb->build_time);
392 	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
393 
394 	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
395 
396 	ret = strscpy(sbi->volume_name, dsb->volume_name,
397 		      sizeof(dsb->volume_name));
398 	if (ret < 0) {	/* -E2BIG */
399 		erofs_err(sb, "bad volume name without NIL terminator");
400 		ret = -EFSCORRUPTED;
401 		goto out;
402 	}
403 
404 	/* parse on-disk compression configurations */
405 	if (erofs_sb_has_compr_cfgs(sbi))
406 		ret = erofs_load_compr_cfgs(sb, dsb);
407 	else
408 		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
409 	if (ret < 0)
410 		goto out;
411 
412 	/* handle multiple devices */
413 	ret = erofs_init_devices(sb, dsb);
414 
415 	if (erofs_sb_has_ztailpacking(sbi))
416 		erofs_info(sb, "EXPERIMENTAL compressed inline data feature in use. Use at your own risk!");
417 out:
418 	kunmap(page);
419 	put_page(page);
420 	return ret;
421 }
422 
423 /* set up default EROFS parameters */
424 static void erofs_default_options(struct erofs_fs_context *ctx)
425 {
426 #ifdef CONFIG_EROFS_FS_ZIP
427 	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
428 	ctx->opt.max_sync_decompress_pages = 3;
429 	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
430 #endif
431 #ifdef CONFIG_EROFS_FS_XATTR
432 	set_opt(&ctx->opt, XATTR_USER);
433 #endif
434 #ifdef CONFIG_EROFS_FS_POSIX_ACL
435 	set_opt(&ctx->opt, POSIX_ACL);
436 #endif
437 }
438 
439 enum {
440 	Opt_user_xattr,
441 	Opt_acl,
442 	Opt_cache_strategy,
443 	Opt_dax,
444 	Opt_dax_enum,
445 	Opt_device,
446 	Opt_err
447 };
448 
449 static const struct constant_table erofs_param_cache_strategy[] = {
450 	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
451 	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
452 	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
453 	{}
454 };
455 
456 static const struct constant_table erofs_dax_param_enums[] = {
457 	{"always",	EROFS_MOUNT_DAX_ALWAYS},
458 	{"never",	EROFS_MOUNT_DAX_NEVER},
459 	{}
460 };
461 
462 static const struct fs_parameter_spec erofs_fs_parameters[] = {
463 	fsparam_flag_no("user_xattr",	Opt_user_xattr),
464 	fsparam_flag_no("acl",		Opt_acl),
465 	fsparam_enum("cache_strategy",	Opt_cache_strategy,
466 		     erofs_param_cache_strategy),
467 	fsparam_flag("dax",             Opt_dax),
468 	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
469 	fsparam_string("device",	Opt_device),
470 	{}
471 };
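/*
 * Illustrative mount invocations using the options above (device paths and
 * mountpoints are examples only):
 *
 *   mount -t erofs -o noacl,cache_strategy=readahead /dev/sda /mnt/erofs
 *   mount -t erofs -o dax=always,device=/dev/sdb /dev/sda /mnt/erofs
 *
 * Each device= occurrence registers one extra blob device, matched against
 * the on-disk device table in erofs_init_devices().
 */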
472 
473 static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
474 {
475 #ifdef CONFIG_FS_DAX
476 	struct erofs_fs_context *ctx = fc->fs_private;
477 
478 	switch (mode) {
479 	case EROFS_MOUNT_DAX_ALWAYS:
480 		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
481 		set_opt(&ctx->opt, DAX_ALWAYS);
482 		clear_opt(&ctx->opt, DAX_NEVER);
483 		return true;
484 	case EROFS_MOUNT_DAX_NEVER:
485 		set_opt(&ctx->opt, DAX_NEVER);
486 		clear_opt(&ctx->opt, DAX_ALWAYS);
487 		return true;
488 	default:
489 		DBG_BUGON(1);
490 		return false;
491 	}
492 #else
493 	errorfc(fc, "dax options not supported");
494 	return false;
495 #endif
496 }
497 
498 static int erofs_fc_parse_param(struct fs_context *fc,
499 				struct fs_parameter *param)
500 {
501 	struct erofs_fs_context *ctx = fc->fs_private;
502 	struct fs_parse_result result;
503 	struct erofs_device_info *dif;
504 	int opt, ret;
505 
506 	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
507 	if (opt < 0)
508 		return opt;
509 
510 	switch (opt) {
511 	case Opt_user_xattr:
512 #ifdef CONFIG_EROFS_FS_XATTR
513 		if (result.boolean)
514 			set_opt(&ctx->opt, XATTR_USER);
515 		else
516 			clear_opt(&ctx->opt, XATTR_USER);
517 #else
518 		errorfc(fc, "{,no}user_xattr options not supported");
519 #endif
520 		break;
521 	case Opt_acl:
522 #ifdef CONFIG_EROFS_FS_POSIX_ACL
523 		if (result.boolean)
524 			set_opt(&ctx->opt, POSIX_ACL);
525 		else
526 			clear_opt(&ctx->opt, POSIX_ACL);
527 #else
528 		errorfc(fc, "{,no}acl options not supported");
529 #endif
530 		break;
531 	case Opt_cache_strategy:
532 #ifdef CONFIG_EROFS_FS_ZIP
533 		ctx->opt.cache_strategy = result.uint_32;
534 #else
535 		errorfc(fc, "compression not supported, cache_strategy ignored");
536 #endif
537 		break;
538 	case Opt_dax:
539 		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
540 			return -EINVAL;
541 		break;
542 	case Opt_dax_enum:
543 		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
544 			return -EINVAL;
545 		break;
546 	case Opt_device:
547 		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
548 		if (!dif)
549 			return -ENOMEM;
550 		dif->path = kstrdup(param->string, GFP_KERNEL);
551 		if (!dif->path) {
552 			kfree(dif);
553 			return -ENOMEM;
554 		}
555 		down_write(&ctx->devs->rwsem);
556 		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
557 		up_write(&ctx->devs->rwsem);
558 		if (ret < 0) {
559 			kfree(dif->path);
560 			kfree(dif);
561 			return ret;
562 		}
563 		++ctx->devs->extra_devices;
564 		break;
565 	default:
566 		return -ENOPARAM;
567 	}
568 	return 0;
569 }
570 
571 #ifdef CONFIG_EROFS_FS_ZIP
572 static const struct address_space_operations managed_cache_aops;
573 
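/*
 * The "managed cache" is a private in-memory inode whose page cache holds
 * cached compressed pages; these address_space hooks let memory reclaim
 * release such pages through erofs_try_to_free_cached_page().
 */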
574 static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
575 {
576 	int ret = 1;	/* 0 - busy */
577 	struct address_space *const mapping = page->mapping;
578 
579 	DBG_BUGON(!PageLocked(page));
580 	DBG_BUGON(mapping->a_ops != &managed_cache_aops);
581 
582 	if (PagePrivate(page))
583 		ret = erofs_try_to_free_cached_page(page);
584 
585 	return ret;
586 }
587 
588 static void erofs_managed_cache_invalidatepage(struct page *page,
589 					       unsigned int offset,
590 					       unsigned int length)
591 {
592 	const unsigned int stop = length + offset;
593 
594 	DBG_BUGON(!PageLocked(page));
595 
596 	/* Check for potential overflow in debug mode */
597 	DBG_BUGON(stop > PAGE_SIZE || stop < length);
598 
599 	if (offset == 0 && stop == PAGE_SIZE)
600 		while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
601 			cond_resched();
602 }
603 
604 static const struct address_space_operations managed_cache_aops = {
605 	.releasepage = erofs_managed_cache_releasepage,
606 	.invalidatepage = erofs_managed_cache_invalidatepage,
607 };
608 
609 static int erofs_init_managed_cache(struct super_block *sb)
610 {
611 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
612 	struct inode *const inode = new_inode(sb);
613 
614 	if (!inode)
615 		return -ENOMEM;
616 
617 	set_nlink(inode, 1);
618 	inode->i_size = OFFSET_MAX;
619 
620 	inode->i_mapping->a_ops = &managed_cache_aops;
621 	mapping_set_gfp_mask(inode->i_mapping,
622 			     GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
623 	sbi->managed_cache = inode;
624 	return 0;
625 }
626 #else
627 static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
628 #endif
629 
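/*
 * Fill the superblock at mount time: set the fixed block size, allocate
 * sb_info, take over the device list from the fs_context, read the on-disk
 * superblock, then look up the root inode and register the shrinker,
 * managed cache and sysfs entries.
 */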
630 static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
631 {
632 	struct inode *inode;
633 	struct erofs_sb_info *sbi;
634 	struct erofs_fs_context *ctx = fc->fs_private;
635 	int err;
636 
637 	sb->s_magic = EROFS_SUPER_MAGIC;
638 
639 	if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
640 		erofs_err(sb, "failed to set erofs blksize");
641 		return -EINVAL;
642 	}
643 
644 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
645 	if (!sbi)
646 		return -ENOMEM;
647 
648 	sb->s_fs_info = sbi;
649 	sbi->opt = ctx->opt;
650 	sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
651 	sbi->devs = ctx->devs;
652 	ctx->devs = NULL;
653 
654 	err = erofs_read_superblock(sb);
655 	if (err)
656 		return err;
657 
658 	if (test_opt(&sbi->opt, DAX_ALWAYS) &&
659 	    !dax_supported(sbi->dax_dev, sb->s_bdev, EROFS_BLKSIZ, 0, bdev_nr_sectors(sb->s_bdev))) {
660 		errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
661 		clear_opt(&sbi->opt, DAX_ALWAYS);
662 	}
663 	sb->s_flags |= SB_RDONLY | SB_NOATIME;
664 	sb->s_maxbytes = MAX_LFS_FILESIZE;
665 	sb->s_time_gran = 1;
666 
667 	sb->s_op = &erofs_sops;
668 	sb->s_xattr = erofs_xattr_handlers;
669 
670 	if (test_opt(&sbi->opt, POSIX_ACL))
671 		sb->s_flags |= SB_POSIXACL;
672 	else
673 		sb->s_flags &= ~SB_POSIXACL;
674 
675 #ifdef CONFIG_EROFS_FS_ZIP
676 	xa_init(&sbi->managed_pslots);
677 #endif
678 
679 	/* get the root inode */
680 	inode = erofs_iget(sb, ROOT_NID(sbi), true);
681 	if (IS_ERR(inode))
682 		return PTR_ERR(inode);
683 
684 	if (!S_ISDIR(inode->i_mode)) {
685 		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
686 			  ROOT_NID(sbi), inode->i_mode);
687 		iput(inode);
688 		return -EINVAL;
689 	}
690 
691 	sb->s_root = d_make_root(inode);
692 	if (!sb->s_root)
693 		return -ENOMEM;
694 
695 	erofs_shrinker_register(sb);
696 	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
697 	err = erofs_init_managed_cache(sb);
698 	if (err)
699 		return err;
700 
701 	err = erofs_register_sysfs(sb);
702 	if (err)
703 		return err;
704 
705 	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
706 	return 0;
707 }
708 
709 static int erofs_fc_get_tree(struct fs_context *fc)
710 {
711 	return get_tree_bdev(fc, erofs_fc_fill_super);
712 }
713 
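/* remount only updates mount options; the filesystem always stays read-only */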
714 static int erofs_fc_reconfigure(struct fs_context *fc)
715 {
716 	struct super_block *sb = fc->root->d_sb;
717 	struct erofs_sb_info *sbi = EROFS_SB(sb);
718 	struct erofs_fs_context *ctx = fc->fs_private;
719 
720 	DBG_BUGON(!sb_rdonly(sb));
721 
722 	if (test_opt(&ctx->opt, POSIX_ACL))
723 		fc->sb_flags |= SB_POSIXACL;
724 	else
725 		fc->sb_flags &= ~SB_POSIXACL;
726 
727 	sbi->opt = ctx->opt;
728 
729 	fc->sb_flags |= SB_RDONLY;
730 	return 0;
731 }
732 
733 static int erofs_release_device_info(int id, void *ptr, void *data)
734 {
735 	struct erofs_device_info *dif = ptr;
736 
737 	fs_put_dax(dif->dax_dev);
738 	if (dif->bdev)
739 		blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
740 	kfree(dif->path);
741 	kfree(dif);
742 	return 0;
743 }
744 
745 static void erofs_free_dev_context(struct erofs_dev_context *devs)
746 {
747 	if (!devs)
748 		return;
749 	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
750 	idr_destroy(&devs->tree);
751 	kfree(devs);
752 }
753 
754 static void erofs_fc_free(struct fs_context *fc)
755 {
756 	struct erofs_fs_context *ctx = fc->fs_private;
757 
758 	erofs_free_dev_context(ctx->devs);
759 	kfree(ctx);
760 }
761 
762 static const struct fs_context_operations erofs_context_ops = {
763 	.parse_param	= erofs_fc_parse_param,
764 	.get_tree       = erofs_fc_get_tree,
765 	.reconfigure    = erofs_fc_reconfigure,
766 	.free		= erofs_fc_free,
767 };
768 
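/*
 * Allocate per-fs_context state, including the extra-device IDR; ownership
 * of the device list moves to sb_info in erofs_fc_fill_super(), otherwise
 * it is released by erofs_fc_free().
 */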
769 static int erofs_init_fs_context(struct fs_context *fc)
770 {
771 	struct erofs_fs_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
772 
773 	if (!ctx)
774 		return -ENOMEM;
775 	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
776 	if (!ctx->devs) {
777 		kfree(ctx);
778 		return -ENOMEM;
779 	}
780 	fc->fs_private = ctx;
781 
782 	idr_init(&ctx->devs->tree);
783 	init_rwsem(&ctx->devs->rwsem);
784 	erofs_default_options(ctx);
785 	fc->ops = &erofs_context_ops;
786 	return 0;
787 }
788 
789 /*
790  * could be triggered after deactivate_locked_super()
791  * is called, thus covering both umount and failed initialization.
792  */
793 static void erofs_kill_sb(struct super_block *sb)
794 {
795 	struct erofs_sb_info *sbi;
796 
797 	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
798 
799 	kill_block_super(sb);
800 
801 	sbi = EROFS_SB(sb);
802 	if (!sbi)
803 		return;
804 
805 	erofs_free_dev_context(sbi->devs);
806 	fs_put_dax(sbi->dax_dev);
807 	kfree(sbi);
808 	sb->s_fs_info = NULL;
809 }
810 
811 /* called when ->s_root is non-NULL */
812 static void erofs_put_super(struct super_block *sb)
813 {
814 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
815 
816 	DBG_BUGON(!sbi);
817 
818 	erofs_unregister_sysfs(sb);
819 	erofs_shrinker_unregister(sb);
820 #ifdef CONFIG_EROFS_FS_ZIP
821 	iput(sbi->managed_cache);
822 	sbi->managed_cache = NULL;
823 #endif
824 }
825 
826 static struct file_system_type erofs_fs_type = {
827 	.owner          = THIS_MODULE,
828 	.name           = "erofs",
829 	.init_fs_context = erofs_init_fs_context,
830 	.kill_sb        = erofs_kill_sb,
831 	.fs_flags       = FS_REQUIRES_DEV,
832 };
833 MODULE_ALIAS_FS("erofs");
834 
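/*
 * Subsystems are brought up in dependency order; the unwind labels below
 * tear them down in exactly the reverse order on failure.
 */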
835 static int __init erofs_module_init(void)
836 {
837 	int err;
838 
839 	erofs_check_ondisk_layout_definitions();
840 
841 	erofs_inode_cachep = kmem_cache_create("erofs_inode",
842 					       sizeof(struct erofs_inode), 0,
843 					       SLAB_RECLAIM_ACCOUNT,
844 					       erofs_inode_init_once);
845 	if (!erofs_inode_cachep) {
846 		err = -ENOMEM;
847 		goto icache_err;
848 	}
849 
850 	err = erofs_init_shrinker();
851 	if (err)
852 		goto shrinker_err;
853 
854 	err = z_erofs_lzma_init();
855 	if (err)
856 		goto lzma_err;
857 
858 	erofs_pcpubuf_init();
859 	err = z_erofs_init_zip_subsystem();
860 	if (err)
861 		goto zip_err;
862 
863 	err = erofs_init_sysfs();
864 	if (err)
865 		goto sysfs_err;
866 
867 	err = register_filesystem(&erofs_fs_type);
868 	if (err)
869 		goto fs_err;
870 
871 	return 0;
872 
873 fs_err:
874 	erofs_exit_sysfs();
875 sysfs_err:
876 	z_erofs_exit_zip_subsystem();
877 zip_err:
878 	z_erofs_lzma_exit();
879 lzma_err:
880 	erofs_exit_shrinker();
881 shrinker_err:
882 	kmem_cache_destroy(erofs_inode_cachep);
883 icache_err:
884 	return err;
885 }
886 
887 static void __exit erofs_module_exit(void)
888 {
889 	unregister_filesystem(&erofs_fs_type);
890 
891 	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
892 	rcu_barrier();
893 
894 	erofs_exit_sysfs();
895 	z_erofs_exit_zip_subsystem();
896 	z_erofs_lzma_exit();
897 	erofs_exit_shrinker();
898 	kmem_cache_destroy(erofs_inode_cachep);
899 	erofs_pcpubuf_exit();
900 }
901 
902 /* get filesystem statistics */
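/* read-only fs: no free blocks; f_files is nominal and f_ffree reflects the on-disk inode count */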
903 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
904 {
905 	struct super_block *sb = dentry->d_sb;
906 	struct erofs_sb_info *sbi = EROFS_SB(sb);
907 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
908 
909 	buf->f_type = sb->s_magic;
910 	buf->f_bsize = EROFS_BLKSIZ;
911 	buf->f_blocks = sbi->total_blocks;
912 	buf->f_bfree = buf->f_bavail = 0;
913 
914 	buf->f_files = ULLONG_MAX;
915 	buf->f_ffree = ULLONG_MAX - sbi->inos;
916 
917 	buf->f_namelen = EROFS_NAME_LEN;
918 
919 	buf->f_fsid    = u64_to_fsid(id);
920 	return 0;
921 }
922 
923 static int erofs_show_options(struct seq_file *seq, struct dentry *root)
924 {
925 	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
926 	struct erofs_mount_opts *opt = &sbi->opt;
927 
928 #ifdef CONFIG_EROFS_FS_XATTR
929 	if (test_opt(opt, XATTR_USER))
930 		seq_puts(seq, ",user_xattr");
931 	else
932 		seq_puts(seq, ",nouser_xattr");
933 #endif
934 #ifdef CONFIG_EROFS_FS_POSIX_ACL
935 	if (test_opt(opt, POSIX_ACL))
936 		seq_puts(seq, ",acl");
937 	else
938 		seq_puts(seq, ",noacl");
939 #endif
940 #ifdef CONFIG_EROFS_FS_ZIP
941 	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
942 		seq_puts(seq, ",cache_strategy=disabled");
943 	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
944 		seq_puts(seq, ",cache_strategy=readahead");
945 	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
946 		seq_puts(seq, ",cache_strategy=readaround");
947 #endif
948 	if (test_opt(opt, DAX_ALWAYS))
949 		seq_puts(seq, ",dax=always");
950 	if (test_opt(opt, DAX_NEVER))
951 		seq_puts(seq, ",dax=never");
952 	return 0;
953 }
954 
955 const struct super_operations erofs_sops = {
956 	.put_super = erofs_put_super,
957 	.alloc_inode = erofs_alloc_inode,
958 	.free_inode = erofs_free_inode,
959 	.statfs = erofs_statfs,
960 	.show_options = erofs_show_options,
961 };
962 
963 module_init(erofs_module_init);
964 module_exit(erofs_module_exit);
965 
966 MODULE_DESCRIPTION("Enhanced ROM File System");
967 MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
968 MODULE_LICENSE("GPL");
969