xref: /openbmc/linux/fs/erofs/super.c (revision 236a9bf2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}

static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	size_t len = 1 << EROFS_SB(sb)->blkszbits;
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	if (len > EROFS_SUPER_OFFSET)
		len -= EROFS_SUPER_OFFSET;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* recompute crc32c of the on-disk superblock with its checksum field zeroed */
	crc = crc32c(~0, dsb, len);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

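/*
 * Worked example (not normative, just arithmetic on the code above): with
 * 4KiB blocks (blkszbits == 12), len starts at 4096 and is reduced to
 * 4096 - EROFS_SUPER_OFFSET = 3968 bytes, so the crc32c is seeded with ~0
 * and covers everything in block 0 past the 128-byte area kept clear for
 * boot sectors and other oddities, with the checksum field itself zeroed.
 */
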
static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);
	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
			   feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}

/* read variable-sized metadata; *offset is first aligned to a 4-byte boundary */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
			    len - i);
		ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
		if (IS_ERR(ptr)) {
			kfree(buffer);
			return ptr;
		}
		memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
		*offset += cnt;
	}
	return buffer;
}

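/*
 * Layout note (derived from erofs_read_metadata() above, not a separate
 * spec): each variable-sized metadata record starts at a 4-byte aligned
 * offset with a __le16 byte count, where 0 encodes 65536 (U16_MAX + 1),
 * followed by the payload, which may straddle block boundaries and is
 * therefore copied out block by block.
 */
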
#ifndef CONFIG_EROFS_FS_ZIP
static int z_erofs_parse_cfgs(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	if (!dsb->u1.available_compr_algs)
		return 0;

	erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
	return -EOPNOTSUPP;
}
#endif

static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
			     struct erofs_device_info *dif, erofs_off_t *pos)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	struct erofs_deviceslot *dis;
	struct block_device *bdev;
	void *ptr;

	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dis = ptr + erofs_blkoff(sb, *pos);

	if (!sbi->devs->flatdev && !dif->path) {
		if (!dis->tag[0]) {
			erofs_err(sb, "empty device tag @ pos %llu", *pos);
			return -EINVAL;
		}
		dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
		if (!dif->path)
			return -ENOMEM;
	}

	if (erofs_is_fscache_mode(sb)) {
		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
		if (IS_ERR(fscache))
			return PTR_ERR(fscache);
		dif->fscache = fscache;
	} else if (!sbi->devs->flatdev) {
		bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
					  NULL);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dif->bdev = bdev;
		dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
						  NULL, NULL);
	}

	dif->blocks = le32_to_cpu(dis->blocks);
	dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
	sbi->total_blocks += dif->blocks;
	*pos += EROFS_DEVT_SLOT_SIZE;
	return 0;
}

static int erofs_scan_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_device_info *dif;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (sbi->devs->extra_devices &&
	    ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

	if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
		sbi->devs->flatdev = true;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	if (sbi->devs->extra_devices) {
		idr_for_each_entry(&sbi->devs->tree, dif, id) {
			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	} else {
		for (id = 0; id < ondisk_extradevs; id++) {
			dif = kzalloc(sizeof(*dif), GFP_KERNEL);
			if (!dif) {
				err = -ENOMEM;
				break;
			}

			err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
			if (err < 0) {
				kfree(dif);
				break;
			}
			++sbi->devs->extra_devices;

			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	}
	up_read(&sbi->devs->rwsem);
	erofs_put_metabuf(&buf);
	return err;
}

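/*
 * Note (summarizing the two branches above): extra devices either come
 * pre-registered via "-o device=" options, in which case the idr tree is
 * already populated and the on-disk slot count must match exactly, or
 * they are discovered purely from the on-disk device table (the "flatdev"
 * case for non-fscache mounts), in which case entries are allocated here.
 */
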
static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;
	void *data;
	int ret;

	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
	if (IS_ERR(data)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(data);
	}

	sbi = EROFS_SB(sb);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->blkszbits  = dsb->blkszbits;
	if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
		erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
		goto out;
	}
	if (dsb->dirblkbits) {
		erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
	sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
	sbi->xattr_prefix_count = dsb->xattr_prefix_count;
	sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	ret = z_erofs_parse_cfgs(sb, dsb);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_scan_devices(sb, dsb);

	if (erofs_is_fscache_mode(sb))
		erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
out:
	erofs_put_metabuf(&buf);
	return ret;
}

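/*
 * Reading note (restating the flow above): the on-disk superblock sits at
 * byte offset EROFS_SUPER_OFFSET (128) inside block 0; magic, block size
 * and the optional checksum are validated before any other field is
 * trusted, and unknown incompatible feature bits abort the mount before
 * extra devices are scanned.
 */
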
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->opt.max_sync_decompress_pages = 3;
	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&ctx->opt, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_fsid,
	Opt_domain_id,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always",	EROFS_MOUNT_DAX_ALWAYS},
	{"never",	EROFS_MOUNT_DAX_NEVER},
	{}
};

static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr",	Opt_user_xattr),
	fsparam_flag_no("acl",		Opt_acl),
	fsparam_enum("cache_strategy",	Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax",             Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device",	Opt_device),
	fsparam_string("fsid",		Opt_fsid),
	fsparam_string("domain_id",	Opt_domain_id),
	{}
};

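/*
 * Example invocations accepted by the table above (illustrative only, with
 * made-up device names; each option is honoured only when the matching
 * kernel config is enabled):
 *
 *   mount -t erofs -o cache_strategy=readahead,noacl /dev/vdb /mnt
 *   mount -t erofs -o dax=always /dev/pmem0 /mnt
 *   mount -t erofs -o device=/dev/vdc,device=/dev/vdd /dev/vdb /mnt
 */
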
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(&ctx->opt, DAX_ALWAYS);
		clear_opt(&ctx->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&ctx->opt, DAX_NEVER);
		clear_opt(&ctx->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&ctx->opt, XATTR_USER);
		else
			clear_opt(&ctx->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&ctx->opt, POSIX_ACL);
		else
			clear_opt(&ctx->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&ctx->devs->rwsem);
		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&ctx->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++ctx->devs->extra_devices;
		break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
	case Opt_fsid:
		kfree(ctx->fsid);
		ctx->fsid = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->fsid)
			return -ENOMEM;
		break;
	case Opt_domain_id:
		kfree(ctx->domain_id);
		ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->domain_id)
			return -ENOMEM;
		break;
#else
	case Opt_fsid:
	case Opt_domain_id:
		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
		break;
#endif
	default:
		return -ENOPARAM;
	}
	return 0;
}

static struct inode *erofs_nfs_get_inode(struct super_block *sb,
					 u64 ino, u32 generation)
{
	return erofs_iget(sb, ino);
}

static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_get_parent(struct dentry *child)
{
	erofs_nid_t nid;
	unsigned int d_type;
	int err;

	err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
	if (err)
		return ERR_PTR(err);
	return d_obtain_alias(erofs_iget(child->d_sb, nid));
}

static const struct export_operations erofs_export_ops = {
	.fh_to_dentry = erofs_fh_to_dentry,
	.fh_to_parent = erofs_fh_to_parent,
	.get_parent = erofs_get_parent,
};

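/*
 * Export note (summarizing the helpers above): file handles are decoded by
 * the generic exportfs helpers, so the erofs nid travels as the inode
 * number while erofs_nfs_get_inode() ignores the generation (the
 * filesystem is read-only, so inodes are never reused); parents are found
 * via an actual on-disk ".." lookup rather than cached state.
 */
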
static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr empty_descr = {""};

	return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
}

static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &erofs_sops;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->opt = ctx->opt;
	sbi->devs = ctx->devs;
	ctx->devs = NULL;
	sbi->fsid = ctx->fsid;
	ctx->fsid = NULL;
	sbi->domain_id = ctx->domain_id;
	ctx->domain_id = NULL;

	sbi->blkszbits = PAGE_SHIFT;
	if (erofs_is_fscache_mode(sb)) {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;

		err = erofs_fscache_register_fs(sb);
		if (err)
			return err;

		err = super_setup_bdi(sb);
		if (err)
			return err;
	} else {
		if (!sb_set_blocksize(sb, PAGE_SIZE)) {
			errorfc(fc, "failed to set initial blksize");
			return -EINVAL;
		}

		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
						  &sbi->dax_part_off,
						  NULL, NULL);
	}

	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (sb->s_blocksize_bits != sbi->blkszbits) {
		if (erofs_is_fscache_mode(sb)) {
			errorfc(fc, "unsupported blksize for fscache mode");
			return -EINVAL;
		}
		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
			errorfc(fc, "failed to set erofs blksize");
			return -EINVAL;
		}
	}

	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
		if (!sbi->dax_dev) {
			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		} else if (sbi->blkszbits != PAGE_SHIFT) {
			errorfc(fc, "unsupported blocksize for DAX");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		}
	}

	sb->s_time_gran = 1;
	sb->s_xattr = erofs_xattr_handlers;
	sb->s_export_op = &erofs_export_ops;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	inode = erofs_iget(sb, ROOT_NID(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
		if (IS_ERR(sbi->packed_inode)) {
			err = PTR_ERR(sbi->packed_inode);
			sbi->packed_inode = NULL;
			return err;
		}
	}
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	err = erofs_xattr_prefixes_init(sb);
	if (err)
		return err;

	err = erofs_register_sysfs(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}

static int erofs_fc_anon_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
}

static int erofs_fc_get_tree(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
		return get_tree_nodev(fc, erofs_fc_fill_super);

	return get_tree_bdev(fc, erofs_fc_fill_super);
}

static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (ctx->fsid || ctx->domain_id)
		erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");

	if (test_opt(&ctx->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = ctx->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

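/*
 * Remount note (restating erofs_fc_reconfigure() above): erofs is always
 * read-only, so SB_RDONLY is forced back on and only mount options such
 * as the POSIX ACL flag are refreshed; fsid/domain_id changes are
 * deliberately ignored at reconfigure time.
 */
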
static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev, NULL);
	if (dif->bdev)
		blkdev_put(dif->bdev, &erofs_fs_type);
	erofs_fscache_unregister_cookie(dif->fscache);
	dif->fscache = NULL;
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}

static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	erofs_free_dev_context(ctx->devs);
	kfree(ctx->fsid);
	kfree(ctx->domain_id);
	kfree(ctx);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param	= erofs_fc_parse_param,
	.get_tree       = erofs_fc_get_tree,
	.reconfigure    = erofs_fc_reconfigure,
	.free		= erofs_fc_free,
};

static const struct fs_context_operations erofs_anon_context_ops = {
	.get_tree       = erofs_fc_anon_get_tree,
};

static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_fs_context *ctx;

	/* pseudo mount for anon inodes */
	if (fc->sb_flags & SB_KERNMOUNT) {
		fc->ops = &erofs_anon_context_ops;
		return 0;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!ctx->devs) {
		kfree(ctx);
		return -ENOMEM;
	}
	fc->fs_private = ctx;

	idr_init(&ctx->devs->tree);
	init_rwsem(&ctx->devs->rwsem);
	erofs_default_options(ctx);
	fc->ops = &erofs_context_ops;
	return 0;
}

static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	/* pseudo mount for anon inodes */
	if (sb->s_flags & SB_KERNMOUNT) {
		kill_anon_super(sb);
		return;
	}

	if (erofs_is_fscache_mode(sb))
		kill_anon_super(sb);
	else
		kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev, NULL);
	erofs_fscache_unregister_fs(sb);
	kfree(sbi->fsid);
	kfree(sbi->domain_id);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_unregister_sysfs(sb);
	erofs_shrinker_unregister(sb);
	erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
	iput(sbi->packed_inode);
	sbi->packed_inode = NULL;
	erofs_free_dev_context(sbi->devs);
	sbi->devs = NULL;
	erofs_fscache_unregister_fs(sb);
}

struct file_system_type erofs_fs_type = {
	.owner          = THIS_MODULE,
	.name           = "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb        = erofs_kill_sb,
	.fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");

static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
			sizeof(struct erofs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			erofs_inode_init_once);
	if (!erofs_inode_cachep)
		return -ENOMEM;

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_lzma_init();
	if (err)
		goto lzma_err;

	err = z_erofs_deflate_init();
	if (err)
		goto deflate_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = erofs_init_sysfs();
	if (err)
		goto sysfs_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	erofs_exit_sysfs();
sysfs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	z_erofs_deflate_exit();
deflate_err:
	z_erofs_lzma_exit();
lzma_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
	return err;
}

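/*
 * Teardown note (mirroring erofs_module_init() above): the error labels
 * unwind in exact reverse order of initialization, and erofs_module_exit()
 * below runs the same sequence after unregistering the filesystem and
 * waiting out RCU-deferred frees via rcu_barrier().
 */
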
static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);

	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
	rcu_barrier();

	erofs_exit_sysfs();
	z_erofs_exit_zip_subsystem();
	z_erofs_deflate_exit();
	z_erofs_lzma_exit();
	erofs_exit_shrinker();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}

static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = 0;

	if (!erofs_is_fscache_mode(sb))
		id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid    = u64_to_fsid(id);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(opt, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(opt, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (sbi->fsid)
		seq_printf(seq, ",fsid=%s", sbi->fsid);
	if (sbi->domain_id)
		seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");