// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

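/*
 * printk helpers behind the erofs_err()/erofs_info() macros: both
 * prefix messages with the device name; the error variant also logs
 * the calling function.
 */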
void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}

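/*
 * Verify the crc32c checksum recorded in the on-disk superblock: it is
 * computed over (at most) one block of superblock data starting at
 * EROFS_SUPER_OFFSET, with the checksum field itself zeroed.
 */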
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	size_t len = 1 << EROFS_SB(sb)->blkszbits;
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	if (len > EROFS_SUPER_OFFSET)
		len -= EROFS_SUPER_OFFSET;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* to allow for x86 boot sectors and other oddities. */
	crc = crc32c(~0, dsb, len);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

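/*
 * Inodes come from erofs_inode_cachep; the slab constructor initializes
 * only the embedded VFS inode once, so every field in front of
 * vfs_inode must be cleared on each allocation.
 */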
static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);
	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
			   feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}

/* read variable-sized metadata, the offset is first rounded up to a 4-byte boundary */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
			    len - i);
		ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
		if (IS_ERR(ptr)) {
			kfree(buffer);
			return ptr;
		}
		memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
		*offset += cnt;
	}
	return buffer;
}

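/*
 * Load the per-algorithm compression configurations that follow the
 * (extended) superblock: one variable-sized record per bit set in
 * available_compr_algs, in ascending algorithm order.
 */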
#ifdef CONFIG_EROFS_FS_ZIP
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EINVAL;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		switch (alg) {
		case Z_EROFS_COMPRESSION_LZ4:
			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
			break;
		case Z_EROFS_COMPRESSION_LZMA:
			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
			break;
		case Z_EROFS_COMPRESSION_DEFLATE:
			ret = z_erofs_load_deflate_config(sb, dsb, data, size);
			break;
		default:
			DBG_BUGON(1);
			ret = -EFAULT;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	if (dsb->u1.available_compr_algs) {
		erofs_err(sb, "try to load compressed fs when compression is disabled");
		return -EINVAL;
	}
	return 0;
}
#endif

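/*
 * Initialize one extra device from its on-disk slot: resolve its
 * backing source (fscache cookie, named block device, or a flat range
 * of the primary device) and add its blocks to the filesystem total.
 */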
static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
			     struct erofs_device_info *dif, erofs_off_t *pos)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	struct erofs_deviceslot *dis;
	struct block_device *bdev;
	void *ptr;

	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dis = ptr + erofs_blkoff(sb, *pos);

	if (!sbi->devs->flatdev && !dif->path) {
		if (!dis->tag[0]) {
			erofs_err(sb, "empty device tag @ pos %llu", *pos);
			return -EINVAL;
		}
		dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
		if (!dif->path)
			return -ENOMEM;
	}

	if (erofs_is_fscache_mode(sb)) {
		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
		if (IS_ERR(fscache))
			return PTR_ERR(fscache);
		dif->fscache = fscache;
	} else if (!sbi->devs->flatdev) {
		bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
					  NULL);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dif->bdev = bdev;
		dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
						  NULL, NULL);
	}

	dif->blocks = le32_to_cpu(dis->blocks);
	dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
	sbi->total_blocks += dif->blocks;
	*pos += EROFS_DEVT_SLOT_SIZE;
	return 0;
}

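/*
 * Walk the on-disk device table.  Devices may be pre-registered via
 * the "device=" mount option (counts must then match the table), or
 * instantiated here directly from the on-disk slots.
 */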
static int erofs_scan_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_device_info *dif;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (sbi->devs->extra_devices &&
	    ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

	if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
		sbi->devs->flatdev = true;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	if (sbi->devs->extra_devices) {
		idr_for_each_entry(&sbi->devs->tree, dif, id) {
			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	} else {
		for (id = 0; id < ondisk_extradevs; id++) {
			dif = kzalloc(sizeof(*dif), GFP_KERNEL);
			if (!dif) {
				err = -ENOMEM;
				break;
			}

			err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
			if (err < 0) {
				kfree(dif);
				break;
			}
			++sbi->devs->extra_devices;

			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	}
	up_read(&sbi->devs->rwsem);
	erofs_put_metabuf(&buf);
	return err;
}

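/*
 * Read the on-disk superblock at EROFS_SUPER_OFFSET, validate magic,
 * block size and feature bits, then populate erofs_sb_info from it.
 */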
static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;
	void *data;
	int ret;

	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
	if (IS_ERR(data)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(data);
	}

	sbi = EROFS_SB(sb);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->blkszbits  = dsb->blkszbits;
	if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
		erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
		goto out;
	}
	if (dsb->dirblkbits) {
		erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
	sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
	sbi->xattr_prefix_count = dsb->xattr_prefix_count;
	sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	if (erofs_sb_has_compr_cfgs(sbi))
		ret = erofs_load_compr_cfgs(sb, dsb);
	else
		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_scan_devices(sb, dsb);

	if (erofs_is_fscache_mode(sb))
		erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
out:
	erofs_put_metabuf(&buf);
	return ret;
}

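/* set up default mount options before the actual ones are parsed */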
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->opt.max_sync_decompress_pages = 3;
	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&ctx->opt, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_fsid,
	Opt_domain_id,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always",	EROFS_MOUNT_DAX_ALWAYS},
	{"never",	EROFS_MOUNT_DAX_NEVER},
	{}
};

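/*
 * Mount option table.  Illustrative invocations (device paths are
 * placeholders only):
 *
 *   mount -t erofs -o cache_strategy=readaround,dax=never /dev/vdb /mnt
 *   mount -t erofs -o device=/dev/vdc /dev/vdb /mnt
 */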
static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr",	Opt_user_xattr),
	fsparam_flag_no("acl",		Opt_acl),
	fsparam_enum("cache_strategy",	Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax",             Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device",	Opt_device),
	fsparam_string("fsid",		Opt_fsid),
	fsparam_string("domain_id",	Opt_domain_id),
	{}
};

static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(&ctx->opt, DAX_ALWAYS);
		clear_opt(&ctx->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&ctx->opt, DAX_NEVER);
		clear_opt(&ctx->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

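/*
 * Parse one mount parameter.  Options for features compiled out are
 * reported via errorfc() but only dax is treated as a hard failure.
 */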
static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&ctx->opt, XATTR_USER);
		else
			clear_opt(&ctx->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&ctx->opt, POSIX_ACL);
		else
			clear_opt(&ctx->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&ctx->devs->rwsem);
		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&ctx->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++ctx->devs->extra_devices;
		break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
	case Opt_fsid:
		kfree(ctx->fsid);
		ctx->fsid = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->fsid)
			return -ENOMEM;
		break;
	case Opt_domain_id:
		kfree(ctx->domain_id);
		ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->domain_id)
			return -ENOMEM;
		break;
#else
	case Opt_fsid:
	case Opt_domain_id:
		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
		break;
#endif
	default:
		return -ENOPARAM;
	}
	return 0;
}

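/*
 * NFS export: file handles carry the inode number (the erofs nid), so
 * both handle lookups decode straight to erofs_iget(); get_parent
 * resolves ".." through a regular directory lookup.
 */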
static struct inode *erofs_nfs_get_inode(struct super_block *sb,
					 u64 ino, u32 generation)
{
	return erofs_iget(sb, ino);
}

static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_get_parent(struct dentry *child)
{
	erofs_nid_t nid;
	unsigned int d_type;
	int err;

	err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
	if (err)
		return ERR_PTR(err);
	return d_obtain_alias(erofs_iget(child->d_sb, nid));
}

static const struct export_operations erofs_export_ops = {
	.fh_to_dentry = erofs_fh_to_dentry,
	.fh_to_parent = erofs_fh_to_parent,
	.get_parent = erofs_get_parent,
};

static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr empty_descr = {""};

	return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
}

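/*
 * Set up a new superblock: take over the parsed context, use PAGE_SIZE
 * as the bootstrap block size until the on-disk superblock tells the
 * real one, then look up the root (and optional packed) inode.
 */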
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &erofs_sops;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->opt = ctx->opt;
	sbi->devs = ctx->devs;
	ctx->devs = NULL;
	sbi->fsid = ctx->fsid;
	ctx->fsid = NULL;
	sbi->domain_id = ctx->domain_id;
	ctx->domain_id = NULL;

	sbi->blkszbits = PAGE_SHIFT;
	if (erofs_is_fscache_mode(sb)) {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;

		err = erofs_fscache_register_fs(sb);
		if (err)
			return err;

		err = super_setup_bdi(sb);
		if (err)
			return err;
	} else {
		if (!sb_set_blocksize(sb, PAGE_SIZE)) {
			errorfc(fc, "failed to set initial blksize");
			return -EINVAL;
		}

		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
						  &sbi->dax_part_off,
						  NULL, NULL);
	}

	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (sb->s_blocksize_bits != sbi->blkszbits) {
		if (erofs_is_fscache_mode(sb)) {
			errorfc(fc, "unsupported blksize for fscache mode");
			return -EINVAL;
		}
		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
			errorfc(fc, "failed to set erofs blksize");
			return -EINVAL;
		}
	}

	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
		if (!sbi->dax_dev) {
			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		} else if (sbi->blkszbits != PAGE_SHIFT) {
			errorfc(fc, "unsupported blocksize for DAX");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		}
	}

	sb->s_time_gran = 1;
	sb->s_xattr = erofs_xattr_handlers;
	sb->s_export_op = &erofs_export_ops;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	inode = erofs_iget(sb, ROOT_NID(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
		if (IS_ERR(sbi->packed_inode)) {
			err = PTR_ERR(sbi->packed_inode);
			sbi->packed_inode = NULL;
			return err;
		}
	}
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	err = erofs_xattr_prefixes_init(sb);
	if (err)
		return err;

	err = erofs_register_sysfs(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}

static int erofs_fc_anon_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
}

static int erofs_fc_get_tree(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
		return get_tree_nodev(fc, erofs_fc_fill_super);

	return get_tree_bdev(fc, erofs_fc_fill_super);
}

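/* remount: erofs stays read-only; only mount options may be updated */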
static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (ctx->fsid || ctx->domain_id)
		erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");

	if (test_opt(&ctx->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = ctx->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev, NULL);
	if (dif->bdev)
		blkdev_put(dif->bdev, &erofs_fs_type);
	erofs_fscache_unregister_cookie(dif->fscache);
	dif->fscache = NULL;
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}

static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	erofs_free_dev_context(ctx->devs);
	kfree(ctx->fsid);
	kfree(ctx->domain_id);
	kfree(ctx);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param	= erofs_fc_parse_param,
	.get_tree       = erofs_fc_get_tree,
	.reconfigure    = erofs_fc_reconfigure,
	.free		= erofs_fc_free,
};

static const struct fs_context_operations erofs_anon_context_ops = {
	.get_tree       = erofs_fc_anon_get_tree,
};

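/*
 * Allocate the per-mount parsing context; kernel-internal (pseudo)
 * mounts skip option parsing entirely.
 */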
static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_fs_context *ctx;

	/* pseudo mount for anon inodes */
	if (fc->sb_flags & SB_KERNMOUNT) {
		fc->ops = &erofs_anon_context_ops;
		return 0;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!ctx->devs) {
		kfree(ctx);
		return -ENOMEM;
	}
	fc->fs_private = ctx;

	idr_init(&ctx->devs->tree);
	init_rwsem(&ctx->devs->rwsem);
	erofs_default_options(ctx);
	fc->ops = &erofs_context_ops;
	return 0;
}

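/*
 * Tear down a superblock: fscache-based mounts are nodev superblocks,
 * so pick the matching generic killer before releasing sbi resources.
 */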
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	/* pseudo mount for anon inodes */
	if (sb->s_flags & SB_KERNMOUNT) {
		kill_anon_super(sb);
		return;
	}

	if (erofs_is_fscache_mode(sb))
		kill_anon_super(sb);
	else
		kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev, NULL);
	erofs_fscache_unregister_fs(sb);
	kfree(sbi->fsid);
	kfree(sbi->domain_id);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_unregister_sysfs(sb);
	erofs_shrinker_unregister(sb);
	erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
	iput(sbi->packed_inode);
	sbi->packed_inode = NULL;
	erofs_free_dev_context(sbi->devs);
	sbi->devs = NULL;
	erofs_fscache_unregister_fs(sb);
}

struct file_system_type erofs_fs_type = {
	.owner          = THIS_MODULE,
	.name           = "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb        = erofs_kill_sb,
	.fs_flags       = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");

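/*
 * Module init: register the inode cache and auxiliary subsystems before
 * the filesystem type becomes visible; unwind in reverse on failure.
 */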
static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
			sizeof(struct erofs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			erofs_inode_init_once);
	if (!erofs_inode_cachep)
		return -ENOMEM;

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_lzma_init();
	if (err)
		goto lzma_err;

	err = z_erofs_deflate_init();
	if (err)
		goto deflate_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = erofs_init_sysfs();
	if (err)
		goto sysfs_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	erofs_exit_sysfs();
sysfs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	z_erofs_deflate_exit();
deflate_err:
	z_erofs_lzma_exit();
lzma_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);

	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
	rcu_barrier();

	erofs_exit_sysfs();
	z_erofs_exit_zip_subsystem();
	z_erofs_deflate_exit();
	z_erofs_lzma_exit();
	erofs_exit_shrinker();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}

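/*
 * statfs: erofs is read-only, so report no free/available blocks and
 * derive the free file count from the recorded inode count.
 */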
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = 0;

	if (!erofs_is_fscache_mode(sb))
		id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid    = u64_to_fsid(id);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(opt, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(opt, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (sbi->fsid)
		seq_printf(seq, ",fsid=%s", sbi->fsid);
	if (sbi->domain_id)
		seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");