// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}

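/*
 * The superblock checksum is a crc32c over the on-disk superblock area
 * starting at EROFS_SUPER_OFFSET, computed with the checksum field
 * itself treated as zero.
 */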
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	size_t len = 1 << EROFS_SB(sb)->blkszbits;
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	if (len > EROFS_SUPER_OFFSET)
		len -= EROFS_SUPER_OFFSET;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* to allow for x86 boot sectors and other oddities. */
	crc = crc32c(~0, dsb, len);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);
	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
			  feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}

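/*
 * Such metadata is laid out as a __le16 byte count (where 0 encodes
 * 65536) immediately followed by the payload, which may cross block
 * boundaries and is therefore copied out chunk by chunk below.
 */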
/* read variable-sized metadata, the offset will be aligned to 4 bytes */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
			    len - i);
		ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
		if (IS_ERR(ptr)) {
			kfree(buffer);
			return ptr;
		}
		memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
		*offset += cnt;
	}
	return buffer;
}

#ifndef CONFIG_EROFS_FS_ZIP
static int z_erofs_parse_cfgs(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	if (!dsb->u1.available_compr_algs)
		return 0;

	erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
	return -EOPNOTSUPP;
}
#endif

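/*
 * Set up one extra device.  In flatdev mode, all device blobs live in the
 * primary device's address space and nothing needs to be opened here; in
 * fscache mode, each device tag maps to a separate cookie; otherwise the
 * on-disk tag (or a mount-time device= path) is opened read-only as an
 * extra block device.
 */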
static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
			     struct erofs_device_info *dif, erofs_off_t *pos)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	struct erofs_deviceslot *dis;
	struct bdev_handle *bdev_handle;
	void *ptr;

	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dis = ptr + erofs_blkoff(sb, *pos);

	if (!sbi->devs->flatdev && !dif->path) {
		if (!dis->tag[0]) {
			erofs_err(sb, "empty device tag @ pos %llu", *pos);
			return -EINVAL;
		}
		dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
		if (!dif->path)
			return -ENOMEM;
	}

	if (erofs_is_fscache_mode(sb)) {
		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
		if (IS_ERR(fscache))
			return PTR_ERR(fscache);
		dif->fscache = fscache;
	} else if (!sbi->devs->flatdev) {
		bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ,
						sb->s_type, NULL);
		if (IS_ERR(bdev_handle))
			return PTR_ERR(bdev_handle);
		dif->bdev_handle = bdev_handle;
		dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev,
						  &dif->dax_part_off,
						  NULL, NULL);
	}

	dif->blocks = le32_to_cpu(dis->blocks);
	dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
	sbi->total_blocks += dif->blocks;
	*pos += EROFS_DEVT_SLOT_SIZE;
	return 0;
}

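/*
 * Walk the on-disk device table.  When device= options were supplied at
 * mount time, their count must match the on-disk one and the
 * pre-registered entries are initialized in IDR order; otherwise fresh
 * entries are allocated here, one per device slot.
 */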
static int erofs_scan_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_device_info *dif;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (sbi->devs->extra_devices &&
	    ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

	if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
		sbi->devs->flatdev = true;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	if (sbi->devs->extra_devices) {
		idr_for_each_entry(&sbi->devs->tree, dif, id) {
			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	} else {
		for (id = 0; id < ondisk_extradevs; id++) {
			dif = kzalloc(sizeof(*dif), GFP_KERNEL);
			if (!dif) {
				err = -ENOMEM;
				break;
			}

			err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
			if (err < 0) {
				kfree(dif);
				break;
			}
			++sbi->devs->extra_devices;

			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	}
	up_read(&sbi->devs->rwsem);
	erofs_put_metabuf(&buf);
	return err;
}

static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;
	void *data;
	int ret;

	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
	if (IS_ERR(data)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(data);
	}

	sbi = EROFS_SB(sb);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->blkszbits = dsb->blkszbits;
	if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
		erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
		goto out;
	}
	if (dsb->dirblkbits) {
		erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
	sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
	sbi->xattr_prefix_count = dsb->xattr_prefix_count;
	sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	ret = z_erofs_parse_cfgs(sb, dsb);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_scan_devices(sb, dsb);

	if (erofs_is_fscache_mode(sb))
		erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
out:
	erofs_put_metabuf(&buf);
	return ret;
}

static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->opt.max_sync_decompress_pages = 3;
	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&ctx->opt, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_fsid,
	Opt_domain_id,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled",	EROFS_ZIP_CACHE_DISABLED},
	{"readahead",	EROFS_ZIP_CACHE_READAHEAD},
	{"readaround",	EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always",	EROFS_MOUNT_DAX_ALWAYS},
	{"never",	EROFS_MOUNT_DAX_NEVER},
	{}
};

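/*
 * Mount option reference (device paths below are illustrative only):
 *   mount -t erofs -o dax=always,cache_strategy=readaround \
 *         -o device=/dev/vdb /dev/vda /mnt/erofs
 */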
static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr",	Opt_user_xattr),
	fsparam_flag_no("acl",		Opt_acl),
	fsparam_enum("cache_strategy",	Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device",	Opt_device),
	fsparam_string("fsid",		Opt_fsid),
	fsparam_string("domain_id",	Opt_domain_id),
	{}
};

static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(&ctx->opt, DAX_ALWAYS);
		clear_opt(&ctx->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&ctx->opt, DAX_NEVER);
		clear_opt(&ctx->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&ctx->opt, XATTR_USER);
		else
			clear_opt(&ctx->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&ctx->opt, POSIX_ACL);
		else
			clear_opt(&ctx->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&ctx->devs->rwsem);
		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&ctx->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++ctx->devs->extra_devices;
		break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
	case Opt_fsid:
		kfree(ctx->fsid);
		ctx->fsid = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->fsid)
			return -ENOMEM;
		break;
	case Opt_domain_id:
		kfree(ctx->domain_id);
		ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->domain_id)
			return -ENOMEM;
		break;
#else
	case Opt_fsid:
	case Opt_domain_id:
		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
		break;
#endif
	default:
		return -ENOPARAM;
	}
	return 0;
}

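/* NFS export support via the generic ino-based file handle helpers */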
static struct inode *erofs_nfs_get_inode(struct super_block *sb,
					 u64 ino, u32 generation)
{
	return erofs_iget(sb, ino);
}

static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_get_parent(struct dentry *child)
{
	erofs_nid_t nid;
	unsigned int d_type;
	int err;

	err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
	if (err)
		return ERR_PTR(err);
	return d_obtain_alias(erofs_iget(child->d_sb, nid));
}

static const struct export_operations erofs_export_ops = {
	.fh_to_dentry = erofs_fh_to_dentry,
	.fh_to_parent = erofs_fh_to_parent,
	.get_parent = erofs_get_parent,
};

static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &erofs_sops;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->opt = ctx->opt;
	sbi->devs = ctx->devs;
	ctx->devs = NULL;
	sbi->fsid = ctx->fsid;
	ctx->fsid = NULL;
	sbi->domain_id = ctx->domain_id;
	ctx->domain_id = NULL;

	sbi->blkszbits = PAGE_SHIFT;
	if (erofs_is_fscache_mode(sb)) {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;

		err = erofs_fscache_register_fs(sb);
		if (err)
			return err;

		err = super_setup_bdi(sb);
		if (err)
			return err;
	} else {
		if (!sb_set_blocksize(sb, PAGE_SIZE)) {
			errorfc(fc, "failed to set initial blksize");
			return -EINVAL;
		}

		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
						  &sbi->dax_part_off,
						  NULL, NULL);
	}

	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (sb->s_blocksize_bits != sbi->blkszbits) {
		if (erofs_is_fscache_mode(sb)) {
			errorfc(fc, "unsupported blksize for fscache mode");
			return -EINVAL;
		}
		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
			errorfc(fc, "failed to set erofs blksize");
			return -EINVAL;
		}
	}

	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
		if (!sbi->dax_dev) {
			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		} else if (sbi->blkszbits != PAGE_SHIFT) {
			errorfc(fc, "unsupported blocksize for DAX");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		}
	}

	sb->s_time_gran = 1;
	sb->s_xattr = erofs_xattr_handlers;
	sb->s_export_op = &erofs_export_ops;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	inode = erofs_iget(sb, ROOT_NID(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
		if (IS_ERR(sbi->packed_inode)) {
			err = PTR_ERR(sbi->packed_inode);
			sbi->packed_inode = NULL;
			return err;
		}
	}
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	err = erofs_xattr_prefixes_init(sb);
	if (err)
		return err;

	err = erofs_register_sysfs(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}

static int erofs_fc_get_tree(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
		return get_tree_nodev(fc, erofs_fc_fill_super);

	return get_tree_bdev(fc, erofs_fc_fill_super);
}

static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (ctx->fsid || ctx->domain_id)
		erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");

	if (test_opt(&ctx->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = ctx->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

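/* idr_for_each() callback releasing all resources of one extra device */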
static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev, NULL);
	if (dif->bdev_handle)
		bdev_release(dif->bdev_handle);
	erofs_fscache_unregister_cookie(dif->fscache);
	dif->fscache = NULL;
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}

static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	erofs_free_dev_context(ctx->devs);
	kfree(ctx->fsid);
	kfree(ctx->domain_id);
	kfree(ctx);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param	= erofs_fc_parse_param,
	.get_tree	= erofs_fc_get_tree,
	.reconfigure	= erofs_fc_reconfigure,
	.free		= erofs_fc_free,
};

static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_fs_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!ctx->devs) {
		kfree(ctx);
		return -ENOMEM;
	}
	fc->fs_private = ctx;

	idr_init(&ctx->devs->tree);
	init_rwsem(&ctx->devs->rwsem);
	erofs_default_options(ctx);
	fc->ops = &erofs_context_ops;
	return 0;
}

static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	if (erofs_is_fscache_mode(sb))
		kill_anon_super(sb);
	else
		kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev, NULL);
	erofs_fscache_unregister_fs(sb);
	kfree(sbi->fsid);
	kfree(sbi->domain_id);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

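/*
 * ->put_super() only runs if a root dentry was set up, so erofs_kill_sb()
 * repeats part of this cleanup for mounts that failed early; clearing
 * sbi->devs here keeps that later pass from freeing the device context
 * twice.
 */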
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_unregister_sysfs(sb);
	erofs_shrinker_unregister(sb);
	erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
	iput(sbi->packed_inode);
	sbi->packed_inode = NULL;
	erofs_free_dev_context(sbi->devs);
	sbi->devs = NULL;
	erofs_fscache_unregister_fs(sb);
}

static struct file_system_type erofs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb	= erofs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");

static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
			sizeof(struct erofs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			erofs_inode_init_once);
	if (!erofs_inode_cachep)
		return -ENOMEM;

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_lzma_init();
	if (err)
		goto lzma_err;

	err = z_erofs_deflate_init();
	if (err)
		goto deflate_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = erofs_init_sysfs();
	if (err)
		goto sysfs_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	erofs_exit_sysfs();
sysfs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	z_erofs_deflate_exit();
deflate_err:
	z_erofs_lzma_exit();
lzma_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);

	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
	rcu_barrier();

	erofs_exit_sysfs();
	z_erofs_exit_zip_subsystem();
	z_erofs_deflate_exit();
	z_erofs_lzma_exit();
	erofs_exit_shrinker();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}

static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = 0;

	if (!erofs_is_fscache_mode(sb))
		id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid = u64_to_fsid(id);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(opt, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(opt, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (sbi->fsid)
		seq_printf(seq, ",fsid=%s", sbi->fsid);
	if (sbi->domain_id)
		seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("(Enhanced) Read-Only File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");