// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

void _erofs_err(struct super_block *sb, const char *function,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}

static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
		      EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* to allow for x86 boot sectors and other oddities */
	crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	/* be careful of RCU symlink path */
	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);

	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb,
			  "unidentified incompatible feature %x, please upgrade kernel version",
			  feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}
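
/*
 * Variable-sized on-disk metadata records (e.g. per-algorithm compression
 * configurations) start with a little-endian 16-bit length prefix, where
 * a prefix of 0 stands for U16_MAX + 1 (65536) bytes; records may also
 * span multiple meta pages. The CONFIG_EROFS_FS_ZIP-only helper below
 * walks such records and hands back a kmalloc'ed copy.
 */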
#ifdef CONFIG_EROFS_FS_ZIP
/* read variable-sized metadata; the offset will be aligned to a 4-byte boundary */
static void *erofs_read_metadata(struct super_block *sb, struct page **pagep,
				 erofs_off_t *offset, int *lengthp)
{
	struct page *page = *pagep;
	u8 *buffer, *ptr;
	int len, i, cnt;
	erofs_blk_t blk;

	*offset = round_up(*offset, 4);
	blk = erofs_blknr(*offset);

	if (!page || page->index != blk) {
		if (page) {
			unlock_page(page);
			put_page(page);
		}
		page = erofs_get_meta_page(sb, blk);
		if (IS_ERR(page))
			goto err_nullpage;
	}

	ptr = kmap(page);
	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer) {
		buffer = ERR_PTR(-ENOMEM);
		goto out;
	}
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
		blk = erofs_blknr(*offset);

		if (!page || page->index != blk) {
			if (page) {
				kunmap(page);
				unlock_page(page);
				put_page(page);
			}
			page = erofs_get_meta_page(sb, blk);
			if (IS_ERR(page)) {
				kfree(buffer);
				goto err_nullpage;
			}
			ptr = kmap(page);
		}
		memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
		*offset += cnt;
	}
out:
	kunmap(page);
	*pagep = page;
	return buffer;
err_nullpage:
	*pagep = NULL;
	return page;
}

static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi;
	struct page *page;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret;

	sbi = EROFS_SB(sb);
	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);

	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EINVAL;
	}

	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	page = NULL;
	alg = 0;
	ret = 0;

	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &page, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			goto err;
		}

		switch (alg) {
		case Z_EROFS_COMPRESSION_LZ4:
			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
			break;
		default:
			DBG_BUGON(1);
			ret = -EFAULT;
		}
		kfree(data);
		if (ret)
			goto err;
	}
err:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	if (dsb->u1.available_compr_algs) {
		erofs_err(sb, "try to load compressed fs when compression is disabled");
		return -EINVAL;
	}
	return 0;
}
#endif
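
/*
 * Multi-device images carry one EROFS_DEVT_SLOT_SIZE slot per extra device
 * starting at devt_slotoff. The on-disk count must match the number of
 * "device=" mount options given; each extra device is then opened read-only
 * and exclusively by path, and its block count is accumulated so that
 * sbi->total_blocks covers the whole image.
 */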
static int erofs_init_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct page *page = NULL;
	struct erofs_device_info *dif;
	struct erofs_deviceslot *dis;
	void *ptr;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	idr_for_each_entry(&sbi->devs->tree, dif, id) {
		erofs_blk_t blk = erofs_blknr(pos);
		struct block_device *bdev;

		if (!page || page->index != blk) {
			if (page) {
				kunmap(page);
				unlock_page(page);
				put_page(page);
			}

			page = erofs_get_meta_page(sb, blk);
			if (IS_ERR(page)) {
				up_read(&sbi->devs->rwsem);
				return PTR_ERR(page);
			}
			ptr = kmap(page);
		}
		dis = ptr + erofs_blkoff(pos);

		bdev = blkdev_get_by_path(dif->path,
					  FMODE_READ | FMODE_EXCL,
					  sb->s_type);
		if (IS_ERR(bdev)) {
			err = PTR_ERR(bdev);
			goto err_out;
		}
		dif->bdev = bdev;
		dif->dax_dev = fs_dax_get_by_bdev(bdev);
		dif->blocks = le32_to_cpu(dis->blocks);
		dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
		sbi->total_blocks += dif->blocks;
		pos += EROFS_DEVT_SLOT_SIZE;
	}
err_out:
	up_read(&sbi->devs->rwsem);
	if (page) {
		kunmap(page);
		unlock_page(page);
		put_page(page);
	}
	return err;
}
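
/*
 * Read and validate the on-disk superblock: check the magic first, verify
 * the CRC32C checksum when the compat feature bit says one is present,
 * then reject unsupported block sizes and unknown incompatible features
 * before caching the remaining fields in erofs_sb_info and loading the
 * compression configurations and device table.
 */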
static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct page *page;
	struct erofs_super_block *dsb;
	unsigned int blkszbits;
	void *data;
	int ret;

	page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
	if (IS_ERR(page)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(page);
	}

	sbi = EROFS_SB(sb);

	data = kmap(page);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	blkszbits = dsb->blkszbits;
	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
	if (blkszbits != LOG_BLOCK_SIZE) {
		erofs_err(sb, "blkszbits %u isn't supported on this platform",
			  blkszbits);
		goto out;
	}

	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > EROFS_BLKSIZ) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	if (erofs_sb_has_compr_cfgs(sbi))
		ret = erofs_load_compr_cfgs(sb, dsb);
	else
		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_init_devices(sb, dsb);
out:
	kunmap(page);
	put_page(page);
	return ret;
}

/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->opt.max_sync_decompress_pages = 3;
	ctx->opt.readahead_sync_decompress = false;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&ctx->opt, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled", EROFS_ZIP_CACHE_DISABLED},
	{"readahead", EROFS_ZIP_CACHE_READAHEAD},
	{"readaround", EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always", EROFS_MOUNT_DAX_ALWAYS},
	{"never", EROFS_MOUNT_DAX_NEVER},
	{}
};
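
/*
 * Mount parameters understood by this filesystem, for instance
 * (paths below are illustrative only):
 *
 *   mount -t erofs -o noacl,cache_strategy=readahead,device=/dev/blob1 \
 *         /dev/loop0 /mnt/erofs
 *
 * A bare "dax" flag is treated as "dax=always", and "device=" may be
 * given multiple times for multi-device images.
 */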
static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr", Opt_user_xattr),
	fsparam_flag_no("acl", Opt_acl),
	fsparam_enum("cache_strategy", Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax", Opt_dax),
	fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device", Opt_device),
	{}
};

static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(&ctx->opt, DAX_ALWAYS);
		clear_opt(&ctx->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&ctx->opt, DAX_NEVER);
		clear_opt(&ctx->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&ctx->opt, XATTR_USER);
		else
			clear_opt(&ctx->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&ctx->opt, POSIX_ACL);
		else
			clear_opt(&ctx->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&ctx->devs->rwsem);
		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&ctx->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++ctx->devs->extra_devices;
		break;
	default:
		return -ENOPARAM;
	}
	return 0;
}
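
/*
 * The "managed cache" below is a pseudo inode whose page cache holds
 * compressed data shared by the decompression subsystem. Its pages are
 * reclaimable: ->releasepage lets the VM free a cached page unless it is
 * still in use, and full-page invalidation keeps retrying (with
 * cond_resched()) until the page is actually released.
 */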
#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;

static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* 0 - busy */
	struct address_space *const mapping = page->mapping;

	DBG_BUGON(!PageLocked(page));
	DBG_BUGON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(page);

	return ret;
}

static void erofs_managed_cache_invalidatepage(struct page *page,
					       unsigned int offset,
					       unsigned int length)
{
	const unsigned int stop = length + offset;

	DBG_BUGON(!PageLocked(page));

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > PAGE_SIZE || stop < length);

	if (offset == 0 && stop == PAGE_SIZE)
		while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
	.releasepage = erofs_managed_cache_releasepage,
	.invalidatepage = erofs_managed_cache_invalidatepage,
};

static int erofs_init_managed_cache(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct inode *const inode = new_inode(sb);

	if (!inode)
		return -ENOMEM;

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;

	inode->i_mapping->a_ops = &managed_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping,
			     GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	sbi->managed_cache = inode;
	return 0;
}
#else
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif
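
/*
 * Fill a superblock for mount: the block size is fixed at EROFS_BLKSIZ and
 * SB_RDONLY | SB_NOATIME are forced, since EROFS is read-only by design.
 * DAX_ALWAYS is downgraded (with a message) if dax_supported() reports the
 * block device cannot do DAX at EROFS_BLKSIZ granularity.
 */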
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;

	if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
		erofs_err(sb, "failed to set erofs blksize");
		return -EINVAL;
	}

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->opt = ctx->opt;
	sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	sbi->devs = ctx->devs;
	ctx->devs = NULL;

	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (test_opt(&sbi->opt, DAX_ALWAYS) &&
	    !dax_supported(sbi->dax_dev, sb->s_bdev, EROFS_BLKSIZ, 0,
			   bdev_nr_sectors(sb->s_bdev))) {
		errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
		clear_opt(&sbi->opt, DAX_ALWAYS);
	}
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;

	sb->s_op = &erofs_sops;
	sb->s_xattr = erofs_xattr_handlers;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi), true);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}

static int erofs_fc_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, erofs_fc_fill_super);
}

static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (test_opt(&ctx->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = ctx->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev);
	if (dif->bdev)
		blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}

static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	erofs_free_dev_context(ctx->devs);
	kfree(ctx);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param = erofs_fc_parse_param,
	.get_tree = erofs_fc_get_tree,
	.reconfigure = erofs_fc_reconfigure,
	.free = erofs_fc_free,
};

static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!ctx->devs) {
		kfree(ctx);
		return -ENOMEM;
	}
	fc->fs_private = ctx;

	idr_init(&ctx->devs->tree);
	init_rwsem(&ctx->devs->rwsem);
	erofs_default_options(ctx);
	fc->ops = &erofs_context_ops;
	return 0;
}

/*
 * This can be triggered after deactivate_locked_super() is called, which
 * covers both umount and mounts that failed to initialize.
 */
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

	kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_shrinker_unregister(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
}

static struct file_system_type erofs_fs_type = {
	.owner = THIS_MODULE,
	.name = "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb = erofs_kill_sb,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");
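
/*
 * Module bring-up order: on-disk layout sanity checks, inode cache,
 * shrinker, per-CPU buffers, the compression subsystem, and finally
 * filesystem registration; the error labels below unwind in exactly
 * the reverse order.
 */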
static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
					       sizeof(struct erofs_inode), 0,
					       SLAB_RECLAIM_ACCOUNT,
					       erofs_inode_init_once);
	if (!erofs_inode_cachep) {
		err = -ENOMEM;
		goto icache_err;
	}

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
icache_err:
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);
	z_erofs_exit_zip_subsystem();
	erofs_exit_shrinker();

	/* Ensure all RCU free inodes are safe before cache is destroyed. */
	rcu_barrier();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}

/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = EROFS_BLKSIZ;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid = u64_to_fsid(id);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(opt, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(opt, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");