// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

static void init_once(void *ptr)
{
	struct erofs_vnode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static int __init erofs_init_inode_cache(void)
{
	erofs_inode_cachep = kmem_cache_create("erofs_inode",
					       sizeof(struct erofs_vnode), 0,
					       SLAB_RECLAIM_ACCOUNT,
					       init_once);

	return erofs_inode_cachep ? 0 : -ENOMEM;
}

static void erofs_exit_inode_cache(void)
{
	kmem_cache_destroy(erofs_inode_cachep);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	struct erofs_vnode *vi =
		kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
	return &vi->vfs_inode;
}

static void free_inode(struct inode *inode)
{
	struct erofs_vnode *vi = EROFS_V(inode);

	/* be careful of the RCU symlink path (see ext4_inode_info->i_data)! */
	if (is_inode_fast_symlink(inode))
		kfree(inode->i_link);

	kfree(vi->xattr_shared_xattrs);

	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *layout)
{
	const unsigned int requirements = le32_to_cpu(layout->requirements);

	EROFS_SB(sb)->requirements = requirements;

	/* check if current kernel meets all mandatory requirements */
	if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
		errln("unidentified requirements %x, please upgrade kernel version",
		      requirements & ~EROFS_ALL_REQUIREMENTS);
		return false;
	}
	return true;
}

/* read and check the on-disk superblock, then fill the in-memory sb_info */
static int superblock_read(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct buffer_head *bh;
	struct erofs_super_block *layout;
	unsigned int blkszbits;
	int ret;

	bh = sb_bread(sb, 0);

	if (!bh) {
		errln("cannot read erofs superblock");
		return -EIO;
	}

	sbi = EROFS_SB(sb);
	layout = (struct erofs_super_block *)((u8 *)bh->b_data
					      + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) {
		errln("cannot find valid erofs superblock");
		goto out;
	}

	blkszbits = layout->blkszbits;
	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
	if (blkszbits != LOG_BLOCK_SIZE) {
		errln("blksize %u isn't supported on this platform",
		      1 << blkszbits);
		goto out;
	}

	if (!check_layout_compatibility(sb, layout))
		goto out;

	sbi->blocks = le32_to_cpu(layout->blocks);
	sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr);
#endif
	sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
	sbi->root_nid = le16_to_cpu(layout->root_nid);
	sbi->inos = le64_to_cpu(layout->inos);

	sbi->build_time = le64_to_cpu(layout->build_time);
	sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec);

	memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));

	ret = strscpy(sbi->volume_name, layout->volume_name,
		      sizeof(layout->volume_name));
	if (ret < 0) {	/* -E2BIG */
		errln("bad volume name without NUL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}
	ret = 0;
out:
	brelse(bh);
	return ret;
}

#ifdef CONFIG_EROFS_FAULT_INJECTION
const char *erofs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_READ_IO]		= "read IO error",
};

static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
				     unsigned int rate)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct erofs_fault_info));
	}

	set_opt(sbi, FAULT_INJECTION);
}

static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
				  substring_t *args)
{
	int rate = 0;

	if (args->from && match_int(args, &rate))
		return -EINVAL;

	__erofs_build_fault_attr(sbi, rate);
	return 0;
}

static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
{
	return sbi->fault_info.inject_rate;
}
#else
static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
				     unsigned int rate)
{
}

static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
				  substring_t *args)
{
	infoln("fault_injection options not supported");
	return 0;
}

static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
{
	return 0;
}
#endif

#ifdef CONFIG_EROFS_FS_ZIP
static int erofs_build_cache_strategy(struct erofs_sb_info *sbi,
				      substring_t *args)
{
	const char *cs = match_strdup(args);
	int err = 0;

	if (!cs) {
		errln("Not enough memory to store cache strategy");
		return -ENOMEM;
	}

	if (!strcmp(cs, "disabled")) {
		sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED;
	} else if (!strcmp(cs, "readahead")) {
		sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD;
	} else if (!strcmp(cs, "readaround")) {
		sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	} else {
		errln("Unrecognized cache strategy \"%s\"", cs);
		err = -EINVAL;
	}
	kfree(cs);
	return err;
}
#else
static int erofs_build_cache_strategy(struct erofs_sb_info *sbi,
				      substring_t *args)
{
	infoln("EROFS compression is disabled, so cache strategy is ignored");
	return 0;
}
#endif

/* set up default EROFS parameters */
static void default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
	sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	sbi->max_sync_decompress_pages = 3;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_fault_injection,
	Opt_cache_strategy,
	Opt_err
};

static match_table_t erofs_tokens = {
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_cache_strategy, "cache_strategy=%s"},
	{Opt_err, NULL}
};
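
/*
 * Illustration only: the token table above is what parse_options() below
 * matches each comma-separated mount option against.  As a rough example
 * (the device node and mountpoint paths here are placeholders, not defined
 * anywhere in this file), a mount invocation exercising these options could
 * look like:
 *
 *   mount -t erofs -o acl,nouser_xattr,cache_strategy=readahead \
 *         /dev/loop0 /mnt/erofs
 */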

static int parse_options(struct super_block *sb, char *options)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int err;

	if (!options)
		return 0;

	while ((p = strsep(&options, ","))) {
		int token;

		if (!*p)
			continue;

		args[0].to = args[0].from = NULL;
		token = match_token(p, erofs_tokens, args);

		switch (token) {
#ifdef CONFIG_EROFS_FS_XATTR
		case Opt_user_xattr:
			set_opt(EROFS_SB(sb), XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(EROFS_SB(sb), XATTR_USER);
			break;
#else
		case Opt_user_xattr:
			infoln("user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			infoln("nouser_xattr options not supported");
			break;
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(EROFS_SB(sb), POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(EROFS_SB(sb), POSIX_ACL);
			break;
#else
		case Opt_acl:
			infoln("acl options not supported");
			break;
		case Opt_noacl:
			infoln("noacl options not supported");
			break;
#endif
		case Opt_fault_injection:
			err = erofs_build_fault_attr(EROFS_SB(sb), args);
			if (err)
				return err;
			break;
		case Opt_cache_strategy:
			err = erofs_build_cache_strategy(EROFS_SB(sb), args);
			if (err)
				return err;
			break;
		default:
			errln("Unrecognized mount option \"%s\" or missing value", p);
			return -EINVAL;
		}
	}
	return 0;
}

#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;

static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* 0 - busy */
	struct address_space *const mapping = page->mapping;

	DBG_BUGON(!PageLocked(page));
	DBG_BUGON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(mapping, page);

	return ret;
}

static void managed_cache_invalidatepage(struct page *page,
					 unsigned int offset,
					 unsigned int length)
{
	const unsigned int stop = length + offset;

	DBG_BUGON(!PageLocked(page));

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > PAGE_SIZE || stop < length);

	if (offset == 0 && stop == PAGE_SIZE)
		while (!managed_cache_releasepage(page, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
	.releasepage = managed_cache_releasepage,
	.invalidatepage = managed_cache_invalidatepage,
};

static int erofs_init_managed_cache(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct inode *const inode = new_inode(sb);

	if (!inode)
		return -ENOMEM;

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;

	inode->i_mapping->a_ops = &managed_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping,
			     GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	sbi->managed_cache = inode;
	return 0;
}
#else
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif

static int erofs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	int err;

	infoln("fill_super, device -> %s", sb->s_id);
	infoln("options -> %s", (char *)data);

	sb->s_magic = EROFS_SUPER_MAGIC;

	if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
		errln("failed to set erofs blksize");
		return -EINVAL;
	}

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	err = superblock_read(sb);
	if (err)
		return err;

	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;

	sb->s_op = &erofs_sops;

#ifdef CONFIG_EROFS_FS_XATTR
	sb->s_xattr = erofs_xattr_handlers;
#endif
	/* set erofs default mount options */
	default_options(sbi);

	err = parse_options(sb, data);
	if (err)
		return err;

	if (!silent)
		infoln("root inode @ nid %llu", ROOT_NID(sbi));

	if (test_opt(sbi, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi), true);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		errln("rootino(nid %llu) is not a directory(i_mode %o)",
		      ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	if (!silent)
		infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data);
	return 0;
}

static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags,
				  const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super);
}

/*
 * may be triggered after deactivate_locked_super() is called, which covers
 * both the umount path and the failed-to-initialize path.
 */
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
	infoln("unmounting for %s", sb->s_id);

	kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;
	kfree(sbi);
	sb->s_fs_info = NULL;
}

/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_shrinker_unregister(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
}

static struct file_system_type erofs_fs_type = {
	.owner          = THIS_MODULE,
	.name           = "erofs",
	.mount          = erofs_mount,
	.kill_sb        = erofs_kill_sb,
	.fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");

static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();
	infoln("initializing erofs " EROFS_VERSION);

	err = erofs_init_inode_cache();
	if (err)
		goto icache_err;

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	infoln("successfully initialized erofs");
	return 0;

fs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	erofs_exit_shrinker();
shrinker_err:
	erofs_exit_inode_cache();
icache_err:
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);
	z_erofs_exit_zip_subsystem();
	erofs_exit_shrinker();
	erofs_exit_inode_cache();
	infoln("successfully finalized erofs");
}

/* get filesystem statistics */
erofs"); 570 } 571 572 /* get filesystem statistics */ 573 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) 574 { 575 struct super_block *sb = dentry->d_sb; 576 struct erofs_sb_info *sbi = EROFS_SB(sb); 577 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 578 579 buf->f_type = sb->s_magic; 580 buf->f_bsize = EROFS_BLKSIZ; 581 buf->f_blocks = sbi->blocks; 582 buf->f_bfree = buf->f_bavail = 0; 583 584 buf->f_files = ULLONG_MAX; 585 buf->f_ffree = ULLONG_MAX - sbi->inos; 586 587 buf->f_namelen = EROFS_NAME_LEN; 588 589 buf->f_fsid.val[0] = (u32)id; 590 buf->f_fsid.val[1] = (u32)(id >> 32); 591 return 0; 592 } 593 594 static int erofs_show_options(struct seq_file *seq, struct dentry *root) 595 { 596 struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); 597 598 #ifdef CONFIG_EROFS_FS_XATTR 599 if (test_opt(sbi, XATTR_USER)) 600 seq_puts(seq, ",user_xattr"); 601 else 602 seq_puts(seq, ",nouser_xattr"); 603 #endif 604 #ifdef CONFIG_EROFS_FS_POSIX_ACL 605 if (test_opt(sbi, POSIX_ACL)) 606 seq_puts(seq, ",acl"); 607 else 608 seq_puts(seq, ",noacl"); 609 #endif 610 if (test_opt(sbi, FAULT_INJECTION)) 611 seq_printf(seq, ",fault_injection=%u", 612 erofs_get_fault_rate(sbi)); 613 #ifdef CONFIG_EROFS_FS_ZIP 614 if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { 615 seq_puts(seq, ",cache_strategy=disabled"); 616 } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { 617 seq_puts(seq, ",cache_strategy=readahead"); 618 } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { 619 seq_puts(seq, ",cache_strategy=readaround"); 620 } else { 621 seq_puts(seq, ",cache_strategy=(unknown)"); 622 DBG_BUGON(1); 623 } 624 #endif 625 return 0; 626 } 627 628 static int erofs_remount(struct super_block *sb, int *flags, char *data) 629 { 630 struct erofs_sb_info *sbi = EROFS_SB(sb); 631 unsigned int org_mnt_opt = sbi->mount_opt; 632 unsigned int org_inject_rate = erofs_get_fault_rate(sbi); 633 int err; 634 635 DBG_BUGON(!sb_rdonly(sb)); 636 err = parse_options(sb, data); 637 if (err) 638 goto out; 639 640 if (test_opt(sbi, POSIX_ACL)) 641 sb->s_flags |= SB_POSIXACL; 642 else 643 sb->s_flags &= ~SB_POSIXACL; 644 645 *flags |= SB_RDONLY; 646 return 0; 647 out: 648 __erofs_build_fault_attr(sbi, org_inject_rate); 649 sbi->mount_opt = org_mnt_opt; 650 651 return err; 652 } 653 654 const struct super_operations erofs_sops = { 655 .put_super = erofs_put_super, 656 .alloc_inode = alloc_inode, 657 .free_inode = free_inode, 658 .statfs = erofs_statfs, 659 .show_options = erofs_show_options, 660 .remount_fs = erofs_remount, 661 }; 662 663 module_init(erofs_module_init); 664 module_exit(erofs_module_exit); 665 666 MODULE_DESCRIPTION("Enhanced ROM File System"); 667 MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); 668 MODULE_LICENSE("GPL"); 669 670