/*
 *  linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>

static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_destroy_inode(struct inode *inode);

#include "hfsplus_fs.h"
#include "xattr.h"

/* Read the fork of a special metadata file straight from the volume header. */
static int hfsplus_system_read_inode(struct inode *inode)
{
	struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_CAT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->cat_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_ALLOC_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
		inode->i_mapping->a_ops = &hfsplus_aops;
		break;
	case HFSPLUS_START_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->start_file);
		break;
	case HFSPLUS_ATTR_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->attr_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	default:
		return -EIO;
	}

	return 0;
}

/*
 * Get the in-core inode for @ino: user files and directories are read
 * from the catalog tree, the reserved system CNIDs from the volume header.
 */
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
	struct hfs_find_data fd;
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
	mutex_init(&HFSPLUS_I(inode)->extents_lock);
	HFSPLUS_I(inode)->flags = 0;
	HFSPLUS_I(inode)->extent_state = 0;
	HFSPLUS_I(inode)->rsrc_inode = NULL;
	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID) {
		err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
		if (!err) {
			err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
			if (!err)
				err = hfsplus_cat_read_inode(inode, &fd);
			hfs_find_exit(&fd);
		}
	} else {
		err = hfsplus_system_read_inode(inode);
	}

	if (err) {
		iget_failed(inode);
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}

/* Write the fork of a special metadata file back into the volume header. */
static int hfsplus_system_write_inode(struct inode *inode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	struct hfsplus_fork_raw *fork;
	struct hfs_btree *tree = NULL;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		fork = &vhdr->ext_file;
		tree = sbi->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		fork = &vhdr->cat_file;
		tree = sbi->cat_tree;
		break;
	case HFSPLUS_ALLOC_CNID:
		fork = &vhdr->alloc_file;
		break;
	case HFSPLUS_START_CNID:
		fork = &vhdr->start_file;
		break;
	case HFSPLUS_ATTR_CNID:
		fork = &vhdr->attr_file;
		tree = sbi->attr_tree;
		break;
	default:
		return -EIO;
	}

	if (fork->total_size != cpu_to_be64(inode->i_size)) {
		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
		hfsplus_mark_mdb_dirty(inode->i_sb);
	}
	hfsplus_inode_write_fork(inode, fork);
	if (tree) {
		int err = hfs_btree_write(tree);

		if (err) {
			pr_err("b-tree write err: %d, ino %lu\n",
			       err, inode->i_ino);
			return err;
		}
	}
	return 0;
}

static int hfsplus_write_inode(struct inode *inode,
		struct writeback_control *wbc)
{
	int err;

	hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);

	err = hfsplus_ext_write_extent(inode);
	if (err)
		return err;

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID)
		return hfsplus_cat_write_inode(inode);
	else
		return hfsplus_system_write_inode(inode);
}

static void hfsplus_evict_inode(struct inode *inode)
{
	hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (HFSPLUS_IS_RSRC(inode)) {
		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFSPLUS_I(inode)->rsrc_inode);
	}
}

static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	int write_backup = 0;
	int error, error2;

	if (!wait)
		return 0;

	hfs_dbg(SUPER, "hfsplus_sync_fs\n");

	/*
	 * Explicitly write out the special metadata inodes.
	 *
	 * While these special inodes are marked as hashed and written
	 * out periodically by the flusher threads we redirty them
	 * during writeout of normal inodes, and thus the resulting
	 * livelock prevents us from getting the latest state to disk.
	 */
	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
	if (!error)
		error = error2;
	if (sbi->attr_tree) {
		error2 =
		    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}
	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
	if (!error)
		error = error2;

	mutex_lock(&sbi->vh_mutex);
	mutex_lock(&sbi->alloc_mutex);
	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
	vhdr->file_count = cpu_to_be32(sbi->file_count);

	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
		write_backup = 1;
	}

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
				    sbi->s_vhdr_buf, NULL, WRITE_SYNC);
	if (!error)
		error = error2;
	if (!write_backup)
		goto out;

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + sbi->sect_count - 2,
				    sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
	if (!error)
		error = error2;
out:
	mutex_unlock(&sbi->alloc_mutex);
	mutex_unlock(&sbi->vh_mutex);

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

	return error;
}

/* Workqueue callback used by hfsplus_mark_mdb_dirty() to batch metadata syncs. */
static void delayed_sync_fs(struct work_struct *work)
{
	int err;
	struct hfsplus_sb_info *sbi;

	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);

	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);

	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
	if (err)
		pr_err("delayed sync fs err %d\n", err);
}

/* Schedule a delayed sync of the volume header instead of writing it at once. */
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	unsigned long delay;

	if (sb->s_flags & MS_RDONLY)
		return;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}

static void hfsplus_put_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	hfs_dbg(SUPER, "hfsplus_put_super\n");

	cancel_delayed_work_sync(&sbi->sync_work);

	if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		vhdr->modify_date = hfsp_now2mt();
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

		hfsplus_sync_fs(sb, 1);
	}

	hfs_btree_close(sbi->attr_tree);
	hfs_btree_close(sbi->cat_tree);
	hfs_btree_close(sbi->ext_tree);
	iput(sbi->alloc_file);
	iput(sbi->hidden_dir);
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
	unload_nls(sbi->nls);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = HFSPLUS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = 0xFFFFFFFF;
	buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	buf->f_namelen = HFSPLUS_MAX_STRLEN;

	return 0;
}

static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		return 0;
	if (!(*flags & MS_RDONLY)) {
		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
		int force = 0;

		if (!hfsplus_parse_options_remount(data, &force))
			return -EINVAL;

		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
			pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
			sb->s_flags |= MS_RDONLY;
			*flags |= MS_RDONLY;
		} else if (force) {
			/* nothing */
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
			pr_warn("filesystem is marked locked, leaving read-only.\n");
			sb->s_flags |= MS_RDONLY;
			*flags |= MS_RDONLY;
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
			pr_warn("filesystem is marked journaled, leaving read-only.\n");
			sb->s_flags |= MS_RDONLY;
			*flags |= MS_RDONLY;
		}
	}
	return 0;
}

static const struct super_operations hfsplus_sops = {
	.alloc_inode	= hfsplus_alloc_inode,
	.destroy_inode	= hfsplus_destroy_inode,
	.write_inode	= hfsplus_write_inode,
	.evict_inode	= hfsplus_evict_inode,
	.put_super	= hfsplus_put_super,
	.sync_fs	= hfsplus_sync_fs,
	.statfs		= hfsplus_statfs,
	.remount_fs	= hfsplus_remount,
	.show_options	= hfsplus_show_options,
};

static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
{
	struct hfsplus_vh *vhdr;
	struct hfsplus_sb_info *sbi;
	hfsplus_cat_entry entry;
	struct hfs_find_data fd;
	struct inode *root, *inode;
	struct qstr str;
	struct nls_table *nls = NULL;
	u64 last_fs_block, last_fs_page;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto out;

	sb->s_fs_info = sbi;
	mutex_init(&sbi->alloc_mutex);
	mutex_init(&sbi->vh_mutex);
	spin_lock_init(&sbi->work_lock);
	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
	hfsplus_fill_defaults(sbi);

	err = -EINVAL;
	if (!hfsplus_parse_options(data, sbi)) {
		pr_err("unable to parse mount options\n");
		goto out_unload_nls;
	}

	/* temporarily use utf8 to correctly find the hidden dir below */
	nls = sbi->nls;
	sbi->nls = load_nls("utf8");
	if (!sbi->nls) {
		pr_err("unable to load nls for utf8\n");
		goto out_unload_nls;
	}

	/* Grab the volume header */
	if (hfsplus_read_wrapper(sb)) {
		if (!silent)
			pr_warn("unable to find HFS+ superblock\n");
		goto out_unload_nls;
	}
	vhdr = sbi->s_vhdr;

	/* Copy parts of the volume header into the superblock */
	sb->s_magic = HFSPLUS_VOLHEAD_SIG;
	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
		pr_err("wrong filesystem version\n");
		goto out_free_vhdr;
	}
	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
	sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
	sbi->file_count = be32_to_cpu(vhdr->file_count);
	sbi->folder_count = be32_to_cpu(vhdr->folder_count);
	sbi->data_clump_blocks =
		be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->data_clump_blocks)
		sbi->data_clump_blocks = 1;
	sbi->rsrc_clump_blocks =
		be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->rsrc_clump_blocks)
		sbi->rsrc_clump_blocks = 1;

	err = -EFBIG;
	last_fs_block = sbi->total_blocks - 1;
	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
			PAGE_CACHE_SHIFT;

	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		pr_err("filesystem size too large\n");
		goto out_free_vhdr;
	}

	/* Set up operations so we can load metadata */
	sb->s_op = &hfsplus_sops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
		pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
		/* nothing */
	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
		pr_warn("Filesystem is marked locked, mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
			!(sb->s_flags & MS_RDONLY)) {
		pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
		sb->s_flags |= MS_RDONLY;
	}

	err = -EINVAL;

	/* Load metadata objects (B*Trees) */
	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
	if (!sbi->ext_tree) {
		pr_err("failed to load extents file\n");
		goto out_free_vhdr;
	}
	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
	if (!sbi->cat_tree) {
		pr_err("failed to load catalog file\n");
		goto out_close_ext_tree;
	}
	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
	if (vhdr->attr_file.total_blocks != 0) {
		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
		if (!sbi->attr_tree) {
			pr_err("failed to load attributes file\n");
			goto out_close_cat_tree;
		}
		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
	}
	sb->s_xattr = hfsplus_xattr_handlers;

	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
	if (IS_ERR(inode)) {
		pr_err("failed to load allocation file\n");
		err = PTR_ERR(inode);
		goto out_close_attr_tree;
	}
	sbi->alloc_file = inode;

	/* Load the root directory */
	root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
	if (IS_ERR(root)) {
		pr_err("failed to load root directory\n");
		err = PTR_ERR(root);
		goto out_put_alloc_file;
	}

	sb->s_d_op = &hfsplus_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_put_alloc_file;
	}

	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
	str.name = HFSP_HIDDENDIR_NAME;
	err = hfs_find_init(sbi->cat_tree, &fd);
	if (err)
		goto out_put_root;
	hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
		hfs_find_exit(&fd);
		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
			goto out_put_root;
		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			goto out_put_root;
		}
		sbi->hidden_dir = inode;
	} else
		hfs_find_exit(&fd);

	if (!(sb->s_flags & MS_RDONLY)) {
		/*
		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
		 * all three are registered with Apple for our use
		 */
		vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
		vhdr->modify_date = hfsp_now2mt();
		be32_add_cpu(&vhdr->write_count, 1);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
		hfsplus_sync_fs(sb, 1);

		if (!sbi->hidden_dir) {
			mutex_lock(&sbi->vh_mutex);
			sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
			if (!sbi->hidden_dir) {
				mutex_unlock(&sbi->vh_mutex);
				err = -ENOMEM;
				goto out_put_root;
			}
			err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
						 &str, sbi->hidden_dir);
			if (err) {
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			err = hfsplus_init_inode_security(sbi->hidden_dir,
							  root, &str);
			if (err == -EOPNOTSUPP)
				err = 0; /* Operation is not supported. */
			else if (err) {
				/*
				 * Try to delete anyway without
				 * error analysis.
				 */
				hfsplus_delete_cat(sbi->hidden_dir->i_ino,
						   root, &str);
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			mutex_unlock(&sbi->vh_mutex);
			hfsplus_mark_inode_dirty(sbi->hidden_dir,
						 HFSPLUS_I_CAT_DIRTY);
		}
	}

	unload_nls(sbi->nls);
	sbi->nls = nls;
	return 0;

out_put_hidden_dir:
	iput(sbi->hidden_dir);
out_put_root:
	dput(sb->s_root);
	sb->s_root = NULL;
out_put_alloc_file:
	iput(sbi->alloc_file);
out_close_attr_tree:
	hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
	hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
	hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
	unload_nls(sbi->nls);
	unload_nls(nls);
	kfree(sbi);
out:
	return err;
}

MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");

static struct kmem_cache *hfsplus_inode_cachep;

static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
	struct hfsplus_inode_info *i;

	i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
	return i ? &i->vfs_inode : NULL;
}

static void hfsplus_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}

static void hfsplus_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, hfsplus_i_callback);
}

#define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)

static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
}

static struct file_system_type hfsplus_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "hfsplus",
	.mount		= hfsplus_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfsplus");

static void hfsplus_init_once(void *p)
{
	struct hfsplus_inode_info *i = p;

	inode_init_once(&i->vfs_inode);
}

static int __init init_hfsplus_fs(void)
{
	int err;

	hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
		HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN,
		hfsplus_init_once);
	if (!hfsplus_inode_cachep)
		return -ENOMEM;
	err = hfsplus_create_attr_tree_cache();
	if (err)
		goto destroy_inode_cache;
	err = register_filesystem(&hfsplus_fs_type);
	if (err)
		goto destroy_attr_tree_cache;
	return 0;

destroy_attr_tree_cache:
	hfsplus_destroy_attr_tree_cache();

destroy_inode_cache:
	kmem_cache_destroy(hfsplus_inode_cachep);

	return err;
}

static void __exit exit_hfsplus_fs(void)
{
	unregister_filesystem(&hfsplus_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	hfsplus_destroy_attr_tree_cache();
	kmem_cache_destroy(hfsplus_inode_cachep);
}

module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)