/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>

#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/uaccess.h>

#include "spufs.h"

struct spufs_sb_info {
	int debug;
};

static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;

static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
	return sb->s_fs_info;
}

static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	ei->i_ctx = NULL;
	ei->i_openers = 0;

	return &ei->vfs_inode;
}

static void
spufs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(void *p)
{
	struct spufs_inode_info *ei = p;

	inode_init_once(&ei->vfs_inode);
}

static struct inode *
spufs_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
	return inode;
}

static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}


static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
		const struct file_operations *fops, int mode,
		size_t size, struct spu_context *ctx)
{
	static const struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;
	int ret;

	ret = -ENOSPC;
	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		goto out;

	ret = 0;
	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_size = size;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
out:
	return ret;
}

static void
spufs_evict_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);
	end_writeback(inode);
	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
}

static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	mutex_lock(&dir->d_inode->i_mutex);
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		spin_lock(&dcache_lock);
		spin_lock(&dentry->d_lock);
		if (!(d_unhashed(dentry)) && dentry->d_inode) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(dir->d_inode, dentry);
			spin_unlock(&dcache_lock);
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
		}
	}
	shrink_dcache_parent(dir);
	mutex_unlock(&dir->d_inode->i_mutex);
}

/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	spufs_prune_dir(dir);
	d_drop(dir);

	return simple_rmdir(parent, dir);
}

static int spufs_fill_dir(struct dentry *dir,
		const struct spufs_tree_descr *files, int mode,
		struct spu_context *ctx)
{
	struct dentry *dentry, *tmp;
	int ret;

	while (files->name && files->name[0]) {
		ret = -ENOMEM;
		dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			goto out;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, files->size, ctx);
		if (ret)
			goto out;
		files++;
	}
	return 0;
out:
	/*
	 * remove all children from dir. dir->inode is not set so don't
	 * just simply use spufs_prune_dir() and panic afterwards :)
	 * dput() looks like it will do the right thing:
	 * - dec parent's ref counter
	 * - remove child from parent's child list
	 * - free child's inode if possible
	 * - free child
	 */
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		dput(dentry);
	}

	shrink_dcache_parent(dir);
	return ret;
}

static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct spu_context *ctx;
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = dir->d_parent->d_inode;
	ctx = SPUFS_I(dir->d_inode)->i_ctx;

	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
	ret = spufs_rmdir(parent, dir);
	mutex_unlock(&parent->i_mutex);
	WARN_ON(ret);

	/* We have to give up the mm_struct */
	spu_forget(ctx);

	return dcache_dir_close(inode, file);
}

const struct file_operations spufs_context_fops = {
	.open = dcache_dir_open,
	.release = spufs_dir_close,
	.llseek = dcache_dir_lseek,
	.read = generic_read_dir,
	.readdir = dcache_readdir,
	.fsync = noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);

static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		int mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx)
		goto out_iput;

	ctx->flags = flags;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					 mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

	if (ret)
		goto out_free_ctx;

	if (spufs_get_sb_info(dir->i_sb)->debug)
		ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
				mode, ctx);

	if (ret)
		goto out_free_ctx;

	d_instantiate(dentry, inode);
	dget(dentry);
	inc_nlink(dir);
	inc_nlink(dentry->d_inode);
	goto out;

out_free_ctx:
	spu_forget(ctx);
	put_spu_context(ctx);
out_iput:
	iput(inode);
out:
	return ret;
}

static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
out:
	return ret;
}

static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
						struct file *filp)
{
	struct spu_context *tmp, *neighbor, *err;
	int count, node;
	int aff_supp;

	aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
					struct spu, cbe_list))->aff_list);

	if (!aff_supp)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_GANG)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_AFFINITY_MEM &&
	    gang->aff_ref_ctx &&
	    gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
		return ERR_PTR(-EEXIST);

	if (gang->aff_flags & AFF_MERGED)
		return ERR_PTR(-EBUSY);

	neighbor = NULL;
	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (!filp || filp->f_op != &spufs_context_fops)
			return ERR_PTR(-EINVAL);

		neighbor = get_spu_context(
				SPUFS_I(filp->f_dentry->d_inode)->i_ctx);

		if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
		    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
		    !list_entry(neighbor->aff_list.next, struct spu_context,
		    aff_list)->aff_head) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}

		if (gang != neighbor->gang) {
			err = ERR_PTR(-EINVAL);
			goto out_put_neighbor;
		}

		count = 1;
		list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
			count++;
		if (list_empty(&neighbor->aff_list))
			count++;

		for (node = 0; node < MAX_NUMNODES; node++) {
			if ((cbe_spu_info[node].n_spus - atomic_read(
				&cbe_spu_info[node].reserved_spus)) >= count)
				break;
		}

		if (node == MAX_NUMNODES) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}
	}

	return neighbor;

out_put_neighbor:
	put_spu_context(neighbor);
	return err;
}

static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
		struct spu_context *neighbor)
{
	if (flags & SPU_CREATE_AFFINITY_MEM)
		ctx->gang->aff_ref_ctx = ctx;

	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (list_empty(&neighbor->aff_list)) {
			list_add_tail(&neighbor->aff_list,
				&ctx->gang->aff_list_head);
			neighbor->aff_head = 1;
		}

		if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
		    || list_entry(neighbor->aff_list.next, struct spu_context,
				aff_list)->aff_head) {
			list_add(&ctx->aff_list, &neighbor->aff_list);
		} else {
			list_add_tail(&ctx->aff_list, &neighbor->aff_list);
			if (neighbor->aff_head) {
				neighbor->aff_head = 0;
				ctx->aff_head = 1;
			}
		}

		if (!ctx->gang->aff_ref_ctx)
			ctx->gang->aff_ref_ctx = ctx;
	}
}

static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
			struct vfsmount *mnt, int flags, int mode,
			struct file *aff_filp)
{
	int ret;
	int affinity;
	struct spu_gang *gang;
	struct spu_context *neighbor;

	ret = -EPERM;
	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	ret = -EINVAL;
	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		goto out_unlock;

	ret = -ENODEV;
	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
		goto out_unlock;

	gang = NULL;
	neighbor = NULL;
	affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
	if (affinity) {
		gang = SPUFS_I(inode)->i_gang;
		ret = -EINVAL;
		if (!gang)
			goto out_unlock;
		mutex_lock(&gang->aff_mutex);
		neighbor = spufs_assert_affinity(flags, gang, aff_filp);
		if (IS_ERR(neighbor)) {
			ret = PTR_ERR(neighbor);
			goto out_aff_unlock;
		}
	}

	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
	if (ret)
		goto out_aff_unlock;

	if (affinity) {
		spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
								neighbor);
		if (neighbor)
			put_spu_context(neighbor);
	}

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_context_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		WARN_ON(spufs_rmdir(inode, dentry));
		if (affinity)
			mutex_unlock(&gang->aff_mutex);
		mutex_unlock(&inode->i_mutex);
		spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
		goto out;
	}

out_aff_unlock:
	if (affinity)
		mutex_unlock(&gang->aff_mutex);
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	dput(dentry);
	return ret;
}

static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
	int ret;
	struct inode *inode;
	struct spu_gang *gang;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	ret = 0;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	if (!gang)
		goto out_iput;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inc_nlink(dentry->d_inode);
	return ret;

out_iput:
	iput(inode);
out:
	return ret;
}

static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &simple_dir_operations;
	fd_install(ret, filp);
out:
	return ret;
}

static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, int mode)
{
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
	if (ret)
		goto out;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_gang_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		int err = simple_rmdir(inode, dentry);
		WARN_ON(err);
	}

out:
	mutex_unlock(&inode->i_mutex);
	dput(dentry);
	return ret;
}


static struct file_system_type spufs_type;

long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
							struct file *filp)
{
	struct dentry *dentry;
	int ret;

	ret = -EINVAL;
	/* check if we are on spufs */
	if (nd->path.dentry->d_sb->s_type != &spufs_type)
		goto out;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		goto out;

	/* only threads can be underneath a gang */
	if (nd->path.dentry != nd->path.dentry->d_sb->s_root) {
		if ((flags & SPU_CREATE_GANG) ||
		    !SPUFS_I(nd->path.dentry->d_inode)->i_gang)
			goto out;
	}

	dentry = lookup_create(nd, 1);
	ret = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_dir;

	mode &= ~current_umask();

	if (flags & SPU_CREATE_GANG)
		ret = spufs_create_gang(nd->path.dentry->d_inode,
					 dentry, nd->path.mnt, mode);
	else
		ret = spufs_create_context(nd->path.dentry->d_inode,
					 dentry, nd->path.mnt, flags, mode,
					 filp);
	if (ret >= 0)
		fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
	return ret;

out_dir:
	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
	return ret;
}

/* File system initialization */
enum {
	Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
};

static const match_table_t spufs_tokens = {
	{ Opt_uid, "uid=%d" },
	{ Opt_gid, "gid=%d" },
	{ Opt_mode, "mode=%o" },
	{ Opt_debug, "debug" },
	{ Opt_err, NULL },
};

static int
spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		int token, option;

		if (!*p)
			continue;

		token = match_token(p, spufs_tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_uid = option;
			break;
		case Opt_gid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_gid = option;
			break;
		case Opt_mode:
			if (match_octal(&args[0], &option))
				return 0;
			root->i_mode = option | S_IFDIR;
			break;
		case Opt_debug:
			spufs_get_sb_info(sb)->debug = 1;
			break;
		default:
			return 0;
		}
	}
	return 1;
}

static void spufs_exit_isolated_loader(void)
{
	free_pages((unsigned long) isolated_loader,
			get_order(isolated_loader_size));
}

static void
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = of_get_property(dn, "loader", &size);
	if (!loader)
		return;

	/* the loader must be aligned on a 16 byte boundary */
	isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!isolated_loader)
		return;

	isolated_loader_size = size;
	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}

static int
spufs_create_root(struct super_block *sb, void *data)
{
	struct inode *inode;
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	inode = spufs_new_inode(sb, S_IFDIR | 0775);
	if (!inode)
		goto out;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;
	inc_nlink(inode);

	ret = -EINVAL;
	if (!spufs_parse_options(sb, data, inode))
		goto out_iput;

	ret = -ENOMEM;
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_iput;

	return 0;
out_iput:
	iput(inode);
out:
	return ret;
}

static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct spufs_sb_info *info;
	static const struct super_operations s_ops = {
		.alloc_inode = spufs_alloc_inode,
		.destroy_inode = spufs_destroy_inode,
		.statfs = simple_statfs,
		.evict_inode = spufs_evict_inode,
		.show_options = generic_show_options,
	};

	save_mount_options(sb, data);

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &s_ops;
	sb->s_fs_info = info;

	return spufs_create_root(sb, data);
}

static struct dentry *
spufs_mount(struct file_system_type *fstype, int flags,
		const char *name, void *data)
{
	return mount_single(fstype, flags, data, spufs_fill_super);
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.mount = spufs_mount,
	.kill_sb = kill_litter_super,
};

static int __init spufs_init(void)
{
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN, spufs_init_once);

	if (!spufs_inode_cache)
		goto out;
	ret = spu_sched_init();
	if (ret)
		goto out_cache;
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_sched;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_fs;

	spufs_init_isolated_loader();

	return 0;

out_fs:
	unregister_filesystem(&spufs_type);
out_sched:
	spu_sched_exit();
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);

static void __exit spufs_exit(void)
{
	spu_sched_exit();
	spufs_exit_isolated_loader();
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");