/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads = 0;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif

static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem "
			"as read-only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}

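/*
 * jfs_error: log a printf-style error message for this filesystem, then
 * let jfs_handle_error() apply the mount's error policy (errors=continue,
 * errors=remount-ro or errors=panic).
 */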
void jfs_error(struct super_block *sb, const char *function, ...)
{
	static char error_buf[256];
	va_list args;

	va_start(args, function);
	vsnprintf(error_buf, sizeof(error_buf), function, args);
	va_end(args);

	printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);

	jfs_handle_error(sb);
}

static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
	return &jfs_inode->vfs_inode;
}

static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);

#ifdef CONFIG_JFS_POSIX_ACL
	if (ji->i_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_acl);
		ji->i_acl = JFS_ACL_NOT_CACHED;
	}
	if (ji->i_default_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_default_acl);
		ji->i_default_acl = JFS_ACL_NOT_CACHED;
	}
#endif

	kmem_cache_free(jfs_inode_cachep, ji);
}

static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
	sbi->nls_tab = NULL;

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;

	kfree(sbi);
}

enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
};

static match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_err, NULL}
};

static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change; NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					printk(KERN_ERR
					       "JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			*newLVSize = simple_strtoull(resize, &resize, 0);
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				printk(KERN_ERR
				       "JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				printk(KERN_ERR
				       "JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			printk(KERN_ERR
			       "JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			sbi->uid = simple_strtoul(uid, &uid, 0);
			break;
		}
		case Opt_gid:
		{
			char *gid = args[0].from;
			sbi->gid = simple_strtoul(gid, &gid, 0);
			break;
		}
		case Opt_umask:
		{
			char *umask = args[0].from;
			sbi->umask = simple_strtoul(umask, &umask, 8);
			if (sbi->umask & ~0777) {
				printk(KERN_ERR
				       "JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}
		default:
			printk("jfs: Unrecognized mount option \"%s\" "
			       "or missing value\n", p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		if (sbi->nls_tab)
			unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}

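/*
 * Handle remount requests: resize the volume if resize= was given (the
 * mount must be read-write), switch between read-only and read-write,
 * and restart the log when the integrity mode changes on a read-write
 * mount.
 */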
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;

	if (!parse_options(data, sb, &newLVSize, &flag)) {
		return -EINVAL;
	}
	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			printk(KERN_ERR
			       "JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		return jfs_mount_rw(sb, 1);
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;
			JFS_SBI(sb)->flag = flag;
			return jfs_mount_rw(sb, 1);
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}

static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	if (!new_valid_dev(sb->s_bdev->bd_dev))
		return -EOVERFLOW;

	sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;
	sbi->sb = sb;
	sbi->uid = sbi->gid = sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag)) {
		kfree(sbi);
		return -EINVAL;
	}
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		printk(KERN_ERR "resize option for remount only\n");
		/* free sbi (and any loaded nls table) instead of leaking it */
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_kfree;
	}
	inode->i_ino = 0;
	inode->i_nlink = 1;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	insert_inode_hash(inode);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent) {
			jfs_err("jfs_mount failed w/return code = %d", rc);
		}
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	if (sbi->mntflag & JFS_OS2)
		sb->s_root->d_op = &jfs_ci_dentry_operations;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");
	iput(inode);

out_no_rw:
	rc = jfs_umount(sb);
	if (rc) {
		jfs_err("jfs_umount failed with return code %d", rc);
	}
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_kfree:
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
	kfree(sbi);
	return ret;
}

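/*
 * Freeze/thaw support: write_super_lockfs quiesces the transaction
 * manager, shuts down the log and marks the superblock clean;
 * unlockfs restarts the log and resumes transactions.
 */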
static void jfs_write_super_lockfs(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		lmLogShutdown(log);
		updateSuper(sb, FM_CLEAN);
	}
}

static void jfs_unlockfs(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		updateSuper(sb, FM_MOUNT);
		if ((rc = lmLogInit(log)))
			jfs_err("jfs_unlock failed with return code %d", rc);
		else
			txResume(sb);
	}
}

static int jfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super,
			   mnt);
}

static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}

	return 0;
}

static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);

	if (sbi->uid != -1)
		seq_printf(seq, ",uid=%d", sbi->uid);
	if (sbi->gid != -1)
		seq_printf(seq, ",gid=%d", sbi->gid);
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock(&inode->i_mutex);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		/* don't leave i_mutex held when nothing was written */
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}

#endif

static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.delete_inode	= jfs_delete_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.write_super_lockfs = jfs_write_super_lockfs,
	.unlockfs	= jfs_unlockfs,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
#endif
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.get_sb		= jfs_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
#ifdef CONFIG_JFS_POSIX_ACL
	jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
	jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
#endif
	inode_init_once(&jfs_ip->vfs_inode);
}

static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			      init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

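	/* Start the commit threads; each runs jfs_lazycommit() */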
	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
						 "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	return register_filesystem(&jfs_fs_type);

kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)