1 /* 2 * Copyright (C) International Business Machines Corp., 2000-2004 3 * Portions Copyright (C) Christoph Hellwig, 2001-2002 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See 13 * the GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20 #include <linux/fs.h> 21 #include <linux/module.h> 22 #include <linux/parser.h> 23 #include <linux/completion.h> 24 #include <linux/vfs.h> 25 #include <linux/quotaops.h> 26 #include <linux/mount.h> 27 #include <linux/moduleparam.h> 28 #include <linux/kthread.h> 29 #include <linux/posix_acl.h> 30 #include <linux/buffer_head.h> 31 #include <linux/exportfs.h> 32 #include <linux/crc32.h> 33 #include <linux/slab.h> 34 #include <asm/uaccess.h> 35 #include <linux/seq_file.h> 36 37 #include "jfs_incore.h" 38 #include "jfs_filsys.h" 39 #include "jfs_inode.h" 40 #include "jfs_metapage.h" 41 #include "jfs_superblock.h" 42 #include "jfs_dmap.h" 43 #include "jfs_imap.h" 44 #include "jfs_acl.h" 45 #include "jfs_debug.h" 46 47 MODULE_DESCRIPTION("The Journaled Filesystem (JFS)"); 48 MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); 49 MODULE_LICENSE("GPL"); 50 51 static struct kmem_cache * jfs_inode_cachep; 52 53 static const struct super_operations jfs_super_operations; 54 static const struct export_operations jfs_export_operations; 55 static struct file_system_type jfs_fs_type; 56 57 #define MAX_COMMIT_THREADS 64 58 static int commit_threads = 0; 59 module_param(commit_threads, int, 0); 60 MODULE_PARM_DESC(commit_threads, "Number of commit threads"); 61 62 static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS]; 63 struct task_struct *jfsIOthread; 64 struct task_struct *jfsSyncThread; 65 66 #ifdef CONFIG_JFS_DEBUG 67 int jfsloglevel = JFS_LOGLEVEL_WARN; 68 module_param(jfsloglevel, int, 0644); 69 MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)"); 70 #endif 71 72 static void jfs_handle_error(struct super_block *sb) 73 { 74 struct jfs_sb_info *sbi = JFS_SBI(sb); 75 76 if (sb->s_flags & MS_RDONLY) 77 return; 78 79 updateSuper(sb, FM_DIRTY); 80 81 if (sbi->flag & JFS_ERR_PANIC) 82 panic("JFS (device %s): panic forced after error\n", 83 sb->s_id); 84 else if (sbi->flag & JFS_ERR_REMOUNT_RO) { 85 jfs_err("ERROR: (device %s): remounting filesystem " 86 "as read-only\n", 87 sb->s_id); 88 sb->s_flags |= MS_RDONLY; 89 } 90 91 /* nothing is done for continue beyond marking the superblock dirty */ 92 } 93 94 void jfs_error(struct super_block *sb, const char * function, ...) 95 { 96 static char error_buf[256]; 97 va_list args; 98 99 va_start(args, function); 100 vsnprintf(error_buf, sizeof(error_buf), function, args); 101 va_end(args); 102 103 printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf); 104 105 jfs_handle_error(sb); 106 } 107 108 static struct inode *jfs_alloc_inode(struct super_block *sb) 109 { 110 struct jfs_inode_info *jfs_inode; 111 112 jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS); 113 if (!jfs_inode) 114 return NULL; 115 return &jfs_inode->vfs_inode; 116 } 117 118 static void jfs_i_callback(struct rcu_head *head) 119 { 120 struct inode *inode = container_of(head, struct inode, i_rcu); 121 struct jfs_inode_info *ji = JFS_IP(inode); 122 INIT_LIST_HEAD(&inode->i_dentry); 123 kmem_cache_free(jfs_inode_cachep, ji); 124 } 125 126 static void jfs_destroy_inode(struct inode *inode) 127 { 128 struct jfs_inode_info *ji = JFS_IP(inode); 129 130 BUG_ON(!list_empty(&ji->anon_inode_list)); 131 132 spin_lock_irq(&ji->ag_lock); 133 if (ji->active_ag != -1) { 134 struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap; 135 atomic_dec(&bmap->db_active[ji->active_ag]); 136 ji->active_ag = -1; 137 } 138 spin_unlock_irq(&ji->ag_lock); 139 call_rcu(&inode->i_rcu, jfs_i_callback); 140 } 141 142 static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) 143 { 144 struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); 145 s64 maxinodes; 146 struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap; 147 148 jfs_info("In jfs_statfs"); 149 buf->f_type = JFS_SUPER_MAGIC; 150 buf->f_bsize = sbi->bsize; 151 buf->f_blocks = sbi->bmap->db_mapsize; 152 buf->f_bfree = sbi->bmap->db_nfree; 153 buf->f_bavail = sbi->bmap->db_nfree; 154 /* 155 * If we really return the number of allocated & free inodes, some 156 * applications will fail because they won't see enough free inodes. 157 * We'll try to calculate some guess as to how may inodes we can 158 * really allocate 159 * 160 * buf->f_files = atomic_read(&imap->im_numinos); 161 * buf->f_ffree = atomic_read(&imap->im_numfree); 162 */ 163 maxinodes = min((s64) atomic_read(&imap->im_numinos) + 164 ((sbi->bmap->db_nfree >> imap->im_l2nbperiext) 165 << L2INOSPEREXT), (s64) 0xffffffffLL); 166 buf->f_files = maxinodes; 167 buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) - 168 atomic_read(&imap->im_numfree)); 169 buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2); 170 buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2, 171 sizeof(sbi->uuid)/2); 172 173 buf->f_namelen = JFS_NAME_MAX; 174 return 0; 175 } 176 177 static void jfs_put_super(struct super_block *sb) 178 { 179 struct jfs_sb_info *sbi = JFS_SBI(sb); 180 int rc; 181 182 jfs_info("In jfs_put_super"); 183 184 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 185 186 rc = jfs_umount(sb); 187 if (rc) 188 jfs_err("jfs_umount failed with return code %d", rc); 189 190 unload_nls(sbi->nls_tab); 191 192 truncate_inode_pages(sbi->direct_inode->i_mapping, 0); 193 iput(sbi->direct_inode); 194 195 kfree(sbi); 196 } 197 198 enum { 199 Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, 200 Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, 201 Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask 202 }; 203 204 static const match_table_t tokens = { 205 {Opt_integrity, "integrity"}, 206 {Opt_nointegrity, "nointegrity"}, 207 {Opt_iocharset, "iocharset=%s"}, 208 {Opt_resize, "resize=%u"}, 209 {Opt_resize_nosize, "resize"}, 210 {Opt_errors, "errors=%s"}, 211 {Opt_ignore, "noquota"}, 212 {Opt_ignore, "quota"}, 213 {Opt_usrquota, "usrquota"}, 214 {Opt_grpquota, "grpquota"}, 215 {Opt_uid, "uid=%u"}, 216 {Opt_gid, "gid=%u"}, 217 {Opt_umask, "umask=%u"}, 218 {Opt_err, NULL} 219 }; 220 221 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, 222 int *flag) 223 { 224 void *nls_map = (void *)-1; /* -1: no change; NULL: none */ 225 char *p; 226 struct jfs_sb_info *sbi = JFS_SBI(sb); 227 228 *newLVSize = 0; 229 230 if (!options) 231 return 1; 232 233 while ((p = strsep(&options, ",")) != NULL) { 234 substring_t args[MAX_OPT_ARGS]; 235 int token; 236 if (!*p) 237 continue; 238 239 token = match_token(p, tokens, args); 240 switch (token) { 241 case Opt_integrity: 242 *flag &= ~JFS_NOINTEGRITY; 243 break; 244 case Opt_nointegrity: 245 *flag |= JFS_NOINTEGRITY; 246 break; 247 case Opt_ignore: 248 /* Silently ignore the quota options */ 249 /* Don't do anything ;-) */ 250 break; 251 case Opt_iocharset: 252 if (nls_map && nls_map != (void *) -1) 253 unload_nls(nls_map); 254 if (!strcmp(args[0].from, "none")) 255 nls_map = NULL; 256 else { 257 nls_map = load_nls(args[0].from); 258 if (!nls_map) { 259 printk(KERN_ERR 260 "JFS: charset not found\n"); 261 goto cleanup; 262 } 263 } 264 break; 265 case Opt_resize: 266 { 267 char *resize = args[0].from; 268 *newLVSize = simple_strtoull(resize, &resize, 0); 269 break; 270 } 271 case Opt_resize_nosize: 272 { 273 *newLVSize = sb->s_bdev->bd_inode->i_size >> 274 sb->s_blocksize_bits; 275 if (*newLVSize == 0) 276 printk(KERN_ERR 277 "JFS: Cannot determine volume size\n"); 278 break; 279 } 280 case Opt_errors: 281 { 282 char *errors = args[0].from; 283 if (!errors || !*errors) 284 goto cleanup; 285 if (!strcmp(errors, "continue")) { 286 *flag &= ~JFS_ERR_REMOUNT_RO; 287 *flag &= ~JFS_ERR_PANIC; 288 *flag |= JFS_ERR_CONTINUE; 289 } else if (!strcmp(errors, "remount-ro")) { 290 *flag &= ~JFS_ERR_CONTINUE; 291 *flag &= ~JFS_ERR_PANIC; 292 *flag |= JFS_ERR_REMOUNT_RO; 293 } else if (!strcmp(errors, "panic")) { 294 *flag &= ~JFS_ERR_CONTINUE; 295 *flag &= ~JFS_ERR_REMOUNT_RO; 296 *flag |= JFS_ERR_PANIC; 297 } else { 298 printk(KERN_ERR 299 "JFS: %s is an invalid error handler\n", 300 errors); 301 goto cleanup; 302 } 303 break; 304 } 305 306 #ifdef CONFIG_QUOTA 307 case Opt_quota: 308 case Opt_usrquota: 309 *flag |= JFS_USRQUOTA; 310 break; 311 case Opt_grpquota: 312 *flag |= JFS_GRPQUOTA; 313 break; 314 #else 315 case Opt_usrquota: 316 case Opt_grpquota: 317 case Opt_quota: 318 printk(KERN_ERR 319 "JFS: quota operations not supported\n"); 320 break; 321 #endif 322 case Opt_uid: 323 { 324 char *uid = args[0].from; 325 sbi->uid = simple_strtoul(uid, &uid, 0); 326 break; 327 } 328 case Opt_gid: 329 { 330 char *gid = args[0].from; 331 sbi->gid = simple_strtoul(gid, &gid, 0); 332 break; 333 } 334 case Opt_umask: 335 { 336 char *umask = args[0].from; 337 sbi->umask = simple_strtoul(umask, &umask, 8); 338 if (sbi->umask & ~0777) { 339 printk(KERN_ERR 340 "JFS: Invalid value of umask\n"); 341 goto cleanup; 342 } 343 break; 344 } 345 default: 346 printk("jfs: Unrecognized mount option \"%s\" " 347 " or missing value\n", p); 348 goto cleanup; 349 } 350 } 351 352 if (nls_map != (void *) -1) { 353 /* Discard old (if remount) */ 354 unload_nls(sbi->nls_tab); 355 sbi->nls_tab = nls_map; 356 } 357 return 1; 358 359 cleanup: 360 if (nls_map && nls_map != (void *) -1) 361 unload_nls(nls_map); 362 return 0; 363 } 364 365 static int jfs_remount(struct super_block *sb, int *flags, char *data) 366 { 367 s64 newLVSize = 0; 368 int rc = 0; 369 int flag = JFS_SBI(sb)->flag; 370 int ret; 371 372 if (!parse_options(data, sb, &newLVSize, &flag)) { 373 return -EINVAL; 374 } 375 376 if (newLVSize) { 377 if (sb->s_flags & MS_RDONLY) { 378 printk(KERN_ERR 379 "JFS: resize requires volume to be mounted read-write\n"); 380 return -EROFS; 381 } 382 rc = jfs_extendfs(sb, newLVSize, 0); 383 if (rc) 384 return rc; 385 } 386 387 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { 388 /* 389 * Invalidate any previously read metadata. fsck may have 390 * changed the on-disk data since we mounted r/o 391 */ 392 truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0); 393 394 JFS_SBI(sb)->flag = flag; 395 ret = jfs_mount_rw(sb, 1); 396 397 /* mark the fs r/w for quota activity */ 398 sb->s_flags &= ~MS_RDONLY; 399 400 dquot_resume(sb, -1); 401 return ret; 402 } 403 if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) { 404 rc = dquot_suspend(sb, -1); 405 if (rc < 0) { 406 return rc; 407 } 408 rc = jfs_umount_rw(sb); 409 JFS_SBI(sb)->flag = flag; 410 return rc; 411 } 412 if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY)) 413 if (!(sb->s_flags & MS_RDONLY)) { 414 rc = jfs_umount_rw(sb); 415 if (rc) 416 return rc; 417 418 JFS_SBI(sb)->flag = flag; 419 ret = jfs_mount_rw(sb, 1); 420 return ret; 421 } 422 JFS_SBI(sb)->flag = flag; 423 424 return 0; 425 } 426 427 static int jfs_fill_super(struct super_block *sb, void *data, int silent) 428 { 429 struct jfs_sb_info *sbi; 430 struct inode *inode; 431 int rc; 432 s64 newLVSize = 0; 433 int flag, ret = -EINVAL; 434 435 jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); 436 437 if (!new_valid_dev(sb->s_bdev->bd_dev)) 438 return -EOVERFLOW; 439 440 sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); 441 if (!sbi) 442 return -ENOMEM; 443 444 sb->s_fs_info = sbi; 445 sbi->sb = sb; 446 sbi->uid = sbi->gid = sbi->umask = -1; 447 448 /* initialize the mount flag and determine the default error handler */ 449 flag = JFS_ERR_REMOUNT_RO; 450 451 if (!parse_options((char *) data, sb, &newLVSize, &flag)) 452 goto out_kfree; 453 sbi->flag = flag; 454 455 #ifdef CONFIG_JFS_POSIX_ACL 456 sb->s_flags |= MS_POSIXACL; 457 #endif 458 459 if (newLVSize) { 460 printk(KERN_ERR "resize option for remount only\n"); 461 goto out_kfree; 462 } 463 464 /* 465 * Initialize blocksize to 4K. 466 */ 467 sb_set_blocksize(sb, PSIZE); 468 469 /* 470 * Set method vectors. 471 */ 472 sb->s_op = &jfs_super_operations; 473 sb->s_export_op = &jfs_export_operations; 474 #ifdef CONFIG_QUOTA 475 sb->dq_op = &dquot_operations; 476 sb->s_qcop = &dquot_quotactl_ops; 477 #endif 478 479 /* 480 * Initialize direct-mapping inode/address-space 481 */ 482 inode = new_inode(sb); 483 if (inode == NULL) { 484 ret = -ENOMEM; 485 goto out_unload; 486 } 487 inode->i_ino = 0; 488 inode->i_nlink = 1; 489 inode->i_size = sb->s_bdev->bd_inode->i_size; 490 inode->i_mapping->a_ops = &jfs_metapage_aops; 491 insert_inode_hash(inode); 492 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 493 494 sbi->direct_inode = inode; 495 496 rc = jfs_mount(sb); 497 if (rc) { 498 if (!silent) { 499 jfs_err("jfs_mount failed w/return code = %d", rc); 500 } 501 goto out_mount_failed; 502 } 503 if (sb->s_flags & MS_RDONLY) 504 sbi->log = NULL; 505 else { 506 rc = jfs_mount_rw(sb, 0); 507 if (rc) { 508 if (!silent) { 509 jfs_err("jfs_mount_rw failed, return code = %d", 510 rc); 511 } 512 goto out_no_rw; 513 } 514 } 515 516 sb->s_magic = JFS_SUPER_MAGIC; 517 518 if (sbi->mntflag & JFS_OS2) 519 sb->s_d_op = &jfs_ci_dentry_operations; 520 521 inode = jfs_iget(sb, ROOT_I); 522 if (IS_ERR(inode)) { 523 ret = PTR_ERR(inode); 524 goto out_no_rw; 525 } 526 sb->s_root = d_alloc_root(inode); 527 if (!sb->s_root) 528 goto out_no_root; 529 530 /* logical blocks are represented by 40 bits in pxd_t, etc. */ 531 sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; 532 #if BITS_PER_LONG == 32 533 /* 534 * Page cache is indexed by long. 535 * I would use MAX_LFS_FILESIZE, but it's only half as big 536 */ 537 sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes); 538 #endif 539 sb->s_time_gran = 1; 540 return 0; 541 542 out_no_root: 543 jfs_err("jfs_read_super: get root dentry failed"); 544 iput(inode); 545 546 out_no_rw: 547 rc = jfs_umount(sb); 548 if (rc) { 549 jfs_err("jfs_umount failed with return code %d", rc); 550 } 551 out_mount_failed: 552 filemap_write_and_wait(sbi->direct_inode->i_mapping); 553 truncate_inode_pages(sbi->direct_inode->i_mapping, 0); 554 make_bad_inode(sbi->direct_inode); 555 iput(sbi->direct_inode); 556 sbi->direct_inode = NULL; 557 out_unload: 558 if (sbi->nls_tab) 559 unload_nls(sbi->nls_tab); 560 out_kfree: 561 kfree(sbi); 562 return ret; 563 } 564 565 static int jfs_freeze(struct super_block *sb) 566 { 567 struct jfs_sb_info *sbi = JFS_SBI(sb); 568 struct jfs_log *log = sbi->log; 569 570 if (!(sb->s_flags & MS_RDONLY)) { 571 txQuiesce(sb); 572 lmLogShutdown(log); 573 updateSuper(sb, FM_CLEAN); 574 } 575 return 0; 576 } 577 578 static int jfs_unfreeze(struct super_block *sb) 579 { 580 struct jfs_sb_info *sbi = JFS_SBI(sb); 581 struct jfs_log *log = sbi->log; 582 int rc = 0; 583 584 if (!(sb->s_flags & MS_RDONLY)) { 585 updateSuper(sb, FM_MOUNT); 586 if ((rc = lmLogInit(log))) 587 jfs_err("jfs_unlock failed with return code %d", rc); 588 else 589 txResume(sb); 590 } 591 return 0; 592 } 593 594 static struct dentry *jfs_do_mount(struct file_system_type *fs_type, 595 int flags, const char *dev_name, void *data) 596 { 597 return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super); 598 } 599 600 static int jfs_sync_fs(struct super_block *sb, int wait) 601 { 602 struct jfs_log *log = JFS_SBI(sb)->log; 603 604 /* log == NULL indicates read-only mount */ 605 if (log) { 606 jfs_flush_journal(log, wait); 607 jfs_syncpt(log, 0); 608 } 609 610 return 0; 611 } 612 613 static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs) 614 { 615 struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb); 616 617 if (sbi->uid != -1) 618 seq_printf(seq, ",uid=%d", sbi->uid); 619 if (sbi->gid != -1) 620 seq_printf(seq, ",gid=%d", sbi->gid); 621 if (sbi->umask != -1) 622 seq_printf(seq, ",umask=%03o", sbi->umask); 623 if (sbi->flag & JFS_NOINTEGRITY) 624 seq_puts(seq, ",nointegrity"); 625 if (sbi->nls_tab) 626 seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset); 627 if (sbi->flag & JFS_ERR_CONTINUE) 628 seq_printf(seq, ",errors=continue"); 629 if (sbi->flag & JFS_ERR_PANIC) 630 seq_printf(seq, ",errors=panic"); 631 632 #ifdef CONFIG_QUOTA 633 if (sbi->flag & JFS_USRQUOTA) 634 seq_puts(seq, ",usrquota"); 635 636 if (sbi->flag & JFS_GRPQUOTA) 637 seq_puts(seq, ",grpquota"); 638 #endif 639 640 return 0; 641 } 642 643 #ifdef CONFIG_QUOTA 644 645 /* Read data from quotafile - avoid pagecache and such because we cannot afford 646 * acquiring the locks... As quota files are never truncated and quota code 647 * itself serializes the operations (and no one else should touch the files) 648 * we don't have to be afraid of races */ 649 static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, 650 size_t len, loff_t off) 651 { 652 struct inode *inode = sb_dqopt(sb)->files[type]; 653 sector_t blk = off >> sb->s_blocksize_bits; 654 int err = 0; 655 int offset = off & (sb->s_blocksize - 1); 656 int tocopy; 657 size_t toread; 658 struct buffer_head tmp_bh; 659 struct buffer_head *bh; 660 loff_t i_size = i_size_read(inode); 661 662 if (off > i_size) 663 return 0; 664 if (off+len > i_size) 665 len = i_size-off; 666 toread = len; 667 while (toread > 0) { 668 tocopy = sb->s_blocksize - offset < toread ? 669 sb->s_blocksize - offset : toread; 670 671 tmp_bh.b_state = 0; 672 tmp_bh.b_size = 1 << inode->i_blkbits; 673 err = jfs_get_block(inode, blk, &tmp_bh, 0); 674 if (err) 675 return err; 676 if (!buffer_mapped(&tmp_bh)) /* A hole? */ 677 memset(data, 0, tocopy); 678 else { 679 bh = sb_bread(sb, tmp_bh.b_blocknr); 680 if (!bh) 681 return -EIO; 682 memcpy(data, bh->b_data+offset, tocopy); 683 brelse(bh); 684 } 685 offset = 0; 686 toread -= tocopy; 687 data += tocopy; 688 blk++; 689 } 690 return len; 691 } 692 693 /* Write to quotafile */ 694 static ssize_t jfs_quota_write(struct super_block *sb, int type, 695 const char *data, size_t len, loff_t off) 696 { 697 struct inode *inode = sb_dqopt(sb)->files[type]; 698 sector_t blk = off >> sb->s_blocksize_bits; 699 int err = 0; 700 int offset = off & (sb->s_blocksize - 1); 701 int tocopy; 702 size_t towrite = len; 703 struct buffer_head tmp_bh; 704 struct buffer_head *bh; 705 706 mutex_lock(&inode->i_mutex); 707 while (towrite > 0) { 708 tocopy = sb->s_blocksize - offset < towrite ? 709 sb->s_blocksize - offset : towrite; 710 711 tmp_bh.b_state = 0; 712 tmp_bh.b_size = 1 << inode->i_blkbits; 713 err = jfs_get_block(inode, blk, &tmp_bh, 1); 714 if (err) 715 goto out; 716 if (offset || tocopy != sb->s_blocksize) 717 bh = sb_bread(sb, tmp_bh.b_blocknr); 718 else 719 bh = sb_getblk(sb, tmp_bh.b_blocknr); 720 if (!bh) { 721 err = -EIO; 722 goto out; 723 } 724 lock_buffer(bh); 725 memcpy(bh->b_data+offset, data, tocopy); 726 flush_dcache_page(bh->b_page); 727 set_buffer_uptodate(bh); 728 mark_buffer_dirty(bh); 729 unlock_buffer(bh); 730 brelse(bh); 731 offset = 0; 732 towrite -= tocopy; 733 data += tocopy; 734 blk++; 735 } 736 out: 737 if (len == towrite) { 738 mutex_unlock(&inode->i_mutex); 739 return err; 740 } 741 if (inode->i_size < off+len-towrite) 742 i_size_write(inode, off+len-towrite); 743 inode->i_version++; 744 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 745 mark_inode_dirty(inode); 746 mutex_unlock(&inode->i_mutex); 747 return len - towrite; 748 } 749 750 #endif 751 752 static const struct super_operations jfs_super_operations = { 753 .alloc_inode = jfs_alloc_inode, 754 .destroy_inode = jfs_destroy_inode, 755 .dirty_inode = jfs_dirty_inode, 756 .write_inode = jfs_write_inode, 757 .evict_inode = jfs_evict_inode, 758 .put_super = jfs_put_super, 759 .sync_fs = jfs_sync_fs, 760 .freeze_fs = jfs_freeze, 761 .unfreeze_fs = jfs_unfreeze, 762 .statfs = jfs_statfs, 763 .remount_fs = jfs_remount, 764 .show_options = jfs_show_options, 765 #ifdef CONFIG_QUOTA 766 .quota_read = jfs_quota_read, 767 .quota_write = jfs_quota_write, 768 #endif 769 }; 770 771 static const struct export_operations jfs_export_operations = { 772 .fh_to_dentry = jfs_fh_to_dentry, 773 .fh_to_parent = jfs_fh_to_parent, 774 .get_parent = jfs_get_parent, 775 }; 776 777 static struct file_system_type jfs_fs_type = { 778 .owner = THIS_MODULE, 779 .name = "jfs", 780 .mount = jfs_do_mount, 781 .kill_sb = kill_block_super, 782 .fs_flags = FS_REQUIRES_DEV, 783 }; 784 785 static void init_once(void *foo) 786 { 787 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; 788 789 memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); 790 INIT_LIST_HEAD(&jfs_ip->anon_inode_list); 791 init_rwsem(&jfs_ip->rdwrlock); 792 mutex_init(&jfs_ip->commit_mutex); 793 init_rwsem(&jfs_ip->xattr_sem); 794 spin_lock_init(&jfs_ip->ag_lock); 795 jfs_ip->active_ag = -1; 796 inode_init_once(&jfs_ip->vfs_inode); 797 } 798 799 static int __init init_jfs_fs(void) 800 { 801 int i; 802 int rc; 803 804 jfs_inode_cachep = 805 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, 806 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 807 init_once); 808 if (jfs_inode_cachep == NULL) 809 return -ENOMEM; 810 811 /* 812 * Metapage initialization 813 */ 814 rc = metapage_init(); 815 if (rc) { 816 jfs_err("metapage_init failed w/rc = %d", rc); 817 goto free_slab; 818 } 819 820 /* 821 * Transaction Manager initialization 822 */ 823 rc = txInit(); 824 if (rc) { 825 jfs_err("txInit failed w/rc = %d", rc); 826 goto free_metapage; 827 } 828 829 /* 830 * I/O completion thread (endio) 831 */ 832 jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO"); 833 if (IS_ERR(jfsIOthread)) { 834 rc = PTR_ERR(jfsIOthread); 835 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); 836 goto end_txmngr; 837 } 838 839 if (commit_threads < 1) 840 commit_threads = num_online_cpus(); 841 if (commit_threads > MAX_COMMIT_THREADS) 842 commit_threads = MAX_COMMIT_THREADS; 843 844 for (i = 0; i < commit_threads; i++) { 845 jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit"); 846 if (IS_ERR(jfsCommitThread[i])) { 847 rc = PTR_ERR(jfsCommitThread[i]); 848 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); 849 commit_threads = i; 850 goto kill_committask; 851 } 852 } 853 854 jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync"); 855 if (IS_ERR(jfsSyncThread)) { 856 rc = PTR_ERR(jfsSyncThread); 857 jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); 858 goto kill_committask; 859 } 860 861 #ifdef PROC_FS_JFS 862 jfs_proc_init(); 863 #endif 864 865 return register_filesystem(&jfs_fs_type); 866 867 kill_committask: 868 for (i = 0; i < commit_threads; i++) 869 kthread_stop(jfsCommitThread[i]); 870 kthread_stop(jfsIOthread); 871 end_txmngr: 872 txExit(); 873 free_metapage: 874 metapage_exit(); 875 free_slab: 876 kmem_cache_destroy(jfs_inode_cachep); 877 return rc; 878 } 879 880 static void __exit exit_jfs_fs(void) 881 { 882 int i; 883 884 jfs_info("exit_jfs_fs called"); 885 886 txExit(); 887 metapage_exit(); 888 889 kthread_stop(jfsIOthread); 890 for (i = 0; i < commit_threads; i++) 891 kthread_stop(jfsCommitThread[i]); 892 kthread_stop(jfsSyncThread); 893 #ifdef PROC_FS_JFS 894 jfs_proc_clean(); 895 #endif 896 unregister_filesystem(&jfs_fs_type); 897 kmem_cache_destroy(jfs_inode_cachep); 898 } 899 900 module_init(init_jfs_fs) 901 module_exit(exit_jfs_fs) 902