/*
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Copyright (C) 1996  Gertjan van Wingerde
 *	Minix V2 fs support.
 *
 *  Modified for 680x0 by Andreas Schwab
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include <linux/module.h>
#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/vfs.h>
#include <linux/writeback.h>

static int minix_write_inode(struct inode *inode,
                struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);

static void minix_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        if (!inode->i_nlink) {
                inode->i_size = 0;
                minix_truncate(inode);
        }
        invalidate_inode_buffers(inode);
        clear_inode(inode);
        if (!inode->i_nlink)
                minix_free_inode(inode);
}

static void minix_put_super(struct super_block *sb)
{
        int i;
        struct minix_sb_info *sbi = minix_sb(sb);

        if (!sb_rdonly(sb)) {
                if (sbi->s_version != MINIX_V3)  /* s_state is now out from V3 sb */
                        sbi->s_ms->s_state = sbi->s_mount_state;
                mark_buffer_dirty(sbi->s_sbh);
        }
        for (i = 0; i < sbi->s_imap_blocks; i++)
                brelse(sbi->s_imap[i]);
        for (i = 0; i < sbi->s_zmap_blocks; i++)
                brelse(sbi->s_zmap[i]);
        brelse (sbi->s_sbh);
        kfree(sbi->s_imap);
        sb->s_fs_info = NULL;
        kfree(sbi);
}

static struct kmem_cache * minix_inode_cachep;

static struct inode *minix_alloc_inode(struct super_block *sb)
{
        struct minix_inode_info *ei;
        ei = kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void minix_free_in_core_inode(struct inode *inode)
{
        kmem_cache_free(minix_inode_cachep, minix_i(inode));
}

static void init_once(void *foo)
{
        struct minix_inode_info *ei = (struct minix_inode_info *) foo;

        inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
        minix_inode_cachep = kmem_cache_create("minix_inode_cache",
                                        sizeof(struct minix_inode_info),
                                        0, (SLAB_RECLAIM_ACCOUNT|
                                        SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                                        init_once);
        if (minix_inode_cachep == NULL)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(minix_inode_cachep);
}

static const struct super_operations minix_sops = {
        .alloc_inode    = minix_alloc_inode,
        .free_inode     = minix_free_in_core_inode,
        .write_inode    = minix_write_inode,
        .evict_inode    = minix_evict_inode,
        .put_super      = minix_put_super,
        .statfs         = minix_statfs,
        .remount_fs     = minix_remount,
};

static int minix_remount (struct super_block * sb, int * flags, char * data)
{
        struct minix_sb_info * sbi = minix_sb(sb);
        struct minix_super_block * ms;

        sync_filesystem(sb);
        ms = sbi->s_ms;
        if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                return 0;
        if (*flags & SB_RDONLY) {
                if (ms->s_state & MINIX_VALID_FS ||
                    !(sbi->s_mount_state & MINIX_VALID_FS))
                        return 0;
                /* Mounting a rw partition read-only. */
                if (sbi->s_version != MINIX_V3)
                        ms->s_state = sbi->s_mount_state;
                mark_buffer_dirty(sbi->s_sbh);
        } else {
                /* Mount a partition which is read-only, read-write. */
                if (sbi->s_version != MINIX_V3) {
                        sbi->s_mount_state = ms->s_state;
                        ms->s_state &= ~MINIX_VALID_FS;
                } else {
                        sbi->s_mount_state = MINIX_VALID_FS;
                }
                mark_buffer_dirty(sbi->s_sbh);

                if (!(sbi->s_mount_state & MINIX_VALID_FS))
                        printk("MINIX-fs warning: remounting unchecked fs, "
                                "running fsck is recommended\n");
                else if ((sbi->s_mount_state & MINIX_ERROR_FS))
                        printk("MINIX-fs warning: remounting fs with errors, "
                                "running fsck is recommended\n");
        }
        return 0;
}

static int minix_fill_super(struct super_block *s, void *data, int silent)
{
        struct buffer_head *bh;
        struct buffer_head **map;
        struct minix_super_block *ms;
        struct minix3_super_block *m3s = NULL;
        unsigned long i, block;
        struct inode *root_inode;
        struct minix_sb_info *sbi;
        int ret = -EINVAL;

        sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;
        s->s_fs_info = sbi;

        BUILD_BUG_ON(32 != sizeof (struct minix_inode));
        BUILD_BUG_ON(64 != sizeof(struct minix2_inode));

        if (!sb_set_blocksize(s, BLOCK_SIZE))
                goto out_bad_hblock;

        if (!(bh = sb_bread(s, 1)))
                goto out_bad_sb;

        ms = (struct minix_super_block *) bh->b_data;
        sbi->s_ms = ms;
        sbi->s_sbh = bh;
        sbi->s_mount_state = ms->s_state;
        sbi->s_ninodes = ms->s_ninodes;
        sbi->s_nzones = ms->s_nzones;
        sbi->s_imap_blocks = ms->s_imap_blocks;
        sbi->s_zmap_blocks = ms->s_zmap_blocks;
        sbi->s_firstdatazone = ms->s_firstdatazone;
        sbi->s_log_zone_size = ms->s_log_zone_size;
        sbi->s_max_size = ms->s_max_size;
        s->s_magic = ms->s_magic;
        if (s->s_magic == MINIX_SUPER_MAGIC) {
                sbi->s_version = MINIX_V1;
                sbi->s_dirsize = 16;
                sbi->s_namelen = 14;
                s->s_max_links = MINIX_LINK_MAX;
        } else if (s->s_magic == MINIX_SUPER_MAGIC2) {
                sbi->s_version = MINIX_V1;
                sbi->s_dirsize = 32;
                sbi->s_namelen = 30;
                s->s_max_links = MINIX_LINK_MAX;
        } else if (s->s_magic == MINIX2_SUPER_MAGIC) {
                sbi->s_version = MINIX_V2;
                sbi->s_nzones = ms->s_zones;
                sbi->s_dirsize = 16;
                sbi->s_namelen = 14;
                s->s_max_links = MINIX2_LINK_MAX;
        } else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
                sbi->s_version = MINIX_V2;
                sbi->s_nzones = ms->s_zones;
                sbi->s_dirsize = 32;
                sbi->s_namelen = 30;
                s->s_max_links = MINIX2_LINK_MAX;
        } else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
                m3s = (struct minix3_super_block *) bh->b_data;
                s->s_magic = m3s->s_magic;
                sbi->s_imap_blocks = m3s->s_imap_blocks;
                sbi->s_zmap_blocks = m3s->s_zmap_blocks;
                sbi->s_firstdatazone = m3s->s_firstdatazone;
                sbi->s_log_zone_size = m3s->s_log_zone_size;
                sbi->s_max_size = m3s->s_max_size;
                sbi->s_ninodes = m3s->s_ninodes;
                sbi->s_nzones = m3s->s_zones;
                sbi->s_dirsize = 64;
                sbi->s_namelen = 60;
                sbi->s_version = MINIX_V3;
                sbi->s_mount_state = MINIX_VALID_FS;
                sb_set_blocksize(s, m3s->s_blocksize);
                s->s_max_links = MINIX2_LINK_MAX;
        } else
                goto out_no_fs;

        /*
         * Allocate the buffer map to keep the superblock small.
         */
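        /*
         * Both bitmaps share a single allocation below: s_imap points at the
         * start of the array and s_zmap at its tail, which is why only
         * s_imap is passed to kfree() on the cleanup paths.
         */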
        if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
                goto out_illegal_sb;
        i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
        map = kzalloc(i, GFP_KERNEL);
        if (!map)
                goto out_no_map;
        sbi->s_imap = &map[0];
        sbi->s_zmap = &map[sbi->s_imap_blocks];

        block=2;
        for (i=0 ; i < sbi->s_imap_blocks ; i++) {
                if (!(sbi->s_imap[i]=sb_bread(s, block)))
                        goto out_no_bitmap;
                block++;
        }
        for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
                if (!(sbi->s_zmap[i]=sb_bread(s, block)))
                        goto out_no_bitmap;
                block++;
        }

        minix_set_bit(0,sbi->s_imap[0]->b_data);
        minix_set_bit(0,sbi->s_zmap[0]->b_data);

        /* Apparently minix can create filesystems that allocate more blocks for
         * the bitmaps than needed.  We simply ignore that, but verify it didn't
         * create one with not enough blocks and bail out if so.
         */
        block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
        if (sbi->s_imap_blocks < block) {
                printk("MINIX-fs: file system does not have enough "
                                "imap blocks allocated.  Refusing to mount.\n");
                goto out_no_bitmap;
        }

        block = minix_blocks_needed(
                        (sbi->s_nzones - sbi->s_firstdatazone + 1),
                        s->s_blocksize);
        if (sbi->s_zmap_blocks < block) {
                printk("MINIX-fs: file system does not have enough "
                                "zmap blocks allocated.  Refusing to mount.\n");
                goto out_no_bitmap;
        }

        /* set up enough so that it can read an inode */
        s->s_op = &minix_sops;
        root_inode = minix_iget(s, MINIX_ROOT_INO);
        if (IS_ERR(root_inode)) {
                ret = PTR_ERR(root_inode);
                goto out_no_root;
        }

        ret = -ENOMEM;
        s->s_root = d_make_root(root_inode);
        if (!s->s_root)
                goto out_no_root;

        if (!sb_rdonly(s)) {
                if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
                        ms->s_state &= ~MINIX_VALID_FS;
                mark_buffer_dirty(bh);
        }
        if (!(sbi->s_mount_state & MINIX_VALID_FS))
                printk("MINIX-fs: mounting unchecked file system, "
                        "running fsck is recommended\n");
        else if (sbi->s_mount_state & MINIX_ERROR_FS)
                printk("MINIX-fs: mounting file system with errors, "
                        "running fsck is recommended\n");

        return 0;

out_no_root:
        if (!silent)
                printk("MINIX-fs: get root inode failed\n");
        goto out_freemap;

out_no_bitmap:
        printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
out_freemap:
        for (i = 0; i < sbi->s_imap_blocks; i++)
                brelse(sbi->s_imap[i]);
        for (i = 0; i < sbi->s_zmap_blocks; i++)
                brelse(sbi->s_zmap[i]);
        kfree(sbi->s_imap);
        goto out_release;

out_no_map:
        ret = -ENOMEM;
        if (!silent)
                printk("MINIX-fs: can't allocate map\n");
        goto out_release;

out_illegal_sb:
        if (!silent)
                printk("MINIX-fs: bad superblock\n");
        goto out_release;

out_no_fs:
        if (!silent)
                printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 "
                       "on device %s.\n", s->s_id);
out_release:
        brelse(bh);
        goto out;

out_bad_hblock:
        printk("MINIX-fs: blocksize too small for device\n");
        goto out;

out_bad_sb:
        printk("MINIX-fs: unable to read superblock\n");
out:
        s->s_fs_info = NULL;
        kfree(sbi);
        return ret;
}

static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct minix_sb_info *sbi = minix_sb(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
        buf->f_type = sb->s_magic;
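        /*
         * Zones below s_firstdatazone hold the superblock, bitmaps and inode
         * table, so only the remaining zones are reported as data blocks,
         * scaled to blocks by the zone size shift.
         */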
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
        buf->f_bfree = minix_count_free_blocks(sb);
        buf->f_bavail = buf->f_bfree;
        buf->f_files = sbi->s_ninodes;
        buf->f_ffree = minix_count_free_inodes(sb);
        buf->f_namelen = sbi->s_namelen;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);

        return 0;
}

static int minix_get_block(struct inode *inode, sector_t block,
                    struct buffer_head *bh_result, int create)
{
        if (INODE_VERSION(inode) == MINIX_V1)
                return V1_minix_get_block(inode, block, bh_result, create);
        else
                return V2_minix_get_block(inode, block, bh_result, create);
}

static int minix_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, minix_get_block, wbc);
}

static int minix_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page,minix_get_block);
}

int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
        return __block_write_begin(page, pos, len, minix_get_block);
}

static void minix_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                minix_truncate(inode);
        }
}

static int minix_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                minix_get_block);
        if (unlikely(ret))
                minix_write_failed(mapping, pos + len);

        return ret;
}

static sector_t minix_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping,block,minix_get_block);
}

static const struct address_space_operations minix_aops = {
        .readpage = minix_readpage,
        .writepage = minix_writepage,
        .write_begin = minix_write_begin,
        .write_end = generic_write_end,
        .bmap = minix_bmap
};

static const struct inode_operations minix_symlink_inode_operations = {
        .get_link       = page_get_link,
        .getattr        = minix_getattr,
};

void minix_set_inode(struct inode *inode, dev_t rdev)
{
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &minix_file_inode_operations;
                inode->i_fop = &minix_file_operations;
                inode->i_mapping->a_ops = &minix_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &minix_dir_inode_operations;
                inode->i_fop = &minix_dir_operations;
                inode->i_mapping->a_ops = &minix_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &minix_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &minix_aops;
        } else
                init_special_inode(inode, inode->i_mode, rdev);
}

/*
 * The minix V1 function to read an inode.
 */
static struct inode *V1_minix_iget(struct inode *inode)
{
        struct buffer_head * bh;
        struct minix_inode * raw_inode;
        struct minix_inode_info *minix_inode = minix_i(inode);
        int i;

        raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
        if (!raw_inode) {
                iget_failed(inode);
                return ERR_PTR(-EIO);
        }
        inode->i_mode = raw_inode->i_mode;
        i_uid_write(inode, raw_inode->i_uid);
        i_gid_write(inode, raw_inode->i_gid);
        set_nlink(inode, raw_inode->i_nlinks);
        inode->i_size = raw_inode->i_size;
        inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time;
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_blocks = 0;
        for (i = 0; i < 9; i++)
                minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
        minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
        brelse(bh);
        unlock_new_inode(inode);
        return inode;
}

/*
 * The minix V2 function to read an inode.
 */
static struct inode *V2_minix_iget(struct inode *inode)
{
        struct buffer_head * bh;
        struct minix2_inode * raw_inode;
        struct minix_inode_info *minix_inode = minix_i(inode);
        int i;

        raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
        if (!raw_inode) {
                iget_failed(inode);
                return ERR_PTR(-EIO);
        }
        inode->i_mode = raw_inode->i_mode;
        i_uid_write(inode, raw_inode->i_uid);
        i_gid_write(inode, raw_inode->i_gid);
        set_nlink(inode, raw_inode->i_nlinks);
        inode->i_size = raw_inode->i_size;
        inode->i_mtime.tv_sec = raw_inode->i_mtime;
        inode->i_atime.tv_sec = raw_inode->i_atime;
        inode->i_ctime.tv_sec = raw_inode->i_ctime;
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_blocks = 0;
        for (i = 0; i < 10; i++)
                minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
        minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
        brelse(bh);
        unlock_new_inode(inode);
        return inode;
}

/*
 * The global function to read an inode.
 */
struct inode *minix_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        if (INODE_VERSION(inode) == MINIX_V1)
                return V1_minix_iget(inode);
        else
                return V2_minix_iget(inode);
}

/*
 * The minix V1 function to synchronize an inode.
 */
static struct buffer_head * V1_minix_update_inode(struct inode * inode)
{
        struct buffer_head * bh;
        struct minix_inode * raw_inode;
        struct minix_inode_info *minix_inode = minix_i(inode);
        int i;

        raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
        if (!raw_inode)
                return NULL;
        raw_inode->i_mode = inode->i_mode;
        raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
        raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
        raw_inode->i_nlinks = inode->i_nlink;
        raw_inode->i_size = inode->i_size;
        raw_inode->i_time = inode->i_mtime.tv_sec;
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
        else for (i = 0; i < 9; i++)
                raw_inode->i_zone[i] = minix_inode->u.i1_data[i];
        mark_buffer_dirty(bh);
        return bh;
}

/*
 * The minix V2 function to synchronize an inode.
 */
static struct buffer_head * V2_minix_update_inode(struct inode * inode)
{
        struct buffer_head * bh;
        struct minix2_inode * raw_inode;
        struct minix_inode_info *minix_inode = minix_i(inode);
        int i;

        raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
        if (!raw_inode)
                return NULL;
        raw_inode->i_mode = inode->i_mode;
        raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
        raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
        raw_inode->i_nlinks = inode->i_nlink;
        raw_inode->i_size = inode->i_size;
        raw_inode->i_mtime = inode->i_mtime.tv_sec;
        raw_inode->i_atime = inode->i_atime.tv_sec;
        raw_inode->i_ctime = inode->i_ctime.tv_sec;
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
        else for (i = 0; i < 10; i++)
                raw_inode->i_zone[i] = minix_inode->u.i2_data[i];
        mark_buffer_dirty(bh);
        return bh;
}

static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        int err = 0;
        struct buffer_head *bh;

        if (INODE_VERSION(inode) == MINIX_V1)
                bh = V1_minix_update_inode(inode);
        else
                bh = V2_minix_update_inode(inode);
        if (!bh)
                return -EIO;
        if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
                sync_dirty_buffer(bh);
                if (buffer_req(bh) && !buffer_uptodate(bh)) {
                        printk("IO error syncing minix inode [%s:%08lx]\n",
                                inode->i_sb->s_id, inode->i_ino);
                        err = -EIO;
                }
        }
        brelse (bh);
        return err;
}

int minix_getattr(const struct path *path, struct kstat *stat,
                  u32 request_mask, unsigned int flags)
{
        struct super_block *sb = path->dentry->d_sb;
        struct inode *inode = d_inode(path->dentry);

        generic_fillattr(inode, stat);
        if (INODE_VERSION(inode) == MINIX_V1)
                stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
        else
                stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb);
        stat->blksize = sb->s_blocksize;
        return 0;
}

/*
 * The function that is called for file truncation.
 */
void minix_truncate(struct inode * inode)
{
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
                return;
        if (INODE_VERSION(inode) == MINIX_V1)
                V1_minix_truncate(inode);
        else
                V2_minix_truncate(inode);
}

static struct dentry *minix_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, minix_fill_super);
}

static struct file_system_type minix_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "minix",
        .mount          = minix_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("minix");

static int __init init_minix_fs(void)
{
        int err = init_inodecache();
        if (err)
                goto out1;
        err = register_filesystem(&minix_fs_type);
        if (err)
                goto out;
        return 0;
out:
        destroy_inodecache();
out1:
        return err;
}

static void __exit exit_minix_fs(void)
{
        unregister_filesystem(&minix_fs_type);
        destroy_inodecache();
}

module_init(init_minix_fs)
module_exit(exit_minix_fs)
MODULE_LICENSE("GPL");