1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * super.c - NILFS module and super block management. 4 * 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 6 * 7 * Written by Ryusuke Konishi. 8 */ 9 /* 10 * linux/fs/ext2/super.c 11 * 12 * Copyright (C) 1992, 1993, 1994, 1995 13 * Remy Card (card@masi.ibp.fr) 14 * Laboratoire MASI - Institut Blaise Pascal 15 * Universite Pierre et Marie Curie (Paris VI) 16 * 17 * from 18 * 19 * linux/fs/minix/inode.c 20 * 21 * Copyright (C) 1991, 1992 Linus Torvalds 22 * 23 * Big-endian to little-endian byte-swapping/bitmaps by 24 * David S. Miller (davem@caip.rutgers.edu), 1995 25 */ 26 27 #include <linux/module.h> 28 #include <linux/string.h> 29 #include <linux/slab.h> 30 #include <linux/init.h> 31 #include <linux/blkdev.h> 32 #include <linux/parser.h> 33 #include <linux/crc32.h> 34 #include <linux/vfs.h> 35 #include <linux/writeback.h> 36 #include <linux/seq_file.h> 37 #include <linux/mount.h> 38 #include "nilfs.h" 39 #include "export.h" 40 #include "mdt.h" 41 #include "alloc.h" 42 #include "btree.h" 43 #include "btnode.h" 44 #include "page.h" 45 #include "cpfile.h" 46 #include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */ 47 #include "ifile.h" 48 #include "dat.h" 49 #include "segment.h" 50 #include "segbuf.h" 51 52 MODULE_AUTHOR("NTT Corp."); 53 MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem " 54 "(NILFS)"); 55 MODULE_LICENSE("GPL"); 56 57 static struct kmem_cache *nilfs_inode_cachep; 58 struct kmem_cache *nilfs_transaction_cachep; 59 struct kmem_cache *nilfs_segbuf_cachep; 60 struct kmem_cache *nilfs_btree_path_cache; 61 62 static int nilfs_setup_super(struct super_block *sb, int is_mount); 63 static int nilfs_remount(struct super_block *sb, int *flags, char *data); 64 65 void __nilfs_msg(struct super_block *sb, const char *level, const char *fmt, 66 ...) 67 { 68 struct va_format vaf; 69 va_list args; 70 71 va_start(args, fmt); 72 vaf.fmt = fmt; 73 vaf.va = &args; 74 if (sb) 75 printk("%sNILFS (%s): %pV\n", level, sb->s_id, &vaf); 76 else 77 printk("%sNILFS: %pV\n", level, &vaf); 78 va_end(args); 79 } 80 81 static void nilfs_set_error(struct super_block *sb) 82 { 83 struct the_nilfs *nilfs = sb->s_fs_info; 84 struct nilfs_super_block **sbp; 85 86 down_write(&nilfs->ns_sem); 87 if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { 88 nilfs->ns_mount_state |= NILFS_ERROR_FS; 89 sbp = nilfs_prepare_super(sb, 0); 90 if (likely(sbp)) { 91 sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); 92 if (sbp[1]) 93 sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS); 94 nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); 95 } 96 } 97 up_write(&nilfs->ns_sem); 98 } 99 100 /** 101 * __nilfs_error() - report failure condition on a filesystem 102 * 103 * __nilfs_error() sets an ERROR_FS flag on the superblock as well as 104 * reporting an error message. This function should be called when 105 * NILFS detects incoherences or defects of meta data on disk. 106 * 107 * This implements the body of nilfs_error() macro. Normally, 108 * nilfs_error() should be used. As for sustainable errors such as a 109 * single-shot I/O error, nilfs_msg() should be used instead. 110 * 111 * Callers should not add a trailing newline since this will do it. 112 */ 113 void __nilfs_error(struct super_block *sb, const char *function, 114 const char *fmt, ...) 115 { 116 struct the_nilfs *nilfs = sb->s_fs_info; 117 struct va_format vaf; 118 va_list args; 119 120 va_start(args, fmt); 121 122 vaf.fmt = fmt; 123 vaf.va = &args; 124 125 printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n", 126 sb->s_id, function, &vaf); 127 128 va_end(args); 129 130 if (!sb_rdonly(sb)) { 131 nilfs_set_error(sb); 132 133 if (nilfs_test_opt(nilfs, ERRORS_RO)) { 134 printk(KERN_CRIT "Remounting filesystem read-only\n"); 135 sb->s_flags |= SB_RDONLY; 136 } 137 } 138 139 if (nilfs_test_opt(nilfs, ERRORS_PANIC)) 140 panic("NILFS (device %s): panic forced after error\n", 141 sb->s_id); 142 } 143 144 struct inode *nilfs_alloc_inode(struct super_block *sb) 145 { 146 struct nilfs_inode_info *ii; 147 148 ii = kmem_cache_alloc(nilfs_inode_cachep, GFP_NOFS); 149 if (!ii) 150 return NULL; 151 ii->i_bh = NULL; 152 ii->i_state = 0; 153 ii->i_cno = 0; 154 nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode); 155 return &ii->vfs_inode; 156 } 157 158 static void nilfs_free_inode(struct inode *inode) 159 { 160 if (nilfs_is_metadata_file_inode(inode)) 161 nilfs_mdt_destroy(inode); 162 163 kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode)); 164 } 165 166 static int nilfs_sync_super(struct super_block *sb, int flag) 167 { 168 struct the_nilfs *nilfs = sb->s_fs_info; 169 int err; 170 171 retry: 172 set_buffer_dirty(nilfs->ns_sbh[0]); 173 if (nilfs_test_opt(nilfs, BARRIER)) { 174 err = __sync_dirty_buffer(nilfs->ns_sbh[0], 175 REQ_SYNC | REQ_PREFLUSH | REQ_FUA); 176 } else { 177 err = sync_dirty_buffer(nilfs->ns_sbh[0]); 178 } 179 180 if (unlikely(err)) { 181 nilfs_msg(sb, KERN_ERR, "unable to write superblock: err=%d", 182 err); 183 if (err == -EIO && nilfs->ns_sbh[1]) { 184 /* 185 * sbp[0] points to newer log than sbp[1], 186 * so copy sbp[0] to sbp[1] to take over sbp[0]. 187 */ 188 memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0], 189 nilfs->ns_sbsize); 190 nilfs_fall_back_super_block(nilfs); 191 goto retry; 192 } 193 } else { 194 struct nilfs_super_block *sbp = nilfs->ns_sbp[0]; 195 196 nilfs->ns_sbwcount++; 197 198 /* 199 * The latest segment becomes trailable from the position 200 * written in superblock. 201 */ 202 clear_nilfs_discontinued(nilfs); 203 204 /* update GC protection for recent segments */ 205 if (nilfs->ns_sbh[1]) { 206 if (flag == NILFS_SB_COMMIT_ALL) { 207 set_buffer_dirty(nilfs->ns_sbh[1]); 208 if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0) 209 goto out; 210 } 211 if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) < 212 le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno)) 213 sbp = nilfs->ns_sbp[1]; 214 } 215 216 spin_lock(&nilfs->ns_last_segment_lock); 217 nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq); 218 spin_unlock(&nilfs->ns_last_segment_lock); 219 } 220 out: 221 return err; 222 } 223 224 void nilfs_set_log_cursor(struct nilfs_super_block *sbp, 225 struct the_nilfs *nilfs) 226 { 227 sector_t nfreeblocks; 228 229 /* nilfs->ns_sem must be locked by the caller. */ 230 nilfs_count_free_blocks(nilfs, &nfreeblocks); 231 sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks); 232 233 spin_lock(&nilfs->ns_last_segment_lock); 234 sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq); 235 sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg); 236 sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno); 237 spin_unlock(&nilfs->ns_last_segment_lock); 238 } 239 240 struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb, 241 int flip) 242 { 243 struct the_nilfs *nilfs = sb->s_fs_info; 244 struct nilfs_super_block **sbp = nilfs->ns_sbp; 245 246 /* nilfs->ns_sem must be locked by the caller. */ 247 if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { 248 if (sbp[1] && 249 sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { 250 memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); 251 } else { 252 nilfs_msg(sb, KERN_CRIT, "superblock broke"); 253 return NULL; 254 } 255 } else if (sbp[1] && 256 sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { 257 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); 258 } 259 260 if (flip && sbp[1]) 261 nilfs_swap_super_block(nilfs); 262 263 return sbp; 264 } 265 266 int nilfs_commit_super(struct super_block *sb, int flag) 267 { 268 struct the_nilfs *nilfs = sb->s_fs_info; 269 struct nilfs_super_block **sbp = nilfs->ns_sbp; 270 time64_t t; 271 272 /* nilfs->ns_sem must be locked by the caller. */ 273 t = ktime_get_real_seconds(); 274 nilfs->ns_sbwtime = t; 275 sbp[0]->s_wtime = cpu_to_le64(t); 276 sbp[0]->s_sum = 0; 277 sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, 278 (unsigned char *)sbp[0], 279 nilfs->ns_sbsize)); 280 if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) { 281 sbp[1]->s_wtime = sbp[0]->s_wtime; 282 sbp[1]->s_sum = 0; 283 sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, 284 (unsigned char *)sbp[1], 285 nilfs->ns_sbsize)); 286 } 287 clear_nilfs_sb_dirty(nilfs); 288 nilfs->ns_flushed_device = 1; 289 /* make sure store to ns_flushed_device cannot be reordered */ 290 smp_wmb(); 291 return nilfs_sync_super(sb, flag); 292 } 293 294 /** 295 * nilfs_cleanup_super() - write filesystem state for cleanup 296 * @sb: super block instance to be unmounted or degraded to read-only 297 * 298 * This function restores state flags in the on-disk super block. 299 * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the 300 * filesystem was not clean previously. 301 */ 302 int nilfs_cleanup_super(struct super_block *sb) 303 { 304 struct the_nilfs *nilfs = sb->s_fs_info; 305 struct nilfs_super_block **sbp; 306 int flag = NILFS_SB_COMMIT; 307 int ret = -EIO; 308 309 sbp = nilfs_prepare_super(sb, 0); 310 if (sbp) { 311 sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state); 312 nilfs_set_log_cursor(sbp[0], nilfs); 313 if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) { 314 /* 315 * make the "clean" flag also to the opposite 316 * super block if both super blocks point to 317 * the same checkpoint. 318 */ 319 sbp[1]->s_state = sbp[0]->s_state; 320 flag = NILFS_SB_COMMIT_ALL; 321 } 322 ret = nilfs_commit_super(sb, flag); 323 } 324 return ret; 325 } 326 327 /** 328 * nilfs_move_2nd_super - relocate secondary super block 329 * @sb: super block instance 330 * @sb2off: new offset of the secondary super block (in bytes) 331 */ 332 static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) 333 { 334 struct the_nilfs *nilfs = sb->s_fs_info; 335 struct buffer_head *nsbh; 336 struct nilfs_super_block *nsbp; 337 sector_t blocknr, newblocknr; 338 unsigned long offset; 339 int sb2i; /* array index of the secondary superblock */ 340 int ret = 0; 341 342 /* nilfs->ns_sem must be locked by the caller. */ 343 if (nilfs->ns_sbh[1] && 344 nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) { 345 sb2i = 1; 346 blocknr = nilfs->ns_sbh[1]->b_blocknr; 347 } else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) { 348 sb2i = 0; 349 blocknr = nilfs->ns_sbh[0]->b_blocknr; 350 } else { 351 sb2i = -1; 352 blocknr = 0; 353 } 354 if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off) 355 goto out; /* super block location is unchanged */ 356 357 /* Get new super block buffer */ 358 newblocknr = sb2off >> nilfs->ns_blocksize_bits; 359 offset = sb2off & (nilfs->ns_blocksize - 1); 360 nsbh = sb_getblk(sb, newblocknr); 361 if (!nsbh) { 362 nilfs_msg(sb, KERN_WARNING, 363 "unable to move secondary superblock to block %llu", 364 (unsigned long long)newblocknr); 365 ret = -EIO; 366 goto out; 367 } 368 nsbp = (void *)nsbh->b_data + offset; 369 memset(nsbp, 0, nilfs->ns_blocksize); 370 371 if (sb2i >= 0) { 372 memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize); 373 brelse(nilfs->ns_sbh[sb2i]); 374 nilfs->ns_sbh[sb2i] = nsbh; 375 nilfs->ns_sbp[sb2i] = nsbp; 376 } else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) { 377 /* secondary super block will be restored to index 1 */ 378 nilfs->ns_sbh[1] = nsbh; 379 nilfs->ns_sbp[1] = nsbp; 380 } else { 381 brelse(nsbh); 382 } 383 out: 384 return ret; 385 } 386 387 /** 388 * nilfs_resize_fs - resize the filesystem 389 * @sb: super block instance 390 * @newsize: new size of the filesystem (in bytes) 391 */ 392 int nilfs_resize_fs(struct super_block *sb, __u64 newsize) 393 { 394 struct the_nilfs *nilfs = sb->s_fs_info; 395 struct nilfs_super_block **sbp; 396 __u64 devsize, newnsegs; 397 loff_t sb2off; 398 int ret; 399 400 ret = -ERANGE; 401 devsize = i_size_read(sb->s_bdev->bd_inode); 402 if (newsize > devsize) 403 goto out; 404 405 /* 406 * Write lock is required to protect some functions depending 407 * on the number of segments, the number of reserved segments, 408 * and so forth. 409 */ 410 down_write(&nilfs->ns_segctor_sem); 411 412 sb2off = NILFS_SB2_OFFSET_BYTES(newsize); 413 newnsegs = sb2off >> nilfs->ns_blocksize_bits; 414 do_div(newnsegs, nilfs->ns_blocks_per_segment); 415 416 ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs); 417 up_write(&nilfs->ns_segctor_sem); 418 if (ret < 0) 419 goto out; 420 421 ret = nilfs_construct_segment(sb); 422 if (ret < 0) 423 goto out; 424 425 down_write(&nilfs->ns_sem); 426 nilfs_move_2nd_super(sb, sb2off); 427 ret = -EIO; 428 sbp = nilfs_prepare_super(sb, 0); 429 if (likely(sbp)) { 430 nilfs_set_log_cursor(sbp[0], nilfs); 431 /* 432 * Drop NILFS_RESIZE_FS flag for compatibility with 433 * mount-time resize which may be implemented in a 434 * future release. 435 */ 436 sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & 437 ~NILFS_RESIZE_FS); 438 sbp[0]->s_dev_size = cpu_to_le64(newsize); 439 sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments); 440 if (sbp[1]) 441 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); 442 ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); 443 } 444 up_write(&nilfs->ns_sem); 445 446 /* 447 * Reset the range of allocatable segments last. This order 448 * is important in the case of expansion because the secondary 449 * superblock must be protected from log write until migration 450 * completes. 451 */ 452 if (!ret) 453 nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1); 454 out: 455 return ret; 456 } 457 458 static void nilfs_put_super(struct super_block *sb) 459 { 460 struct the_nilfs *nilfs = sb->s_fs_info; 461 462 nilfs_detach_log_writer(sb); 463 464 if (!sb_rdonly(sb)) { 465 down_write(&nilfs->ns_sem); 466 nilfs_cleanup_super(sb); 467 up_write(&nilfs->ns_sem); 468 } 469 470 iput(nilfs->ns_sufile); 471 iput(nilfs->ns_cpfile); 472 iput(nilfs->ns_dat); 473 474 destroy_nilfs(nilfs); 475 sb->s_fs_info = NULL; 476 } 477 478 static int nilfs_sync_fs(struct super_block *sb, int wait) 479 { 480 struct the_nilfs *nilfs = sb->s_fs_info; 481 struct nilfs_super_block **sbp; 482 int err = 0; 483 484 /* This function is called when super block should be written back */ 485 if (wait) 486 err = nilfs_construct_segment(sb); 487 488 down_write(&nilfs->ns_sem); 489 if (nilfs_sb_dirty(nilfs)) { 490 sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs)); 491 if (likely(sbp)) { 492 nilfs_set_log_cursor(sbp[0], nilfs); 493 nilfs_commit_super(sb, NILFS_SB_COMMIT); 494 } 495 } 496 up_write(&nilfs->ns_sem); 497 498 if (!err) 499 err = nilfs_flush_device(nilfs); 500 501 return err; 502 } 503 504 int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt, 505 struct nilfs_root **rootp) 506 { 507 struct the_nilfs *nilfs = sb->s_fs_info; 508 struct nilfs_root *root; 509 struct nilfs_checkpoint *raw_cp; 510 struct buffer_head *bh_cp; 511 int err = -ENOMEM; 512 513 root = nilfs_find_or_create_root( 514 nilfs, curr_mnt ? NILFS_CPTREE_CURRENT_CNO : cno); 515 if (!root) 516 return err; 517 518 if (root->ifile) 519 goto reuse; /* already attached checkpoint */ 520 521 down_read(&nilfs->ns_segctor_sem); 522 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp, 523 &bh_cp); 524 up_read(&nilfs->ns_segctor_sem); 525 if (unlikely(err)) { 526 if (err == -ENOENT || err == -EINVAL) { 527 nilfs_msg(sb, KERN_ERR, 528 "Invalid checkpoint (checkpoint number=%llu)", 529 (unsigned long long)cno); 530 err = -EINVAL; 531 } 532 goto failed; 533 } 534 535 err = nilfs_ifile_read(sb, root, nilfs->ns_inode_size, 536 &raw_cp->cp_ifile_inode, &root->ifile); 537 if (err) 538 goto failed_bh; 539 540 atomic64_set(&root->inodes_count, 541 le64_to_cpu(raw_cp->cp_inodes_count)); 542 atomic64_set(&root->blocks_count, 543 le64_to_cpu(raw_cp->cp_blocks_count)); 544 545 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); 546 547 reuse: 548 *rootp = root; 549 return 0; 550 551 failed_bh: 552 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp); 553 failed: 554 nilfs_put_root(root); 555 556 return err; 557 } 558 559 static int nilfs_freeze(struct super_block *sb) 560 { 561 struct the_nilfs *nilfs = sb->s_fs_info; 562 int err; 563 564 if (sb_rdonly(sb)) 565 return 0; 566 567 /* Mark super block clean */ 568 down_write(&nilfs->ns_sem); 569 err = nilfs_cleanup_super(sb); 570 up_write(&nilfs->ns_sem); 571 return err; 572 } 573 574 static int nilfs_unfreeze(struct super_block *sb) 575 { 576 struct the_nilfs *nilfs = sb->s_fs_info; 577 578 if (sb_rdonly(sb)) 579 return 0; 580 581 down_write(&nilfs->ns_sem); 582 nilfs_setup_super(sb, false); 583 up_write(&nilfs->ns_sem); 584 return 0; 585 } 586 587 static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) 588 { 589 struct super_block *sb = dentry->d_sb; 590 struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root; 591 struct the_nilfs *nilfs = root->nilfs; 592 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 593 unsigned long long blocks; 594 unsigned long overhead; 595 unsigned long nrsvblocks; 596 sector_t nfreeblocks; 597 u64 nmaxinodes, nfreeinodes; 598 int err; 599 600 /* 601 * Compute all of the segment blocks 602 * 603 * The blocks before first segment and after last segment 604 * are excluded. 605 */ 606 blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments 607 - nilfs->ns_first_data_block; 608 nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment; 609 610 /* 611 * Compute the overhead 612 * 613 * When distributing meta data blocks outside segment structure, 614 * We must count them as the overhead. 615 */ 616 overhead = 0; 617 618 err = nilfs_count_free_blocks(nilfs, &nfreeblocks); 619 if (unlikely(err)) 620 return err; 621 622 err = nilfs_ifile_count_free_inodes(root->ifile, 623 &nmaxinodes, &nfreeinodes); 624 if (unlikely(err)) { 625 nilfs_msg(sb, KERN_WARNING, 626 "failed to count free inodes: err=%d", err); 627 if (err == -ERANGE) { 628 /* 629 * If nilfs_palloc_count_max_entries() returns 630 * -ERANGE error code then we simply treat 631 * curent inodes count as maximum possible and 632 * zero as free inodes value. 633 */ 634 nmaxinodes = atomic64_read(&root->inodes_count); 635 nfreeinodes = 0; 636 err = 0; 637 } else 638 return err; 639 } 640 641 buf->f_type = NILFS_SUPER_MAGIC; 642 buf->f_bsize = sb->s_blocksize; 643 buf->f_blocks = blocks - overhead; 644 buf->f_bfree = nfreeblocks; 645 buf->f_bavail = (buf->f_bfree >= nrsvblocks) ? 646 (buf->f_bfree - nrsvblocks) : 0; 647 buf->f_files = nmaxinodes; 648 buf->f_ffree = nfreeinodes; 649 buf->f_namelen = NILFS_NAME_LEN; 650 buf->f_fsid.val[0] = (u32)id; 651 buf->f_fsid.val[1] = (u32)(id >> 32); 652 653 return 0; 654 } 655 656 static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry) 657 { 658 struct super_block *sb = dentry->d_sb; 659 struct the_nilfs *nilfs = sb->s_fs_info; 660 struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root; 661 662 if (!nilfs_test_opt(nilfs, BARRIER)) 663 seq_puts(seq, ",nobarrier"); 664 if (root->cno != NILFS_CPTREE_CURRENT_CNO) 665 seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno); 666 if (nilfs_test_opt(nilfs, ERRORS_PANIC)) 667 seq_puts(seq, ",errors=panic"); 668 if (nilfs_test_opt(nilfs, ERRORS_CONT)) 669 seq_puts(seq, ",errors=continue"); 670 if (nilfs_test_opt(nilfs, STRICT_ORDER)) 671 seq_puts(seq, ",order=strict"); 672 if (nilfs_test_opt(nilfs, NORECOVERY)) 673 seq_puts(seq, ",norecovery"); 674 if (nilfs_test_opt(nilfs, DISCARD)) 675 seq_puts(seq, ",discard"); 676 677 return 0; 678 } 679 680 static const struct super_operations nilfs_sops = { 681 .alloc_inode = nilfs_alloc_inode, 682 .free_inode = nilfs_free_inode, 683 .dirty_inode = nilfs_dirty_inode, 684 .evict_inode = nilfs_evict_inode, 685 .put_super = nilfs_put_super, 686 .sync_fs = nilfs_sync_fs, 687 .freeze_fs = nilfs_freeze, 688 .unfreeze_fs = nilfs_unfreeze, 689 .statfs = nilfs_statfs, 690 .remount_fs = nilfs_remount, 691 .show_options = nilfs_show_options 692 }; 693 694 enum { 695 Opt_err_cont, Opt_err_panic, Opt_err_ro, 696 Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, 697 Opt_discard, Opt_nodiscard, Opt_err, 698 }; 699 700 static match_table_t tokens = { 701 {Opt_err_cont, "errors=continue"}, 702 {Opt_err_panic, "errors=panic"}, 703 {Opt_err_ro, "errors=remount-ro"}, 704 {Opt_barrier, "barrier"}, 705 {Opt_nobarrier, "nobarrier"}, 706 {Opt_snapshot, "cp=%u"}, 707 {Opt_order, "order=%s"}, 708 {Opt_norecovery, "norecovery"}, 709 {Opt_discard, "discard"}, 710 {Opt_nodiscard, "nodiscard"}, 711 {Opt_err, NULL} 712 }; 713 714 static int parse_options(char *options, struct super_block *sb, int is_remount) 715 { 716 struct the_nilfs *nilfs = sb->s_fs_info; 717 char *p; 718 substring_t args[MAX_OPT_ARGS]; 719 720 if (!options) 721 return 1; 722 723 while ((p = strsep(&options, ",")) != NULL) { 724 int token; 725 726 if (!*p) 727 continue; 728 729 token = match_token(p, tokens, args); 730 switch (token) { 731 case Opt_barrier: 732 nilfs_set_opt(nilfs, BARRIER); 733 break; 734 case Opt_nobarrier: 735 nilfs_clear_opt(nilfs, BARRIER); 736 break; 737 case Opt_order: 738 if (strcmp(args[0].from, "relaxed") == 0) 739 /* Ordered data semantics */ 740 nilfs_clear_opt(nilfs, STRICT_ORDER); 741 else if (strcmp(args[0].from, "strict") == 0) 742 /* Strict in-order semantics */ 743 nilfs_set_opt(nilfs, STRICT_ORDER); 744 else 745 return 0; 746 break; 747 case Opt_err_panic: 748 nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC); 749 break; 750 case Opt_err_ro: 751 nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO); 752 break; 753 case Opt_err_cont: 754 nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT); 755 break; 756 case Opt_snapshot: 757 if (is_remount) { 758 nilfs_msg(sb, KERN_ERR, 759 "\"%s\" option is invalid for remount", 760 p); 761 return 0; 762 } 763 break; 764 case Opt_norecovery: 765 nilfs_set_opt(nilfs, NORECOVERY); 766 break; 767 case Opt_discard: 768 nilfs_set_opt(nilfs, DISCARD); 769 break; 770 case Opt_nodiscard: 771 nilfs_clear_opt(nilfs, DISCARD); 772 break; 773 default: 774 nilfs_msg(sb, KERN_ERR, 775 "unrecognized mount option \"%s\"", p); 776 return 0; 777 } 778 } 779 return 1; 780 } 781 782 static inline void 783 nilfs_set_default_options(struct super_block *sb, 784 struct nilfs_super_block *sbp) 785 { 786 struct the_nilfs *nilfs = sb->s_fs_info; 787 788 nilfs->ns_mount_opt = 789 NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER; 790 } 791 792 static int nilfs_setup_super(struct super_block *sb, int is_mount) 793 { 794 struct the_nilfs *nilfs = sb->s_fs_info; 795 struct nilfs_super_block **sbp; 796 int max_mnt_count; 797 int mnt_count; 798 799 /* nilfs->ns_sem must be locked by the caller. */ 800 sbp = nilfs_prepare_super(sb, 0); 801 if (!sbp) 802 return -EIO; 803 804 if (!is_mount) 805 goto skip_mount_setup; 806 807 max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count); 808 mnt_count = le16_to_cpu(sbp[0]->s_mnt_count); 809 810 if (nilfs->ns_mount_state & NILFS_ERROR_FS) { 811 nilfs_msg(sb, KERN_WARNING, "mounting fs with errors"); 812 #if 0 813 } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) { 814 nilfs_msg(sb, KERN_WARNING, "maximal mount count reached"); 815 #endif 816 } 817 if (!max_mnt_count) 818 sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT); 819 820 sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1); 821 sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds()); 822 823 skip_mount_setup: 824 sbp[0]->s_state = 825 cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS); 826 /* synchronize sbp[1] with sbp[0] */ 827 if (sbp[1]) 828 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); 829 return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL); 830 } 831 832 struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb, 833 u64 pos, int blocksize, 834 struct buffer_head **pbh) 835 { 836 unsigned long long sb_index = pos; 837 unsigned long offset; 838 839 offset = do_div(sb_index, blocksize); 840 *pbh = sb_bread(sb, sb_index); 841 if (!*pbh) 842 return NULL; 843 return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset); 844 } 845 846 int nilfs_store_magic_and_option(struct super_block *sb, 847 struct nilfs_super_block *sbp, 848 char *data) 849 { 850 struct the_nilfs *nilfs = sb->s_fs_info; 851 852 sb->s_magic = le16_to_cpu(sbp->s_magic); 853 854 /* FS independent flags */ 855 #ifdef NILFS_ATIME_DISABLE 856 sb->s_flags |= SB_NOATIME; 857 #endif 858 859 nilfs_set_default_options(sb, sbp); 860 861 nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid); 862 nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid); 863 nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval); 864 nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max); 865 866 return !parse_options(data, sb, 0) ? -EINVAL : 0; 867 } 868 869 int nilfs_check_feature_compatibility(struct super_block *sb, 870 struct nilfs_super_block *sbp) 871 { 872 __u64 features; 873 874 features = le64_to_cpu(sbp->s_feature_incompat) & 875 ~NILFS_FEATURE_INCOMPAT_SUPP; 876 if (features) { 877 nilfs_msg(sb, KERN_ERR, 878 "couldn't mount because of unsupported optional features (%llx)", 879 (unsigned long long)features); 880 return -EINVAL; 881 } 882 features = le64_to_cpu(sbp->s_feature_compat_ro) & 883 ~NILFS_FEATURE_COMPAT_RO_SUPP; 884 if (!sb_rdonly(sb) && features) { 885 nilfs_msg(sb, KERN_ERR, 886 "couldn't mount RDWR because of unsupported optional features (%llx)", 887 (unsigned long long)features); 888 return -EINVAL; 889 } 890 return 0; 891 } 892 893 static int nilfs_get_root_dentry(struct super_block *sb, 894 struct nilfs_root *root, 895 struct dentry **root_dentry) 896 { 897 struct inode *inode; 898 struct dentry *dentry; 899 int ret = 0; 900 901 inode = nilfs_iget(sb, root, NILFS_ROOT_INO); 902 if (IS_ERR(inode)) { 903 ret = PTR_ERR(inode); 904 nilfs_msg(sb, KERN_ERR, "error %d getting root inode", ret); 905 goto out; 906 } 907 if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) { 908 iput(inode); 909 nilfs_msg(sb, KERN_ERR, "corrupt root inode"); 910 ret = -EINVAL; 911 goto out; 912 } 913 914 if (root->cno == NILFS_CPTREE_CURRENT_CNO) { 915 dentry = d_find_alias(inode); 916 if (!dentry) { 917 dentry = d_make_root(inode); 918 if (!dentry) { 919 ret = -ENOMEM; 920 goto failed_dentry; 921 } 922 } else { 923 iput(inode); 924 } 925 } else { 926 dentry = d_obtain_root(inode); 927 if (IS_ERR(dentry)) { 928 ret = PTR_ERR(dentry); 929 goto failed_dentry; 930 } 931 } 932 *root_dentry = dentry; 933 out: 934 return ret; 935 936 failed_dentry: 937 nilfs_msg(sb, KERN_ERR, "error %d getting root dentry", ret); 938 goto out; 939 } 940 941 static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, 942 struct dentry **root_dentry) 943 { 944 struct the_nilfs *nilfs = s->s_fs_info; 945 struct nilfs_root *root; 946 int ret; 947 948 mutex_lock(&nilfs->ns_snapshot_mount_mutex); 949 950 down_read(&nilfs->ns_segctor_sem); 951 ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno); 952 up_read(&nilfs->ns_segctor_sem); 953 if (ret < 0) { 954 ret = (ret == -ENOENT) ? -EINVAL : ret; 955 goto out; 956 } else if (!ret) { 957 nilfs_msg(s, KERN_ERR, 958 "The specified checkpoint is not a snapshot (checkpoint number=%llu)", 959 (unsigned long long)cno); 960 ret = -EINVAL; 961 goto out; 962 } 963 964 ret = nilfs_attach_checkpoint(s, cno, false, &root); 965 if (ret) { 966 nilfs_msg(s, KERN_ERR, 967 "error %d while loading snapshot (checkpoint number=%llu)", 968 ret, (unsigned long long)cno); 969 goto out; 970 } 971 ret = nilfs_get_root_dentry(s, root, root_dentry); 972 nilfs_put_root(root); 973 out: 974 mutex_unlock(&nilfs->ns_snapshot_mount_mutex); 975 return ret; 976 } 977 978 /** 979 * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint 980 * @root_dentry: root dentry of the tree to be shrunk 981 * 982 * This function returns true if the tree was in-use. 983 */ 984 static bool nilfs_tree_is_busy(struct dentry *root_dentry) 985 { 986 shrink_dcache_parent(root_dentry); 987 return d_count(root_dentry) > 1; 988 } 989 990 int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno) 991 { 992 struct the_nilfs *nilfs = sb->s_fs_info; 993 struct nilfs_root *root; 994 struct inode *inode; 995 struct dentry *dentry; 996 int ret; 997 998 if (cno > nilfs->ns_cno) 999 return false; 1000 1001 if (cno >= nilfs_last_cno(nilfs)) 1002 return true; /* protect recent checkpoints */ 1003 1004 ret = false; 1005 root = nilfs_lookup_root(nilfs, cno); 1006 if (root) { 1007 inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO); 1008 if (inode) { 1009 dentry = d_find_alias(inode); 1010 if (dentry) { 1011 ret = nilfs_tree_is_busy(dentry); 1012 dput(dentry); 1013 } 1014 iput(inode); 1015 } 1016 nilfs_put_root(root); 1017 } 1018 return ret; 1019 } 1020 1021 /** 1022 * nilfs_fill_super() - initialize a super block instance 1023 * @sb: super_block 1024 * @data: mount options 1025 * @silent: silent mode flag 1026 * 1027 * This function is called exclusively by nilfs->ns_mount_mutex. 1028 * So, the recovery process is protected from other simultaneous mounts. 1029 */ 1030 static int 1031 nilfs_fill_super(struct super_block *sb, void *data, int silent) 1032 { 1033 struct the_nilfs *nilfs; 1034 struct nilfs_root *fsroot; 1035 __u64 cno; 1036 int err; 1037 1038 nilfs = alloc_nilfs(sb); 1039 if (!nilfs) 1040 return -ENOMEM; 1041 1042 sb->s_fs_info = nilfs; 1043 1044 err = init_nilfs(nilfs, sb, (char *)data); 1045 if (err) 1046 goto failed_nilfs; 1047 1048 sb->s_op = &nilfs_sops; 1049 sb->s_export_op = &nilfs_export_ops; 1050 sb->s_root = NULL; 1051 sb->s_time_gran = 1; 1052 sb->s_max_links = NILFS_LINK_MAX; 1053 1054 sb->s_bdi = bdi_get(sb->s_bdev->bd_bdi); 1055 1056 err = load_nilfs(nilfs, sb); 1057 if (err) 1058 goto failed_nilfs; 1059 1060 cno = nilfs_last_cno(nilfs); 1061 err = nilfs_attach_checkpoint(sb, cno, true, &fsroot); 1062 if (err) { 1063 nilfs_msg(sb, KERN_ERR, 1064 "error %d while loading last checkpoint (checkpoint number=%llu)", 1065 err, (unsigned long long)cno); 1066 goto failed_unload; 1067 } 1068 1069 if (!sb_rdonly(sb)) { 1070 err = nilfs_attach_log_writer(sb, fsroot); 1071 if (err) 1072 goto failed_checkpoint; 1073 } 1074 1075 err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root); 1076 if (err) 1077 goto failed_segctor; 1078 1079 nilfs_put_root(fsroot); 1080 1081 if (!sb_rdonly(sb)) { 1082 down_write(&nilfs->ns_sem); 1083 nilfs_setup_super(sb, true); 1084 up_write(&nilfs->ns_sem); 1085 } 1086 1087 return 0; 1088 1089 failed_segctor: 1090 nilfs_detach_log_writer(sb); 1091 1092 failed_checkpoint: 1093 nilfs_put_root(fsroot); 1094 1095 failed_unload: 1096 iput(nilfs->ns_sufile); 1097 iput(nilfs->ns_cpfile); 1098 iput(nilfs->ns_dat); 1099 1100 failed_nilfs: 1101 destroy_nilfs(nilfs); 1102 return err; 1103 } 1104 1105 static int nilfs_remount(struct super_block *sb, int *flags, char *data) 1106 { 1107 struct the_nilfs *nilfs = sb->s_fs_info; 1108 unsigned long old_sb_flags; 1109 unsigned long old_mount_opt; 1110 int err; 1111 1112 sync_filesystem(sb); 1113 old_sb_flags = sb->s_flags; 1114 old_mount_opt = nilfs->ns_mount_opt; 1115 1116 if (!parse_options(data, sb, 1)) { 1117 err = -EINVAL; 1118 goto restore_opts; 1119 } 1120 sb->s_flags = (sb->s_flags & ~SB_POSIXACL); 1121 1122 err = -EINVAL; 1123 1124 if (!nilfs_valid_fs(nilfs)) { 1125 nilfs_msg(sb, KERN_WARNING, 1126 "couldn't remount because the filesystem is in an incomplete recovery state"); 1127 goto restore_opts; 1128 } 1129 1130 if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) 1131 goto out; 1132 if (*flags & SB_RDONLY) { 1133 /* Shutting down log writer */ 1134 nilfs_detach_log_writer(sb); 1135 sb->s_flags |= SB_RDONLY; 1136 1137 /* 1138 * Remounting a valid RW partition RDONLY, so set 1139 * the RDONLY flag and then mark the partition as valid again. 1140 */ 1141 down_write(&nilfs->ns_sem); 1142 nilfs_cleanup_super(sb); 1143 up_write(&nilfs->ns_sem); 1144 } else { 1145 __u64 features; 1146 struct nilfs_root *root; 1147 1148 /* 1149 * Mounting a RDONLY partition read-write, so reread and 1150 * store the current valid flag. (It may have been changed 1151 * by fsck since we originally mounted the partition.) 1152 */ 1153 down_read(&nilfs->ns_sem); 1154 features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & 1155 ~NILFS_FEATURE_COMPAT_RO_SUPP; 1156 up_read(&nilfs->ns_sem); 1157 if (features) { 1158 nilfs_msg(sb, KERN_WARNING, 1159 "couldn't remount RDWR because of unsupported optional features (%llx)", 1160 (unsigned long long)features); 1161 err = -EROFS; 1162 goto restore_opts; 1163 } 1164 1165 sb->s_flags &= ~SB_RDONLY; 1166 1167 root = NILFS_I(d_inode(sb->s_root))->i_root; 1168 err = nilfs_attach_log_writer(sb, root); 1169 if (err) 1170 goto restore_opts; 1171 1172 down_write(&nilfs->ns_sem); 1173 nilfs_setup_super(sb, true); 1174 up_write(&nilfs->ns_sem); 1175 } 1176 out: 1177 return 0; 1178 1179 restore_opts: 1180 sb->s_flags = old_sb_flags; 1181 nilfs->ns_mount_opt = old_mount_opt; 1182 return err; 1183 } 1184 1185 struct nilfs_super_data { 1186 struct block_device *bdev; 1187 __u64 cno; 1188 int flags; 1189 }; 1190 1191 static int nilfs_parse_snapshot_option(const char *option, 1192 const substring_t *arg, 1193 struct nilfs_super_data *sd) 1194 { 1195 unsigned long long val; 1196 const char *msg = NULL; 1197 int err; 1198 1199 if (!(sd->flags & SB_RDONLY)) { 1200 msg = "read-only option is not specified"; 1201 goto parse_error; 1202 } 1203 1204 err = kstrtoull(arg->from, 0, &val); 1205 if (err) { 1206 if (err == -ERANGE) 1207 msg = "too large checkpoint number"; 1208 else 1209 msg = "malformed argument"; 1210 goto parse_error; 1211 } else if (val == 0) { 1212 msg = "invalid checkpoint number 0"; 1213 goto parse_error; 1214 } 1215 sd->cno = val; 1216 return 0; 1217 1218 parse_error: 1219 nilfs_msg(NULL, KERN_ERR, "invalid option \"%s\": %s", option, msg); 1220 return 1; 1221 } 1222 1223 /** 1224 * nilfs_identify - pre-read mount options needed to identify mount instance 1225 * @data: mount options 1226 * @sd: nilfs_super_data 1227 */ 1228 static int nilfs_identify(char *data, struct nilfs_super_data *sd) 1229 { 1230 char *p, *options = data; 1231 substring_t args[MAX_OPT_ARGS]; 1232 int token; 1233 int ret = 0; 1234 1235 do { 1236 p = strsep(&options, ","); 1237 if (p != NULL && *p) { 1238 token = match_token(p, tokens, args); 1239 if (token == Opt_snapshot) 1240 ret = nilfs_parse_snapshot_option(p, &args[0], 1241 sd); 1242 } 1243 if (!options) 1244 break; 1245 BUG_ON(options == data); 1246 *(options - 1) = ','; 1247 } while (!ret); 1248 return ret; 1249 } 1250 1251 static int nilfs_set_bdev_super(struct super_block *s, void *data) 1252 { 1253 s->s_bdev = data; 1254 s->s_dev = s->s_bdev->bd_dev; 1255 return 0; 1256 } 1257 1258 static int nilfs_test_bdev_super(struct super_block *s, void *data) 1259 { 1260 return (void *)s->s_bdev == data; 1261 } 1262 1263 static struct dentry * 1264 nilfs_mount(struct file_system_type *fs_type, int flags, 1265 const char *dev_name, void *data) 1266 { 1267 struct nilfs_super_data sd; 1268 struct super_block *s; 1269 fmode_t mode = FMODE_READ | FMODE_EXCL; 1270 struct dentry *root_dentry; 1271 int err, s_new = false; 1272 1273 if (!(flags & SB_RDONLY)) 1274 mode |= FMODE_WRITE; 1275 1276 sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); 1277 if (IS_ERR(sd.bdev)) 1278 return ERR_CAST(sd.bdev); 1279 1280 sd.cno = 0; 1281 sd.flags = flags; 1282 if (nilfs_identify((char *)data, &sd)) { 1283 err = -EINVAL; 1284 goto failed; 1285 } 1286 1287 /* 1288 * once the super is inserted into the list by sget, s_umount 1289 * will protect the lockfs code from trying to start a snapshot 1290 * while we are mounting 1291 */ 1292 mutex_lock(&sd.bdev->bd_fsfreeze_mutex); 1293 if (sd.bdev->bd_fsfreeze_count > 0) { 1294 mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); 1295 err = -EBUSY; 1296 goto failed; 1297 } 1298 s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags, 1299 sd.bdev); 1300 mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); 1301 if (IS_ERR(s)) { 1302 err = PTR_ERR(s); 1303 goto failed; 1304 } 1305 1306 if (!s->s_root) { 1307 s_new = true; 1308 1309 /* New superblock instance created */ 1310 s->s_mode = mode; 1311 snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev); 1312 sb_set_blocksize(s, block_size(sd.bdev)); 1313 1314 err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0); 1315 if (err) 1316 goto failed_super; 1317 1318 s->s_flags |= SB_ACTIVE; 1319 } else if (!sd.cno) { 1320 if (nilfs_tree_is_busy(s->s_root)) { 1321 if ((flags ^ s->s_flags) & SB_RDONLY) { 1322 nilfs_msg(s, KERN_ERR, 1323 "the device already has a %s mount.", 1324 sb_rdonly(s) ? "read-only" : "read/write"); 1325 err = -EBUSY; 1326 goto failed_super; 1327 } 1328 } else { 1329 /* 1330 * Try remount to setup mount states if the current 1331 * tree is not mounted and only snapshots use this sb. 1332 */ 1333 err = nilfs_remount(s, &flags, data); 1334 if (err) 1335 goto failed_super; 1336 } 1337 } 1338 1339 if (sd.cno) { 1340 err = nilfs_attach_snapshot(s, sd.cno, &root_dentry); 1341 if (err) 1342 goto failed_super; 1343 } else { 1344 root_dentry = dget(s->s_root); 1345 } 1346 1347 if (!s_new) 1348 blkdev_put(sd.bdev, mode); 1349 1350 return root_dentry; 1351 1352 failed_super: 1353 deactivate_locked_super(s); 1354 1355 failed: 1356 if (!s_new) 1357 blkdev_put(sd.bdev, mode); 1358 return ERR_PTR(err); 1359 } 1360 1361 struct file_system_type nilfs_fs_type = { 1362 .owner = THIS_MODULE, 1363 .name = "nilfs2", 1364 .mount = nilfs_mount, 1365 .kill_sb = kill_block_super, 1366 .fs_flags = FS_REQUIRES_DEV, 1367 }; 1368 MODULE_ALIAS_FS("nilfs2"); 1369 1370 static void nilfs_inode_init_once(void *obj) 1371 { 1372 struct nilfs_inode_info *ii = obj; 1373 1374 INIT_LIST_HEAD(&ii->i_dirty); 1375 #ifdef CONFIG_NILFS_XATTR 1376 init_rwsem(&ii->xattr_sem); 1377 #endif 1378 address_space_init_once(&ii->i_btnode_cache); 1379 ii->i_bmap = &ii->i_bmap_data; 1380 inode_init_once(&ii->vfs_inode); 1381 } 1382 1383 static void nilfs_segbuf_init_once(void *obj) 1384 { 1385 memset(obj, 0, sizeof(struct nilfs_segment_buffer)); 1386 } 1387 1388 static void nilfs_destroy_cachep(void) 1389 { 1390 /* 1391 * Make sure all delayed rcu free inodes are flushed before we 1392 * destroy cache. 1393 */ 1394 rcu_barrier(); 1395 1396 kmem_cache_destroy(nilfs_inode_cachep); 1397 kmem_cache_destroy(nilfs_transaction_cachep); 1398 kmem_cache_destroy(nilfs_segbuf_cachep); 1399 kmem_cache_destroy(nilfs_btree_path_cache); 1400 } 1401 1402 static int __init nilfs_init_cachep(void) 1403 { 1404 nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache", 1405 sizeof(struct nilfs_inode_info), 0, 1406 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, 1407 nilfs_inode_init_once); 1408 if (!nilfs_inode_cachep) 1409 goto fail; 1410 1411 nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache", 1412 sizeof(struct nilfs_transaction_info), 0, 1413 SLAB_RECLAIM_ACCOUNT, NULL); 1414 if (!nilfs_transaction_cachep) 1415 goto fail; 1416 1417 nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache", 1418 sizeof(struct nilfs_segment_buffer), 0, 1419 SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once); 1420 if (!nilfs_segbuf_cachep) 1421 goto fail; 1422 1423 nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache", 1424 sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX, 1425 0, 0, NULL); 1426 if (!nilfs_btree_path_cache) 1427 goto fail; 1428 1429 return 0; 1430 1431 fail: 1432 nilfs_destroy_cachep(); 1433 return -ENOMEM; 1434 } 1435 1436 static int __init init_nilfs_fs(void) 1437 { 1438 int err; 1439 1440 err = nilfs_init_cachep(); 1441 if (err) 1442 goto fail; 1443 1444 err = nilfs_sysfs_init(); 1445 if (err) 1446 goto free_cachep; 1447 1448 err = register_filesystem(&nilfs_fs_type); 1449 if (err) 1450 goto deinit_sysfs_entry; 1451 1452 printk(KERN_INFO "NILFS version 2 loaded\n"); 1453 return 0; 1454 1455 deinit_sysfs_entry: 1456 nilfs_sysfs_exit(); 1457 free_cachep: 1458 nilfs_destroy_cachep(); 1459 fail: 1460 return err; 1461 } 1462 1463 static void __exit exit_nilfs_fs(void) 1464 { 1465 nilfs_destroy_cachep(); 1466 nilfs_sysfs_exit(); 1467 unregister_filesystem(&nilfs_fs_type); 1468 } 1469 1470 module_init(init_nilfs_fs) 1471 module_exit(exit_nilfs_fs) 1472