/*
 * the_nilfs.c - the_nilfs shared structure.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "alloc.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
#include "segbuf.h"


static LIST_HEAD(nilfs_objects);
static DEFINE_SPINLOCK(nilfs_lock);

void nilfs_set_last_segment(struct the_nilfs *nilfs,
			    sector_t start_blocknr, u64 seq, __u64 cno)
{
	spin_lock(&nilfs->ns_last_segment_lock);
	nilfs->ns_last_pseg = start_blocknr;
	nilfs->ns_last_seq = seq;
	nilfs->ns_last_cno = cno;
	spin_unlock(&nilfs->ns_last_segment_lock);
}

/**
 * alloc_nilfs - allocate the_nilfs structure
 * @bdev: block device to which the_nilfs is related
 *
 * alloc_nilfs() allocates memory for the_nilfs and
 * initializes its reference count and locks.
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
{
	struct the_nilfs *nilfs;

	nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
	if (!nilfs)
		return NULL;

	nilfs->ns_bdev = bdev;
	atomic_set(&nilfs->ns_count, 1);
	atomic_set(&nilfs->ns_ndirtyblks, 0);
	init_rwsem(&nilfs->ns_sem);
	init_rwsem(&nilfs->ns_super_sem);
	mutex_init(&nilfs->ns_mount_mutex);
	init_rwsem(&nilfs->ns_writer_sem);
	INIT_LIST_HEAD(&nilfs->ns_list);
	INIT_LIST_HEAD(&nilfs->ns_supers);
	spin_lock_init(&nilfs->ns_last_segment_lock);
	nilfs->ns_gc_inodes_h = NULL;
	init_rwsem(&nilfs->ns_segctor_sem);

	return nilfs;
}
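/*
 * Lifecycle sketch (illustrative only, not a verbatim call site): a
 * mount path obtains the shared object through find_or_create_nilfs()
 * below and drops it with put_nilfs().  get_nilfs() is assumed here to
 * be the atomic_inc() helper declared in the_nilfs.h.
 *
 *	struct the_nilfs *nilfs = find_or_create_nilfs(sb->s_bdev);
 *	if (!nilfs)
 *		return -ENOMEM;
 *	... use nilfs ...
 *	put_nilfs(nilfs);	-- releases the reference taken above
 */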
/**
 * find_or_create_nilfs - find or create nilfs object
 * @bdev: block device to which the_nilfs is related
 *
 * find_or_create_nilfs() looks up an existing nilfs object created on
 * the device and takes a reference to it.  If no nilfs object is found
 * on the device, a new nilfs object is allocated.
 *
 * Return Value: On success, pointer to the nilfs object is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *find_or_create_nilfs(struct block_device *bdev)
{
	struct the_nilfs *nilfs, *new = NULL;

 retry:
	spin_lock(&nilfs_lock);
	list_for_each_entry(nilfs, &nilfs_objects, ns_list) {
		if (nilfs->ns_bdev == bdev) {
			get_nilfs(nilfs);
			spin_unlock(&nilfs_lock);
			if (new)
				put_nilfs(new);
			return nilfs; /* existing object */
		}
	}
	if (new) {
		list_add_tail(&new->ns_list, &nilfs_objects);
		spin_unlock(&nilfs_lock);
		return new; /* new object */
	}
	spin_unlock(&nilfs_lock);

	new = alloc_nilfs(bdev);
	if (new)
		goto retry;
	return NULL; /* insufficient memory */
}

/**
 * put_nilfs - release a reference to the_nilfs
 * @nilfs: the_nilfs structure to be released
 *
 * put_nilfs() decrements a reference counter of the_nilfs.
 * If the reference count reaches zero, the_nilfs is freed.
 */
void put_nilfs(struct the_nilfs *nilfs)
{
	spin_lock(&nilfs_lock);
	if (!atomic_dec_and_test(&nilfs->ns_count)) {
		spin_unlock(&nilfs_lock);
		return;
	}
	list_del_init(&nilfs->ns_list);
	spin_unlock(&nilfs_lock);

	/*
	 * Increment of ns_count never occurs below because the caller
	 * of get_nilfs() holds at least one reference to the_nilfs.
	 * Thus its exclusion control is not required here.
	 */

	might_sleep();
	if (nilfs_loaded(nilfs)) {
		nilfs_mdt_clear(nilfs->ns_sufile);
		nilfs_mdt_destroy(nilfs->ns_sufile);
		nilfs_mdt_clear(nilfs->ns_cpfile);
		nilfs_mdt_destroy(nilfs->ns_cpfile);
		nilfs_mdt_clear(nilfs->ns_dat);
		nilfs_mdt_destroy(nilfs->ns_dat);
		/* XXX: how and when to clear nilfs->ns_gc_dat? */
		nilfs_mdt_destroy(nilfs->ns_gc_dat);
	}
	if (nilfs_init(nilfs)) {
		nilfs_destroy_gccache(nilfs);
		brelse(nilfs->ns_sbh[0]);
		brelse(nilfs->ns_sbh[1]);
	}
	kfree(nilfs);
}
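/*
 * nilfs_load_super_root() reads the super root block at @sr_block and
 * instantiates the three metadata files it points to -- the DAT, the
 * checkpoint file (cpfile), and the segment usage file (sufile) --
 * plus a shadow DAT used by the garbage collector.  The entry sizes
 * are taken from the on-disk super block, and each metadata inode is
 * read directly out of the super root buffer.
 */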
static int nilfs_load_super_root(struct the_nilfs *nilfs,
				 struct nilfs_sb_info *sbi, sector_t sr_block)
{
	static struct lock_class_key dat_lock_key;
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	unsigned dat_entry_size, segment_usage_size, checkpoint_size;
	unsigned inode_size;
	int err;

	err = nilfs_read_super_root_block(sbi->s_super, sr_block, &bh_sr, 1);
	if (unlikely(err))
		return err;

	down_read(&nilfs->ns_sem);
	dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size);
	checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size);
	segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size);
	up_read(&nilfs->ns_sem);

	inode_size = nilfs->ns_inode_size;

	err = -ENOMEM;
	nilfs->ns_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
	if (unlikely(!nilfs->ns_dat))
		goto failed;

	nilfs->ns_gc_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
	if (unlikely(!nilfs->ns_gc_dat))
		goto failed_dat;

	nilfs->ns_cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO);
	if (unlikely(!nilfs->ns_cpfile))
		goto failed_gc_dat;

	nilfs->ns_sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO);
	if (unlikely(!nilfs->ns_sufile))
		goto failed_cpfile;

	err = nilfs_palloc_init_blockgroup(nilfs->ns_dat, dat_entry_size);
	if (unlikely(err))
		goto failed_sufile;

	err = nilfs_palloc_init_blockgroup(nilfs->ns_gc_dat, dat_entry_size);
	if (unlikely(err))
		goto failed_sufile;

	lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
	lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);

	nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
	nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
				 sizeof(struct nilfs_cpfile_header));
	nilfs_mdt_set_entry_size(nilfs->ns_sufile, segment_usage_size,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_mdt_read_inode_direct(
		nilfs->ns_dat, bh_sr, NILFS_SR_DAT_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

	err = nilfs_mdt_read_inode_direct(
		nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

	err = nilfs_mdt_read_inode_direct(
		nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
	brelse(bh_sr);
	return err;

 failed_sufile:
	nilfs_mdt_destroy(nilfs->ns_sufile);

 failed_cpfile:
	nilfs_mdt_destroy(nilfs->ns_cpfile);

 failed_gc_dat:
	nilfs_mdt_destroy(nilfs->ns_gc_dat);

 failed_dat:
	nilfs_mdt_destroy(nilfs->ns_dat);
	goto failed;
}
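/*
 * nilfs_init_recovery_info() and nilfs_clear_recovery_info() bracket a
 * recovery attempt: the former zeroes the bookkeeping structure and
 * initializes the list of segments used during recovery, the latter
 * disposes of that list once recovery is over.
 */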
static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
	memset(ri, 0, sizeof(*ri));
	INIT_LIST_HEAD(&ri->ri_used_segments);
}

static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
	nilfs_dispose_segment_list(&ri->ri_used_segments);
}

/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure to be loaded
 * @sbi: nilfs_sb_info used to recover past segments
 *
 * load_nilfs() searches for and loads the latest super root,
 * attaches the last segment, and does recovery if needed.
 * The caller must serialize calls to this function across
 * simultaneous mounts.
 */
int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
{
	struct nilfs_recovery_info ri;
	unsigned int s_flags = sbi->s_super->s_flags;
	int really_read_only = bdev_read_only(nilfs->ns_bdev);
	unsigned valid_fs;
	int err = 0;

	nilfs_init_recovery_info(&ri);

	down_write(&nilfs->ns_sem);
	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
	up_write(&nilfs->ns_sem);

	if (!valid_fs && (s_flags & MS_RDONLY)) {
		printk(KERN_INFO "NILFS: INFO: recovery "
		       "required for readonly filesystem.\n");
		if (really_read_only) {
			printk(KERN_ERR "NILFS: write access "
			       "unavailable, cannot proceed.\n");
			err = -EROFS;
			goto failed;
		}
		printk(KERN_INFO "NILFS: write access will "
		       "be enabled during recovery.\n");
		sbi->s_super->s_flags &= ~MS_RDONLY;
	}

	err = nilfs_search_super_root(nilfs, sbi, &ri);
	if (unlikely(err)) {
		printk(KERN_ERR "NILFS: error searching super root.\n");
		goto failed;
	}

	err = nilfs_load_super_root(nilfs, sbi, ri.ri_super_root);
	if (unlikely(err)) {
		printk(KERN_ERR "NILFS: error loading super root.\n");
		goto failed;
	}

	if (!valid_fs) {
		err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
		if (unlikely(err)) {
			nilfs_mdt_destroy(nilfs->ns_cpfile);
			nilfs_mdt_destroy(nilfs->ns_sufile);
			nilfs_mdt_destroy(nilfs->ns_dat);
			goto failed;
		}
		if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED)
			sbi->s_super->s_dirt = 1;
	}

	set_nilfs_loaded(nilfs);

 failed:
	nilfs_clear_recovery_info(&ri);
	sbi->s_super->s_flags = s_flags;
	return err;
}

static unsigned long long nilfs_max_size(unsigned int blkbits)
{
	unsigned int max_bits;
	unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */

	max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
	if (max_bits < 64)
		res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
	return res;
}
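/*
 * Worked example (illustrative): the bmap can address at most
 * 2^NILFS_BMAP_KEY_BIT blocks per file, so the limit it imposes is
 * 2^(blkbits + NILFS_BMAP_KEY_BIT) - 1 bytes.  If, say, the key space
 * were 32 bits and the block size 4 KiB (blkbits = 12), the cap would
 * be 2^44 - 1, i.e. just under 16 TiB; with a full 64-bit key space the
 * page cache limit (MAX_LFS_FILESIZE) dominates instead.
 */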
static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
				   struct nilfs_super_block *sbp)
{
	if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) {
		printk(KERN_ERR "NILFS: revision mismatch "
		       "(superblock rev.=%d.%d, current rev.=%d.%d). "
		       "Please check the version of mkfs.nilfs.\n",
		       le32_to_cpu(sbp->s_rev_level),
		       le16_to_cpu(sbp->s_minor_rev_level),
		       NILFS_CURRENT_REV, NILFS_MINOR_REV);
		return -EINVAL;
	}
	nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
	if (nilfs->ns_sbsize > BLOCK_SIZE)
		return -EINVAL;

	nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
		printk(KERN_ERR "NILFS: too short segment.\n");
		return -EINVAL;
	}

	nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
	nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments);
	nilfs->ns_r_segments_percentage =
		le32_to_cpu(sbp->s_r_segments_percentage);
	nilfs->ns_nrsvsegs =
		max_t(unsigned long, NILFS_MIN_NRSVSEGS,
		      DIV_ROUND_UP(nilfs->ns_nsegments *
				   nilfs->ns_r_segments_percentage, 100));
	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
	return 0;
}
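/*
 * A worked example of the reserved-segment computation above: with
 * 1000 segments and s_r_segments_percentage = 5,
 * DIV_ROUND_UP(1000 * 5, 100) reserves 50 segments (or
 * NILFS_MIN_NRSVSEGS if that is larger).
 *
 * Super block validation (nilfs_valid_sb() below) checks the magic
 * number and then recomputes the CRC32 of the first s_bytes bytes of
 * the super block with the 4-byte s_sum field treated as zero; the
 * seed comes from the s_crc_seed field of the block itself.
 */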
static int nilfs_valid_sb(struct nilfs_super_block *sbp)
{
	static unsigned char sum[4];
	const int sumoff = offsetof(struct nilfs_super_block, s_sum);
	size_t bytes;
	u32 crc;

	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
		return 0;
	bytes = le16_to_cpu(sbp->s_bytes);
	if (bytes > BLOCK_SIZE)
		return 0;
	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
		       sumoff);
	crc = crc32_le(crc, sum, 4);
	crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
		       bytes - sumoff - 4);
	return crc == le32_to_cpu(sbp->s_sum);
}

static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
	return offset < ((le64_to_cpu(sbp->s_nsegments) *
			  le32_to_cpu(sbp->s_blocks_per_segment)) <<
			 (le32_to_cpu(sbp->s_log_block_size) + 10));
}

static void nilfs_release_super_block(struct the_nilfs *nilfs)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (nilfs->ns_sbp[i]) {
			brelse(nilfs->ns_sbh[i]);
			nilfs->ns_sbh[i] = NULL;
			nilfs->ns_sbp[i] = NULL;
		}
	}
}

void nilfs_fall_back_super_block(struct the_nilfs *nilfs)
{
	brelse(nilfs->ns_sbh[0]);
	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = NULL;
	nilfs->ns_sbp[1] = NULL;
}

void nilfs_swap_super_block(struct the_nilfs *nilfs)
{
	struct buffer_head *tsbh = nilfs->ns_sbh[0];
	struct nilfs_super_block *tsbp = nilfs->ns_sbp[0];

	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = tsbh;
	nilfs->ns_sbp[1] = tsbp;
}

static int nilfs_load_super_block(struct the_nilfs *nilfs,
				  struct super_block *sb, int blocksize,
				  struct nilfs_super_block **sbpp)
{
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct buffer_head **sbh = nilfs->ns_sbh;
	u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
	int valid[2], swp = 0;

	sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
					&sbh[0]);
	sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);

	if (!sbp[0]) {
		if (!sbp[1]) {
			printk(KERN_ERR "NILFS: unable to read superblock\n");
			return -EIO;
		}
		printk(KERN_WARNING
		       "NILFS warning: unable to read primary superblock\n");
	} else if (!sbp[1])
		printk(KERN_WARNING
		       "NILFS warning: unable to read secondary superblock\n");

	valid[0] = nilfs_valid_sb(sbp[0]);
	valid[1] = nilfs_valid_sb(sbp[1]);
	swp = valid[1] &&
		(!valid[0] ||
		 le64_to_cpu(sbp[1]->s_wtime) > le64_to_cpu(sbp[0]->s_wtime));

	if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
		brelse(sbh[1]);
		sbh[1] = NULL;
		sbp[1] = NULL;
		swp = 0;
	}
	if (!valid[swp]) {
		nilfs_release_super_block(nilfs);
		printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n",
		       sb->s_id);
		return -EINVAL;
	}

	if (swp) {
		printk(KERN_WARNING "NILFS warning: broken superblock. "
		       "using spare superblock.\n");
		nilfs_swap_super_block(nilfs);
	}

	nilfs->ns_sbwtime[0] = le64_to_cpu(sbp[0]->s_wtime);
	nilfs->ns_sbwtime[1] = valid[!swp] ? le64_to_cpu(sbp[1]->s_wtime) : 0;
	nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
	*sbpp = sbp[0];
	return 0;
}
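/*
 * The routine above implements the dual-superblock scheme: the primary
 * copy sits at a fixed offset (NILFS_SB_OFFSET_BYTES) from the start
 * of the device and the spare near its end (NILFS_SB2_OFFSET_BYTES).
 * Both are read, each is validated independently, and when both pass
 * the CRC check the one with the newer s_wtime wins;
 * nilfs_sb2_bad_offset() additionally rejects a spare that would fall
 * inside the segment area described by the chosen super block.
 */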
/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs). It takes on some portion of the jobs
 * typically done by a fill_super() routine. This division arises because
 * multiple NILFS instances may be mounted on the same device
 * simultaneously; only the first mount invokes these tasks.
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
{
	struct super_block *sb = sbi->s_super;
	struct nilfs_super_block *sbp;
	struct backing_dev_info *bdi;
	int blocksize;
	int err;

	down_write(&nilfs->ns_sem);
	if (nilfs_init(nilfs)) {
		/* Load values from existing the_nilfs */
		sbp = nilfs->ns_sbp[0];
		err = nilfs_store_magic_and_option(sb, sbp, data);
		if (err)
			goto out;

		blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
		if (sb->s_blocksize != blocksize &&
		    !sb_set_blocksize(sb, blocksize)) {
			printk(KERN_ERR "NILFS: blocksize %d unfit to device\n",
			       blocksize);
			err = -EINVAL;
		}
		sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);
		goto out;
	}

	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		printk(KERN_ERR "NILFS: unable to set blocksize\n");
		err = -EINVAL;
		goto out;
	}
	err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
	if (err)
		goto out;

	err = nilfs_store_magic_and_option(sb, sbp, data);
	if (err)
		goto failed_sbh;

	blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
	if (sb->s_blocksize != blocksize) {
		int hw_blocksize = bdev_logical_block_size(sb->s_bdev);

		if (blocksize < hw_blocksize) {
			printk(KERN_ERR
			       "NILFS: blocksize %d too small for device "
			       "(sector-size = %d).\n",
			       blocksize, hw_blocksize);
			err = -EINVAL;
			goto failed_sbh;
		}
		nilfs_release_super_block(nilfs);
		sb_set_blocksize(sb, blocksize);

		err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
		if (err)
			goto out;
			/* not failed_sbh; sbh is released automatically
			   when reloading fails. */
	}
	nilfs->ns_blocksize_bits = sb->s_blocksize_bits;

	err = nilfs_store_disk_layout(nilfs, sbp);
	if (err)
		goto failed_sbh;

	sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

	nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);

	bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
	nilfs->ns_bdi = bdi ? : &default_backing_dev_info;

	/* Finding last segment */
	nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
	nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
	nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

	nilfs->ns_seg_seq = nilfs->ns_last_seq;
	nilfs->ns_segnum =
		nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
	nilfs->ns_cno = nilfs->ns_last_cno + 1;
	if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
		printk(KERN_ERR "NILFS: invalid last segment number.\n");
		err = -EINVAL;
		goto failed_sbh;
	}
	/* Dummy values */
	nilfs->ns_free_segments_count =
		nilfs->ns_nsegments - (nilfs->ns_segnum + 1);

	/* Initialize gcinode cache */
	err = nilfs_init_gccache(nilfs);
	if (err)
		goto failed_sbh;

	set_nilfs_init(nilfs);
	err = 0;
 out:
	up_write(&nilfs->ns_sem);
	return err;

 failed_sbh:
	nilfs_release_super_block(nilfs);
	goto out;
}

int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
	struct inode *dat = nilfs_dat_inode(nilfs);
	unsigned long ncleansegs;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	if (likely(!err))
		*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
	return err;
}

int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
	struct inode *sufile = nilfs->ns_sufile;
	unsigned long ncleansegs, nincsegs;
	int ret;

	ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
	if (likely(!ret)) {
		nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
			nilfs->ns_blocks_per_segment + 1;
		if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
			ret++;
	}
	return ret;
}
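/*
 * Worked example for nilfs_near_disk_full() (illustrative numbers):
 * with 2048 blocks per segment, 5000 dirty blocks pending would need
 * nincsegs = 5000 / 2048 + 1 = 3 more segments.  The device is then
 * reported as nearly full whenever the number of clean segments is no
 * more than ns_nrsvsegs + 3, i.e. the reserve plus what the pending
 * dirty blocks are about to consume.
 */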
/**
 * nilfs_find_sbinfo - find existing nilfs_sb_info structure
 * @nilfs: nilfs object
 * @rw_mount: mount type (non-zero value for read/write mount)
 * @cno: checkpoint number (zero for read-only mount)
 *
 * nilfs_find_sbinfo() returns the nilfs_sb_info structure that matches
 * @rw_mount and, in the case of snapshots, @cno.  If no instance is
 * found, NULL is returned.  Although the super block instance can be
 * unmounted after this function returns, the nilfs_sb_info structure
 * is kept in memory until nilfs_put_sbinfo() is called.
 */
struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *nilfs,
					int rw_mount, __u64 cno)
{
	struct nilfs_sb_info *sbi;

	down_read(&nilfs->ns_super_sem);
	/*
	 * The SNAPSHOT flag and sb->s_flags are supposed to be
	 * protected with nilfs->ns_super_sem.
	 */
	sbi = nilfs->ns_current;
	if (rw_mount) {
		if (sbi && !(sbi->s_super->s_flags & MS_RDONLY))
			goto found; /* read/write mount */
		else
			goto out;
	} else if (cno == 0) {
		if (sbi && (sbi->s_super->s_flags & MS_RDONLY))
			goto found; /* read-only mount */
		else
			goto out;
	}

	list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
		if (nilfs_test_opt(sbi, SNAPSHOT) &&
		    sbi->s_snapshot_cno == cno)
			goto found; /* snapshot mount */
	}
 out:
	up_read(&nilfs->ns_super_sem);
	return NULL;

 found:
	atomic_inc(&sbi->s_count);
	up_read(&nilfs->ns_super_sem);
	return sbi;
}

int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno,
				int snapshot_mount)
{
	struct nilfs_sb_info *sbi;
	int ret = 0;

	down_read(&nilfs->ns_super_sem);
	if (cno == 0 || cno > nilfs->ns_cno)
		goto out_unlock;

	list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
		if (sbi->s_snapshot_cno == cno &&
		    (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) {
					/* exclude read-only mounts */
			ret++;
			break;
		}
	}
	/* for protecting recent checkpoints */
	if (cno >= nilfs_last_cno(nilfs))
		ret++;

 out_unlock:
	up_read(&nilfs->ns_super_sem);
	return ret;
}