// SPDX-License-Identifier: GPL-2.0+
/*
 * the_nilfs shared structure.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/random.h>
#include <linux/log2.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "alloc.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
#include "segbuf.h"


static int nilfs_valid_sb(struct nilfs_super_block *sbp);

void nilfs_set_last_segment(struct the_nilfs *nilfs,
			    sector_t start_blocknr, u64 seq, __u64 cno)
{
	spin_lock(&nilfs->ns_last_segment_lock);
	nilfs->ns_last_pseg = start_blocknr;
	nilfs->ns_last_seq = seq;
	nilfs->ns_last_cno = cno;

	if (!nilfs_sb_dirty(nilfs)) {
		if (nilfs->ns_prev_seq == nilfs->ns_last_seq)
			goto stay_cursor;

		set_nilfs_sb_dirty(nilfs);
	}
	nilfs->ns_prev_seq = nilfs->ns_last_seq;

 stay_cursor:
	spin_unlock(&nilfs->ns_last_segment_lock);
}

/**
 * alloc_nilfs - allocate a nilfs object
 * @sb: super block instance
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *alloc_nilfs(struct super_block *sb)
{
	struct the_nilfs *nilfs;

	nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
	if (!nilfs)
		return NULL;

	nilfs->ns_sb = sb;
	nilfs->ns_bdev = sb->s_bdev;
	atomic_set(&nilfs->ns_ndirtyblks, 0);
	init_rwsem(&nilfs->ns_sem);
	mutex_init(&nilfs->ns_snapshot_mount_mutex);
	INIT_LIST_HEAD(&nilfs->ns_dirty_files);
	INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
	spin_lock_init(&nilfs->ns_inode_lock);
	spin_lock_init(&nilfs->ns_next_gen_lock);
	spin_lock_init(&nilfs->ns_last_segment_lock);
	nilfs->ns_cptree = RB_ROOT;
	spin_lock_init(&nilfs->ns_cptree_lock);
	init_rwsem(&nilfs->ns_segctor_sem);
	nilfs->ns_sb_update_freq = NILFS_SB_FREQ;

	return nilfs;
}

/**
 * destroy_nilfs - destroy nilfs object
 * @nilfs: nilfs object to be released
 */
void destroy_nilfs(struct the_nilfs *nilfs)
{
	might_sleep();
	if (nilfs_init(nilfs)) {
		nilfs_sysfs_delete_device_group(nilfs);
		brelse(nilfs->ns_sbh[0]);
		brelse(nilfs->ns_sbh[1]);
	}
	kfree(nilfs);
}

static int nilfs_load_super_root(struct the_nilfs *nilfs,
				 struct super_block *sb, sector_t sr_block)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct nilfs_inode *rawi;
	unsigned int dat_entry_size, segment_usage_size, checkpoint_size;
	unsigned int inode_size;
	int err;

	err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1);
	if (unlikely(err))
		return err;

	down_read(&nilfs->ns_sem);
	dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size);
	checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size);
	segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size);
	up_read(&nilfs->ns_sem);

	inode_size = nilfs->ns_inode_size;

	rawi = (void *)bh_sr->b_data + NILFS_SR_DAT_OFFSET(inode_size);
	err = nilfs_dat_read(sb, dat_entry_size, rawi, &nilfs->ns_dat);
	if (err)
		goto failed;

	rawi = (void *)bh_sr->b_data + NILFS_SR_CPFILE_OFFSET(inode_size);
	err = nilfs_cpfile_read(sb, checkpoint_size, rawi, &nilfs->ns_cpfile);
	if (err)
		goto failed_dat;

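	/* read the segment usage file (sufile) inode from the super root */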
	rawi = (void *)bh_sr->b_data + NILFS_SR_SUFILE_OFFSET(inode_size);
	err = nilfs_sufile_read(sb, segment_usage_size, rawi,
				&nilfs->ns_sufile);
	if (err)
		goto failed_cpfile;

	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
	brelse(bh_sr);
	return err;

 failed_cpfile:
	iput(nilfs->ns_cpfile);

 failed_dat:
	iput(nilfs->ns_dat);
	goto failed;
}

static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
	memset(ri, 0, sizeof(*ri));
	INIT_LIST_HEAD(&ri->ri_used_segments);
}

static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
	nilfs_dispose_segment_list(&ri->ri_used_segments);
}

/**
 * nilfs_store_log_cursor - load log cursor from a super block
 * @nilfs: nilfs object
 * @sbp: buffer storing super block to be read
 *
 * nilfs_store_log_cursor() reads the last position of the log
 * containing a super root from a given super block, and initializes
 * relevant information on the nilfs object in preparation for log
 * scanning and recovery.
 */
static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
				  struct nilfs_super_block *sbp)
{
	int ret = 0;

	nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
	nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
	nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

	nilfs->ns_prev_seq = nilfs->ns_last_seq;
	nilfs->ns_seg_seq = nilfs->ns_last_seq;
	nilfs->ns_segnum =
		nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
	nilfs->ns_cno = nilfs->ns_last_cno + 1;
	if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
		nilfs_err(nilfs->ns_sb,
			  "pointed segment number is out of range: segnum=%llu, nsegments=%lu",
			  (unsigned long long)nilfs->ns_segnum,
			  nilfs->ns_nsegments);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * nilfs_get_blocksize - get block size from raw superblock data
 * @sb: super block instance
 * @sbp: superblock raw data buffer
 * @blocksize: place to store block size
 *
 * nilfs_get_blocksize() calculates the block size from the block size
 * exponent information written in @sbp and stores it in @blocksize,
 * or aborts with an error message if it's too large.
 *
 * Return Value: On success, 0 is returned. If the block size is too
 * large, -EINVAL is returned.
 */
static int nilfs_get_blocksize(struct super_block *sb,
			       struct nilfs_super_block *sbp, int *blocksize)
{
	unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);

	if (unlikely(shift_bits >
		     ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)) {
		nilfs_err(sb, "too large filesystem blocksize: 2 ^ %u KiB",
			  shift_bits);
		return -EINVAL;
	}
	*blocksize = BLOCK_SIZE << shift_bits;
	return 0;
}

/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure to be loaded
 * @sb: super block instance used to recover past segment
 *
 * load_nilfs() searches and loads the latest super root,
 * attaches the last segment, and does recovery if needed.
 * The caller must call this exclusively for simultaneous mounts.
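 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.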
 */
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
	struct nilfs_recovery_info ri;
	unsigned int s_flags = sb->s_flags;
	int really_read_only = bdev_read_only(nilfs->ns_bdev);
	int valid_fs = nilfs_valid_fs(nilfs);
	int err;

	if (!valid_fs) {
		nilfs_warn(sb, "mounting unchecked fs");
		if (s_flags & SB_RDONLY) {
			nilfs_info(sb,
				   "recovery required for readonly filesystem");
			nilfs_info(sb,
				   "write access will be enabled during recovery");
		}
	}

	nilfs_init_recovery_info(&ri);

	err = nilfs_search_super_root(nilfs, &ri);
	if (unlikely(err)) {
		struct nilfs_super_block **sbp = nilfs->ns_sbp;
		int blocksize;

		if (err != -EINVAL)
			goto scan_error;

		if (!nilfs_valid_sb(sbp[1])) {
			nilfs_warn(sb,
				   "unable to fall back to spare super block");
			goto scan_error;
		}
		nilfs_info(sb, "trying rollback from an earlier position");

		/*
		 * restore super block with its spare and reconfigure
		 * relevant states of the nilfs object.
		 */
		memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
		nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed);
		nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);

		/* verify consistency between two super blocks */
		err = nilfs_get_blocksize(sb, sbp[0], &blocksize);
		if (err)
			goto scan_error;

		if (blocksize != nilfs->ns_blocksize) {
			nilfs_warn(sb,
				   "blocksize differs between two super blocks (%d != %d)",
				   blocksize, nilfs->ns_blocksize);
			err = -EINVAL;
			goto scan_error;
		}

		err = nilfs_store_log_cursor(nilfs, sbp[0]);
		if (err)
			goto scan_error;

		/* drop clean flag to allow roll-forward and recovery */
		nilfs->ns_mount_state &= ~NILFS_VALID_FS;
		valid_fs = 0;

		err = nilfs_search_super_root(nilfs, &ri);
		if (err)
			goto scan_error;
	}

	err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root);
	if (unlikely(err)) {
		nilfs_err(sb, "error %d while loading super root", err);
		goto failed;
	}

	if (valid_fs)
		goto skip_recovery;

	if (s_flags & SB_RDONLY) {
		__u64 features;

		if (nilfs_test_opt(nilfs, NORECOVERY)) {
			nilfs_info(sb,
				   "norecovery option specified, skipping roll-forward recovery");
			goto skip_recovery;
		}
		features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
			~NILFS_FEATURE_COMPAT_RO_SUPP;
		if (features) {
			nilfs_err(sb,
				  "couldn't proceed with recovery because of unsupported optional features (%llx)",
				  (unsigned long long)features);
			err = -EROFS;
			goto failed_unload;
		}
		if (really_read_only) {
			nilfs_err(sb,
				  "write access unavailable, cannot proceed");
			err = -EROFS;
			goto failed_unload;
		}
		sb->s_flags &= ~SB_RDONLY;
	} else if (nilfs_test_opt(nilfs, NORECOVERY)) {
		nilfs_err(sb,
			  "recovery cancelled because norecovery option was specified for a read/write mount");
		err = -EINVAL;
		goto failed_unload;
	}

	err = nilfs_salvage_orphan_logs(nilfs, sb, &ri);
	if (err)
		goto failed_unload;

	down_write(&nilfs->ns_sem);
	nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */
	err = nilfs_cleanup_super(sb);
	up_write(&nilfs->ns_sem);

	if (err) {
		nilfs_err(sb,
			  "error %d updating super block. recovery unfinished.",
			  err);
		goto failed_unload;
	}
	nilfs_info(sb, "recovery complete");

 skip_recovery:
	nilfs_clear_recovery_info(&ri);
	sb->s_flags = s_flags;
	return 0;

 scan_error:
	nilfs_err(sb, "error %d while searching super root", err);
	goto failed;

 failed_unload:
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_dat);

 failed:
	nilfs_clear_recovery_info(&ri);
	sb->s_flags = s_flags;
	return err;
}

static unsigned long long nilfs_max_size(unsigned int blkbits)
{
	unsigned int max_bits;
	unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */

	max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
	if (max_bits < 64)
		res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
	return res;
}

/**
 * nilfs_nrsvsegs - calculate the number of reserved segments
 * @nilfs: nilfs object
 * @nsegs: total number of segments
 */
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
{
	return max_t(unsigned long, NILFS_MIN_NRSVSEGS,
		     DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage,
				  100));
}

void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
{
	nilfs->ns_nsegments = nsegs;
	nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs);
}

static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
				   struct nilfs_super_block *sbp)
{
	if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
		nilfs_err(nilfs->ns_sb,
			  "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
			  le32_to_cpu(sbp->s_rev_level),
			  le16_to_cpu(sbp->s_minor_rev_level),
			  NILFS_CURRENT_REV, NILFS_MINOR_REV);
		return -EINVAL;
	}
	nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
	if (nilfs->ns_sbsize > BLOCK_SIZE)
		return -EINVAL;

	nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
	if (nilfs->ns_inode_size > nilfs->ns_blocksize) {
		nilfs_err(nilfs->ns_sb, "too large inode size: %d bytes",
			  nilfs->ns_inode_size);
		return -EINVAL;
	} else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) {
		nilfs_err(nilfs->ns_sb, "too small inode size: %d bytes",
			  nilfs->ns_inode_size);
		return -EINVAL;
	}

	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
		nilfs_err(nilfs->ns_sb, "too short segment: %lu blocks",
			  nilfs->ns_blocks_per_segment);
		return -EINVAL;
	}

	nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
	nilfs->ns_r_segments_percentage =
		le32_to_cpu(sbp->s_r_segments_percentage);
	if (nilfs->ns_r_segments_percentage < 1 ||
	    nilfs->ns_r_segments_percentage > 99) {
		nilfs_err(nilfs->ns_sb,
			  "invalid reserved segments percentage: %lu",
			  nilfs->ns_r_segments_percentage);
		return -EINVAL;
	}

	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
	return 0;
}

static int nilfs_valid_sb(struct nilfs_super_block *sbp)
{
	static unsigned char sum[4];
	const int sumoff = offsetof(struct nilfs_super_block, s_sum);
	size_t bytes;
	u32 crc;

	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
		return 0;
	bytes = le16_to_cpu(sbp->s_bytes);
	if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
		return 0;
	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
		       sumoff);
	crc = crc32_le(crc, sum, 4);
	crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
		       bytes - sumoff - 4);
	return crc == le32_to_cpu(sbp->s_sum);
}

/**
 * nilfs_sb2_bad_offset - check the location of the second superblock
 * @sbp: superblock raw data buffer
 * @offset: byte offset of second superblock calculated from device size
 *
 * nilfs_sb2_bad_offset() checks whether the position of the second
 * superblock is valid based on the filesystem parameters stored in
 * @sbp. If @offset points to a location within the segment area, or
 * if the parameters themselves are not normal, it is determined to be
 * invalid.
 *
 * Return Value: true if invalid, false if valid.
 */
static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
	unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);
	u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
	u64 nsegments = le64_to_cpu(sbp->s_nsegments);
	u64 index;

	if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS ||
	    shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)
		return true;

	index = offset >> (shift_bits + BLOCK_SIZE_BITS);
	do_div(index, blocks_per_segment);
	return index < nsegments;
}

static void nilfs_release_super_block(struct the_nilfs *nilfs)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (nilfs->ns_sbp[i]) {
			brelse(nilfs->ns_sbh[i]);
			nilfs->ns_sbh[i] = NULL;
			nilfs->ns_sbp[i] = NULL;
		}
	}
}

void nilfs_fall_back_super_block(struct the_nilfs *nilfs)
{
	brelse(nilfs->ns_sbh[0]);
	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = NULL;
	nilfs->ns_sbp[1] = NULL;
}

void nilfs_swap_super_block(struct the_nilfs *nilfs)
{
	struct buffer_head *tsbh = nilfs->ns_sbh[0];
	struct nilfs_super_block *tsbp = nilfs->ns_sbp[0];

	nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
	nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
	nilfs->ns_sbh[1] = tsbh;
	nilfs->ns_sbp[1] = tsbp;
}

static int nilfs_load_super_block(struct the_nilfs *nilfs,
				  struct super_block *sb, int blocksize,
				  struct nilfs_super_block **sbpp)
{
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	struct buffer_head **sbh = nilfs->ns_sbh;
	u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev);
	int valid[2], swp = 0;

	if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
		nilfs_err(sb, "device size too small");
		return -EINVAL;
	}
	sb2off = NILFS_SB2_OFFSET_BYTES(devsize);

	sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
					&sbh[0]);
	sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);

	if (!sbp[0]) {
		if (!sbp[1]) {
			nilfs_err(sb, "unable to read superblock");
			return -EIO;
		}
		nilfs_warn(sb,
			   "unable to read primary superblock (blocksize = %d)",
			   blocksize);
	} else if (!sbp[1]) {
		nilfs_warn(sb,
			   "unable to read secondary superblock (blocksize = %d)",
			   blocksize);
	}

	/*
	 * Compare two super blocks and set 1 in swp if the secondary
	 * super block is valid and newer. Otherwise, set 0 in swp.
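	 * Here "newer" means a larger last checkpoint number (s_last_cno).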
	 */
	valid[0] = nilfs_valid_sb(sbp[0]);
	valid[1] = nilfs_valid_sb(sbp[1]);
	swp = valid[1] && (!valid[0] ||
			   le64_to_cpu(sbp[1]->s_last_cno) >
			   le64_to_cpu(sbp[0]->s_last_cno));

	if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
		brelse(sbh[1]);
		sbh[1] = NULL;
		sbp[1] = NULL;
		valid[1] = 0;
		swp = 0;
	}
	if (!valid[swp]) {
		nilfs_release_super_block(nilfs);
		nilfs_err(sb, "couldn't find nilfs on the device");
		return -EINVAL;
	}

	if (!valid[!swp])
		nilfs_warn(sb,
			   "broken superblock, retrying with spare superblock (blocksize = %d)",
			   blocksize);
	if (swp)
		nilfs_swap_super_block(nilfs);

	nilfs->ns_sbwcount = 0;
	nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
	nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
	*sbpp = sbp[0];
	return 0;
}

/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sb: super block
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs).
 *
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
{
	struct nilfs_super_block *sbp;
	int blocksize;
	int err;

	down_write(&nilfs->ns_sem);

	blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
	if (!blocksize) {
		nilfs_err(sb, "unable to set blocksize");
		err = -EINVAL;
		goto out;
	}
	err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
	if (err)
		goto out;

	err = nilfs_store_magic_and_option(sb, sbp, data);
	if (err)
		goto failed_sbh;

	err = nilfs_check_feature_compatibility(sb, sbp);
	if (err)
		goto failed_sbh;

	err = nilfs_get_blocksize(sb, sbp, &blocksize);
	if (err)
		goto failed_sbh;

	if (blocksize < NILFS_MIN_BLOCK_SIZE) {
		nilfs_err(sb,
			  "couldn't mount because of unsupported filesystem blocksize %d",
			  blocksize);
		err = -EINVAL;
		goto failed_sbh;
	}
	if (sb->s_blocksize != blocksize) {
		int hw_blocksize = bdev_logical_block_size(sb->s_bdev);

		if (blocksize < hw_blocksize) {
			nilfs_err(sb,
				  "blocksize %d too small for device (sector-size = %d)",
				  blocksize, hw_blocksize);
			err = -EINVAL;
			goto failed_sbh;
		}
		nilfs_release_super_block(nilfs);
		sb_set_blocksize(sb, blocksize);

		err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
		if (err)
			goto out;
		/*
		 * Not to failed_sbh; sbh is released automatically
		 * when reloading fails.
		 */
	}
	nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
	nilfs->ns_blocksize = blocksize;

	get_random_bytes(&nilfs->ns_next_generation,
			 sizeof(nilfs->ns_next_generation));

	err = nilfs_store_disk_layout(nilfs, sbp);
	if (err)
		goto failed_sbh;

	sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

	nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);

	err = nilfs_store_log_cursor(nilfs, sbp);
	if (err)
		goto failed_sbh;

	err = nilfs_sysfs_create_device_group(sb);
	if (err)
		goto failed_sbh;

	set_nilfs_init(nilfs);
	err = 0;
 out:
	up_write(&nilfs->ns_sem);
	return err;

 failed_sbh:
	nilfs_release_super_block(nilfs);
	goto out;
}

int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
			   size_t nsegs)
{
	sector_t seg_start, seg_end;
	sector_t start = 0, nblocks = 0;
	unsigned int sects_per_block;
	__u64 *sn;
	int ret = 0;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			  bdev_logical_block_size(nilfs->ns_bdev);
	for (sn = segnump; sn < segnump + nsegs; sn++) {
		nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);

		if (!nblocks) {
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		} else if (start + nblocks == seg_start) {
			nblocks += seg_end - seg_start + 1;
		} else {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
						   start * sects_per_block,
						   nblocks * sects_per_block,
						   GFP_NOFS);
			if (ret < 0)
				return ret;
			nblocks = 0;
		}
	}
	if (nblocks)
		ret = blkdev_issue_discard(nilfs->ns_bdev,
					   start * sects_per_block,
					   nblocks * sects_per_block,
					   GFP_NOFS);
	return ret;
}

int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
	unsigned long ncleansegs;

	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
	return 0;
}

int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
	unsigned long ncleansegs, nincsegs;

	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
	nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
		   nilfs->ns_blocks_per_segment + 1;

	return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
}

struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno)
{
	struct rb_node *n;
	struct nilfs_root *root;

	spin_lock(&nilfs->ns_cptree_lock);
	n = nilfs->ns_cptree.rb_node;
	while (n) {
		root = rb_entry(n, struct nilfs_root, rb_node);

		if (cno < root->cno) {
			n = n->rb_left;
		} else if (cno > root->cno) {
			n = n->rb_right;
		} else {
			refcount_inc(&root->count);
			spin_unlock(&nilfs->ns_cptree_lock);
			return root;
		}
	}
	spin_unlock(&nilfs->ns_cptree_lock);

	return NULL;
}

struct nilfs_root *
nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
{
	struct rb_node **p, *parent;
	struct nilfs_root *root, *new;
	int err;

	root = nilfs_lookup_root(nilfs, cno);
	if (root)
		return root;

	new = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!new)
		return NULL;

	spin_lock(&nilfs->ns_cptree_lock);

	p = &nilfs->ns_cptree.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		root = rb_entry(parent, struct nilfs_root, rb_node);

		if (cno < root->cno) {
			p = &(*p)->rb_left;
		} else if (cno > root->cno) {
			p = &(*p)->rb_right;
		} else {
			refcount_inc(&root->count);
			spin_unlock(&nilfs->ns_cptree_lock);
			kfree(new);
			return root;
		}
	}

	new->cno = cno;
	new->ifile = NULL;
	new->nilfs = nilfs;
	refcount_set(&new->count, 1);
	atomic64_set(&new->inodes_count, 0);
	atomic64_set(&new->blocks_count, 0);

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &nilfs->ns_cptree);

	spin_unlock(&nilfs->ns_cptree_lock);

	err = nilfs_sysfs_create_snapshot_group(new);
	if (err) {
		kfree(new);
		new = NULL;
	}

	return new;
}

void nilfs_put_root(struct nilfs_root *root)
{
	struct the_nilfs *nilfs = root->nilfs;

	if (refcount_dec_and_lock(&root->count, &nilfs->ns_cptree_lock)) {
		rb_erase(&root->rb_node, &nilfs->ns_cptree);
		spin_unlock(&nilfs->ns_cptree_lock);

		nilfs_sysfs_delete_snapshot_group(root);
		iput(root->ifile);

		kfree(root);
	}
}