/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
	NILFS_SEG_VALID,
	NILFS_SEG_NO_SUPER_ROOT,
	NILFS_SEG_FAIL_IO,
	NILFS_SEG_FAIL_MAGIC,
	NILFS_SEG_FAIL_SEQ,
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
	NILFS_SEG_FAIL_CHECKSUM_FULL,
	NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
	ino_t ino;		/*
				 * Inode number of the file that this block
				 * belongs to
				 */
	sector_t blocknr;	/* block number */
	__u64 vblocknr;		/* virtual block number */
	unsigned long blkoff;	/* File offset of the data block (per block) */
	struct list_head list;
};


static int nilfs_warn_segment_error(int err)
{
	switch (err) {
	case NILFS_SEG_FAIL_IO:
		printk(KERN_WARNING
		       "NILFS warning: I/O error on loading last segment\n");
		return -EIO;
	case NILFS_SEG_FAIL_MAGIC:
		printk(KERN_WARNING
		       "NILFS warning: Segment magic number invalid\n");
		break;
	case NILFS_SEG_FAIL_SEQ:
		printk(KERN_WARNING
		       "NILFS warning: Sequence number mismatch\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in super root\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_FULL:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in segment payload\n");
		break;
	case NILFS_SEG_FAIL_CONSISTENCY:
		printk(KERN_WARNING
		       "NILFS warning: Inconsistent segment\n");
		break;
	case NILFS_SEG_NO_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: No super root in the last segment\n");
		break;
	}
	return -EINVAL;
}

/**
 * nilfs_compute_checksum - compute checksum of blocks continuously
 * @nilfs: nilfs object
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
				  struct buffer_head *bhs, u32 *sum,
				  unsigned long offset, u64 check_bytes,
				  sector_t start, unsigned long nblock)
{
	unsigned int blocksize = nilfs->ns_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
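	/* Fold any remaining blocks into the CRC, reading each from disk */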
	if (--nblock > 0) {
		do {
			struct buffer_head *bh;

			bh = __bread(nilfs->ns_bdev, ++start, blocksize);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @nilfs: nilfs object
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned int bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > nilfs->ns_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		if (nilfs_compute_checksum(
			    nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
			    sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(ret);
}

/**
 * nilfs_read_log_header - read summary header of the specified log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: pointer to return segment summary structure
 */
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
		      struct nilfs_segment_summary **sum)
{
	struct buffer_head *bh_sum;

	bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (bh_sum)
		*sum = (struct nilfs_segment_summary *)bh_sum->b_data;
	return bh_sum;
}

/**
 * nilfs_validate_log - verify consistency of log
 * @nilfs: nilfs object
 * @seg_seq: sequence number of segment
 * @bh_sum: buffer head of summary block
 * @sum: segment summary struct
 */
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
			      struct buffer_head *bh_sum,
			      struct nilfs_segment_summary *sum)
{
	unsigned long nblock;
	u32 crc;
	int ret;

	ret = NILFS_SEG_FAIL_MAGIC;
	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
		goto out;

	ret = NILFS_SEG_FAIL_SEQ;
	if (le64_to_cpu(sum->ss_seq) != seg_seq)
		goto out;

	nblock = le32_to_cpu(sum->ss_nblocks);
	ret = NILFS_SEG_FAIL_CONSISTENCY;
	if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
		/* This limits the number of blocks read in the CRC check */
		goto out;

	ret = NILFS_SEG_FAIL_IO;
	if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
				   ((u64)nblock << nilfs->ns_blocksize_bits),
				   bh_sum->b_blocknr, nblock))
		goto out;

	ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
	if (crc != le32_to_cpu(sum->ss_datasum))
		goto out;
	ret = 0;
 out:
	return ret;
}
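
/*
 * The helpers below walk the summary area of a log, which holds nilfs_finfo
 * and block info entries spread over one or more summary blocks; items are
 * assumed not to straddle a block boundary.
 */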

/**
 * nilfs_read_summary_info - read an item on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be read
 */
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
				     struct buffer_head **pbh,
				     unsigned int *offset, unsigned int bytes)
{
	void *ptr;
	sector_t blocknr;

	BUG_ON((*pbh)->b_size < *offset);
	if (bytes > (*pbh)->b_size - *offset) {
		blocknr = (*pbh)->b_blocknr;
		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + 1,
			       nilfs->ns_blocksize);
		if (unlikely(!*pbh))
			return NULL;
		*offset = 0;
	}
	ptr = (*pbh)->b_data + *offset;
	*offset += bytes;
	return ptr;
}

/**
 * nilfs_skip_summary_info - skip items on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be skipped
 * @count: number of items to be skipped
 */
static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
				    struct buffer_head **pbh,
				    unsigned int *offset, unsigned int bytes,
				    unsigned long count)
{
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);

		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
			       nilfs->ns_blocksize);
	}
}

/**
 * nilfs_scan_dsync_log - get block information of a log written for data sync
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: log summary information
 * @head: list head to add nilfs_recovery_block struct
 */
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
				struct nilfs_segment_summary *sum,
				struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	u32 nfinfo, sumbytes;
	sector_t blocknr;
	ino_t ino;
	int err = -EIO;

	nfinfo = le32_to_cpu(sum->ss_nfinfo);
	if (!nfinfo)
		return 0;

	sumbytes = le32_to_cpu(sum->ss_sumbytes);
	blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
	bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh))
		goto out;

	offset = le16_to_cpu(sum->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
						sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
							sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* INIT_LIST_HEAD(&rb->list); */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
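		/*
		 * Only data blocks are salvaged during roll forward, so the
		 * block info of node (B-tree) blocks is skipped here.
		 */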
		blocknr += nnodeblk; /* always 0 for data sync logs */
		nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
					nnodeblk);
		if (unlikely(!bh))
			goto out;
	}
	err = 0;
 out:
	brelse(bh);	/* brelse(NULL) is just ignored */
	return err;
}

static void dispose_recovery_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_recovery_block *rb;

		rb = list_first_entry(head, struct nilfs_recovery_block, list);
		list_del(&rb->list);
		kfree(rb);
	}
}

struct nilfs_segment_entry {
	struct list_head list;
	__u64 segnum;
};

static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
	struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

	if (unlikely(!ent))
		return -ENOMEM;

	ent->segnum = segnum;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, head);
	return 0;
}

void nilfs_dispose_segment_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_segment_entry *ent;

		ent = list_first_entry(head, struct nilfs_segment_entry, list);
		list_del(&ent->list);
		kfree(ent);
	}
}
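
/*
 * nilfs_prepare_segment_for_recovery - prepare segment usage state so that
 * log writing can resume after roll forward: the segment next to the latest
 * super root is freed, the segments touched by recovery are marked for
 * scrapping, and a fresh segment is allocated for the resumed log writer.
 */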

static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					      struct super_block *sb,
					      struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	/*
	 * Releasing the next segment of the latest super root.
	 * The next segment is invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err))
		goto failed;

	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Collecting segments written after the latest super root.
	 * These are marked dirty to avoid being reallocated in the next write.
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate new segments for recovery */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* No need to recover sufile because it will be destroyed on error */
	return err;
}

static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
				     struct nilfs_recovery_block *rb,
				     struct page *page)
{
	struct buffer_head *bh_org;
	void *kaddr;

	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh_org))
		return -EIO;

	kaddr = kmap_atomic(page);
	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
	kunmap_atomic(kaddr);
	brelse(bh_org);
	return 0;
}

static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
				      struct super_block *sb,
				      struct nilfs_root *root,
				      struct list_head *head,
				      unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned int blocksize = nilfs->ns_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sb, root, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(inode->i_mapping, pos, blocksize,
					0, &page, nilfs_get_block);
		if (unlikely(err)) {
			loff_t isize = inode->i_size;

			if (pos + blocksize > isize)
				nilfs_write_failed(inode->i_mapping,
						   pos + blocksize);
			goto failed_inode;
		}

		err = nilfs_recovery_copy_block(nilfs, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		put_page(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		put_page(page);

 failed_inode:
		printk(KERN_WARNING
		       "NILFS warning: error recovering data block "
		       "(err=%d, ino=%lu, block-offset=%llu)\n",
		       err, (unsigned long)rb->ino,
		       (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
	return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root object of the recovered checkpoint
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct super_block *sb,
				 struct nilfs_root *root,
				 struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start;
	sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
	unsigned long nsalvaged_blocks = 0;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
	enum {
		RF_INIT_ST,
		RF_DSYNC_ST,   /* scanning data-sync segments */
	};
	int state = RF_INIT_ST;

	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
		brelse(bh_sum);
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum) {
			err = -EIO;
			goto failed;
		}

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}

		flags = le16_to_cpu(sum->ss_flags);
		if (flags & NILFS_SS_SR)
			goto confused;

		/* Found a valid partial segment; do recovery actions */
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		empty_seg = 0;
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		if (!(flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = nilfs->ns_ctime;

		switch (state) {
		case RF_INIT_ST:
			if (!(flags & NILFS_SS_LOGBGN) ||
			    !(flags & NILFS_SS_SYNDT))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			/* Fall through */
		case RF_DSYNC_ST:
			if (!(flags & NILFS_SS_SYNDT))
				goto confused;

			err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
						   &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (flags & NILFS_SS_LOGEND) {
				err = nilfs_recover_dsync_blocks(
					nilfs, sb, root, &dsync_blocks,
					&nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break; /* Fall through to try_next_pseg */
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;
		pseg_start += le32_to_cpu(sum->ss_nblocks);
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			break;
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
		       sb->s_id, nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	brelse(bh_sum);
	dispose_recovery_list(&dsync_blocks);
	return err;

 confused:
	err = -EINVAL;
 failed:
	printk(KERN_ERR
	       "NILFS (device %s): Error roll-forwarding "
	       "(err=%d, pseg block=%llu). ",
	       sb->s_id, err, (unsigned long long)pseg_start);
	goto out;
}

/*
 * nilfs_finish_roll_forward - zero out the summary block at the head of the
 * salvaged logs if it lives in the same segment as the newly written super
 * root (post-cleaning of recovery).
 */
static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
				      struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh;
	int err;

	if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
	    nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
		return;

	bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
	BUG_ON(!bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	if (unlikely(err))
		printk(KERN_WARNING
		       "NILFS warning: buffer sync write failed during "
		       "post-cleaning of recovery.\n");
	brelse(bh);
}

/**
 * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
			      struct super_block *sb,
			      struct nilfs_recovery_info *ri)
{
	struct nilfs_root *root;
	int err;

	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: error loading the latest checkpoint.\n");
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sb, root, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Error preparing segments for "
			       "recovery.\n");
			goto failed;
		}

		err = nilfs_attach_log_writer(sb, root);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sb);
		nilfs_detach_log_writer(sb);

		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Oops! recovery failed. "
			       "(err=%d)\n", err);
			goto failed;
		}

		nilfs_finish_roll_forward(nilfs, ri);
	}

 failed:
	nilfs_put_root(root);
	return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed to by the superblock.  It sets up struct the_nilfs through
 * this search.  It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_search_super_root(struct the_nilfs *nilfs,
			    struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end;  /* range of full segment (block number) */
	sector_t b, end;
	unsigned long nblocks;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);
	int empty_seg = 0, scan_newer = 0;
	int ret;

	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate range of segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead segment */
	b = seg_start;
	while (b <= seg_end)
		__breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);

	for (;;) {
		brelse(bh_sum);
		ret = NILFS_SEG_FAIL_IO;
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum)
			goto failed;

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}

		nblocks = le32_to_cpu(sum->ss_nblocks);
		pseg_end = pseg_start + nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid partial segment */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		flags = le16_to_cpu(sum->ss_flags);
		if (!(flags & NILFS_SS_SR) && !scan_newer) {
			/*
			 * This will never happen because a superblock
			 * (last_segment) always points to a pseg with
			 * a super root.
			 */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		if (pseg_start == seg_start) {
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				__breadahead(nilfs->ns_bdev, b++,
					     nilfs->ns_blocksize);
		}
		if (!(flags & NILFS_SS_SR)) {
			if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (flags & NILFS_SS_LOGEND)
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A valid super root was found. */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		sr_pseg_start = pseg_start;
		nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

 try_next_pseg:
		/* Standing on a course, or met an inconsistent state */
		pseg_start += nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		/* Off the trail */
		if (!scan_newer)
			/*
			 * This can happen if a checkpoint was written without
			 * barriers, or as a result of an I/O failure.
			 */
			goto failed;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			goto super_root_found; /* found a valid super root */

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Updating pointers relating to the latest checkpoint */
	brelse(bh_sum);
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	brelse(bh_sum);
	nilfs_dispose_segment_list(&segments);
	return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}