/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_dir2.h"

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}
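
/*
 * Illustrative sizing example (editor's annotation, not original source):
 * with 4k log sectors, l_sectBBsize is 8 basic blocks.  A request for 5
 * basic blocks is first padded to 13 (5 + 8) to cover a non-sector-aligned
 * starting offset, then rounded up to 16, i.e. two complete log sectors.
 */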
STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	if (XFS_FORCED_SHUTDOWN(log->l_mp))
		return XFS_ERROR(EIO);

	xfs_buf_iorequest(bp);
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}

/*
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}
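
/*
 * Worked alignment example (editor's annotation, not original source):
 * with l_sectBBsize == 8, a 1-block xlog_bread() at blk_no 9 rounds down
 * to a sector-aligned read of blocks 8-15, and xlog_align() then returns
 * b_addr + BBTOB(9 & 7), so the caller sees block 9's data directly.
 */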
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields.  If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery.  One strike!
		 */
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}
380 */ 381 xfs_buf_ioerror_alert(bp, __func__); 382 xfs_force_shutdown(bp->b_target->bt_mount, 383 SHUTDOWN_META_IO_ERROR); 384 } 385 bp->b_iodone = NULL; 386 xfs_buf_ioend(bp, 0); 387 } 388 389 /* 390 * This routine finds (to an approximation) the first block in the physical 391 * log which contains the given cycle. It uses a binary search algorithm. 392 * Note that the algorithm can not be perfect because the disk will not 393 * necessarily be perfect. 394 */ 395 STATIC int 396 xlog_find_cycle_start( 397 struct xlog *log, 398 struct xfs_buf *bp, 399 xfs_daddr_t first_blk, 400 xfs_daddr_t *last_blk, 401 uint cycle) 402 { 403 xfs_caddr_t offset; 404 xfs_daddr_t mid_blk; 405 xfs_daddr_t end_blk; 406 uint mid_cycle; 407 int error; 408 409 end_blk = *last_blk; 410 mid_blk = BLK_AVG(first_blk, end_blk); 411 while (mid_blk != first_blk && mid_blk != end_blk) { 412 error = xlog_bread(log, mid_blk, 1, bp, &offset); 413 if (error) 414 return error; 415 mid_cycle = xlog_get_cycle(offset); 416 if (mid_cycle == cycle) 417 end_blk = mid_blk; /* last_half_cycle == mid_cycle */ 418 else 419 first_blk = mid_blk; /* first_half_cycle == mid_cycle */ 420 mid_blk = BLK_AVG(first_blk, end_blk); 421 } 422 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) || 423 (mid_blk == end_blk && mid_blk-1 == first_blk)); 424 425 *last_blk = end_blk; 426 427 return 0; 428 } 429 430 /* 431 * Check that a range of blocks does not contain stop_on_cycle_no. 432 * Fill in *new_blk with the block offset where such a block is 433 * found, or with -1 (an invalid block number) if there is no such 434 * block in the range. The scan needs to occur from front to back 435 * and the pointer into the region must be updated since a later 436 * routine will need to perform another test. 437 */ 438 STATIC int 439 xlog_find_verify_cycle( 440 struct xlog *log, 441 xfs_daddr_t start_blk, 442 int nbblks, 443 uint stop_on_cycle_no, 444 xfs_daddr_t *new_blk) 445 { 446 xfs_daddr_t i, j; 447 uint cycle; 448 xfs_buf_t *bp; 449 xfs_daddr_t bufblks; 450 xfs_caddr_t buf = NULL; 451 int error = 0; 452 453 /* 454 * Greedily allocate a buffer big enough to handle the full 455 * range of basic blocks we'll be examining. If that fails, 456 * try a smaller size. We need to be able to read at least 457 * a log sector, or we're out of luck. 458 */ 459 bufblks = 1 << ffs(nbblks); 460 while (bufblks > log->l_logBBsize) 461 bufblks >>= 1; 462 while (!(bp = xlog_get_bp(log, bufblks))) { 463 bufblks >>= 1; 464 if (bufblks < log->l_sectBBsize) 465 return ENOMEM; 466 } 467 468 for (i = start_blk; i < start_blk + nbblks; i += bufblks) { 469 int bcount; 470 471 bcount = min(bufblks, (start_blk + nbblks - i)); 472 473 error = xlog_bread(log, i, bcount, bp, &buf); 474 if (error) 475 goto out; 476 477 for (j = 0; j < bcount; j++) { 478 cycle = xlog_get_cycle(buf); 479 if (cycle == stop_on_cycle_no) { 480 *new_blk = i+j; 481 goto out; 482 } 483 484 buf += BBSIZE; 485 } 486 } 487 488 *new_blk = -1; 489 490 out: 491 xlog_put_bp(bp); 492 return error; 493 } 494 495 /* 496 * Potentially backup over partial log record write. 497 * 498 * In the typical case, last_blk is the number of the block directly after 499 * a good log record. Therefore, we subtract one to get the block number 500 * of the last block in the given buffer. extra_bblks contains the number 501 * of blocks we would have read on a previous read. This happens when the 502 * last log record is split over the end of the physical log. 
503 * 504 * extra_bblks is the number of blocks potentially verified on a previous 505 * call to this routine. 506 */ 507 STATIC int 508 xlog_find_verify_log_record( 509 struct xlog *log, 510 xfs_daddr_t start_blk, 511 xfs_daddr_t *last_blk, 512 int extra_bblks) 513 { 514 xfs_daddr_t i; 515 xfs_buf_t *bp; 516 xfs_caddr_t offset = NULL; 517 xlog_rec_header_t *head = NULL; 518 int error = 0; 519 int smallmem = 0; 520 int num_blks = *last_blk - start_blk; 521 int xhdrs; 522 523 ASSERT(start_blk != 0 || *last_blk != start_blk); 524 525 if (!(bp = xlog_get_bp(log, num_blks))) { 526 if (!(bp = xlog_get_bp(log, 1))) 527 return ENOMEM; 528 smallmem = 1; 529 } else { 530 error = xlog_bread(log, start_blk, num_blks, bp, &offset); 531 if (error) 532 goto out; 533 offset += ((num_blks - 1) << BBSHIFT); 534 } 535 536 for (i = (*last_blk) - 1; i >= 0; i--) { 537 if (i < start_blk) { 538 /* valid log record not found */ 539 xfs_warn(log->l_mp, 540 "Log inconsistent (didn't find previous header)"); 541 ASSERT(0); 542 error = XFS_ERROR(EIO); 543 goto out; 544 } 545 546 if (smallmem) { 547 error = xlog_bread(log, i, 1, bp, &offset); 548 if (error) 549 goto out; 550 } 551 552 head = (xlog_rec_header_t *)offset; 553 554 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) 555 break; 556 557 if (!smallmem) 558 offset -= BBSIZE; 559 } 560 561 /* 562 * We hit the beginning of the physical log & still no header. Return 563 * to caller. If caller can handle a return of -1, then this routine 564 * will be called again for the end of the physical log. 565 */ 566 if (i == -1) { 567 error = -1; 568 goto out; 569 } 570 571 /* 572 * We have the final block of the good log (the first block 573 * of the log record _before_ the head. So we check the uuid. 574 */ 575 if ((error = xlog_header_check_mount(log->l_mp, head))) 576 goto out; 577 578 /* 579 * We may have found a log record header before we expected one. 580 * last_blk will be the 1st block # with a given cycle #. We may end 581 * up reading an entire log record. In this case, we don't want to 582 * reset last_blk. Only when last_blk points in the middle of a log 583 * record do we update last_blk. 584 */ 585 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 586 uint h_size = be32_to_cpu(head->h_size); 587 588 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; 589 if (h_size % XLOG_HEADER_CYCLE_SIZE) 590 xhdrs++; 591 } else { 592 xhdrs = 1; 593 } 594 595 if (*last_blk - i + extra_bblks != 596 BTOBB(be32_to_cpu(head->h_len)) + xhdrs) 597 *last_blk = i; 598 599 out: 600 xlog_put_bp(bp); 601 return error; 602 } 603 604 /* 605 * Head is defined to be the point of the log where the next log write 606 * could go. This means that incomplete LR writes at the end are 607 * eliminated when calculating the head. We aren't guaranteed that previous 608 * LR have complete transactions. We only know that a cycle number of 609 * current cycle number -1 won't be present in the log if we start writing 610 * from our current block number. 611 * 612 * last_blk contains the block number of the first block with a given 613 * cycle number. 614 * 615 * Return: zero if normal, non-zero if error. 616 */ 617 STATIC int 618 xlog_find_head( 619 struct xlog *log, 620 xfs_daddr_t *return_head_blk) 621 { 622 xfs_buf_t *bp; 623 xfs_caddr_t offset; 624 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk; 625 int num_scan_bblks; 626 uint first_half_cycle, last_half_cycle; 627 uint stop_on_cycle; 628 int error, log_bbnum = log->l_logBBsize; 629 630 /* Is the end of the log device zeroed? 
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	} else if (error) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *        v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                   ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
815 */ 816 num_scan_bblks = XLOG_REC_SHIFT(log); 817 if (head_blk >= num_scan_bblks) { 818 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ 819 820 /* start ptr at last block ptr before head_blk */ 821 if ((error = xlog_find_verify_log_record(log, start_blk, 822 &head_blk, 0)) == -1) { 823 error = XFS_ERROR(EIO); 824 goto bp_err; 825 } else if (error) 826 goto bp_err; 827 } else { 828 start_blk = 0; 829 ASSERT(head_blk <= INT_MAX); 830 if ((error = xlog_find_verify_log_record(log, start_blk, 831 &head_blk, 0)) == -1) { 832 /* We hit the beginning of the log during our search */ 833 start_blk = log_bbnum - (num_scan_bblks - head_blk); 834 new_blk = log_bbnum; 835 ASSERT(start_blk <= INT_MAX && 836 (xfs_daddr_t) log_bbnum-start_blk >= 0); 837 ASSERT(head_blk <= INT_MAX); 838 if ((error = xlog_find_verify_log_record(log, 839 start_blk, &new_blk, 840 (int)head_blk)) == -1) { 841 error = XFS_ERROR(EIO); 842 goto bp_err; 843 } else if (error) 844 goto bp_err; 845 if (new_blk != log_bbnum) 846 head_blk = new_blk; 847 } else if (error) 848 goto bp_err; 849 } 850 851 xlog_put_bp(bp); 852 if (head_blk == log_bbnum) 853 *return_head_blk = 0; 854 else 855 *return_head_blk = head_blk; 856 /* 857 * When returning here, we have a good block number. Bad block 858 * means that during a previous crash, we didn't have a clean break 859 * from cycle number N to cycle number N-1. In this case, we need 860 * to find the first block with cycle number N-1. 861 */ 862 return 0; 863 864 bp_err: 865 xlog_put_bp(bp); 866 867 if (error) 868 xfs_warn(log->l_mp, "failed to find log head"); 869 return error; 870 } 871 872 /* 873 * Find the sync block number or the tail of the log. 874 * 875 * This will be the block number of the last record to have its 876 * associated buffers synced to disk. Every log record header has 877 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy 878 * to get a sync block number. The only concern is to figure out which 879 * log record header to believe. 880 * 881 * The following algorithm uses the log record header with the largest 882 * lsn. The entire log record does not need to be valid. We only care 883 * that the header is valid. 884 * 885 * We could speed up search by using current head_blk buffer, but it is not 886 * available. 
887 */ 888 STATIC int 889 xlog_find_tail( 890 struct xlog *log, 891 xfs_daddr_t *head_blk, 892 xfs_daddr_t *tail_blk) 893 { 894 xlog_rec_header_t *rhead; 895 xlog_op_header_t *op_head; 896 xfs_caddr_t offset = NULL; 897 xfs_buf_t *bp; 898 int error, i, found; 899 xfs_daddr_t umount_data_blk; 900 xfs_daddr_t after_umount_blk; 901 xfs_lsn_t tail_lsn; 902 int hblks; 903 904 found = 0; 905 906 /* 907 * Find previous log record 908 */ 909 if ((error = xlog_find_head(log, head_blk))) 910 return error; 911 912 bp = xlog_get_bp(log, 1); 913 if (!bp) 914 return ENOMEM; 915 if (*head_blk == 0) { /* special case */ 916 error = xlog_bread(log, 0, 1, bp, &offset); 917 if (error) 918 goto done; 919 920 if (xlog_get_cycle(offset) == 0) { 921 *tail_blk = 0; 922 /* leave all other log inited values alone */ 923 goto done; 924 } 925 } 926 927 /* 928 * Search backwards looking for log record header block 929 */ 930 ASSERT(*head_blk < INT_MAX); 931 for (i = (int)(*head_blk) - 1; i >= 0; i--) { 932 error = xlog_bread(log, i, 1, bp, &offset); 933 if (error) 934 goto done; 935 936 if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { 937 found = 1; 938 break; 939 } 940 } 941 /* 942 * If we haven't found the log record header block, start looking 943 * again from the end of the physical log. XXXmiken: There should be 944 * a check here to make sure we didn't search more than N blocks in 945 * the previous code. 946 */ 947 if (!found) { 948 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) { 949 error = xlog_bread(log, i, 1, bp, &offset); 950 if (error) 951 goto done; 952 953 if (*(__be32 *)offset == 954 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { 955 found = 2; 956 break; 957 } 958 } 959 } 960 if (!found) { 961 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); 962 xlog_put_bp(bp); 963 ASSERT(0); 964 return XFS_ERROR(EIO); 965 } 966 967 /* find blk_no of tail of log */ 968 rhead = (xlog_rec_header_t *)offset; 969 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); 970 971 /* 972 * Reset log values according to the state of the log when we 973 * crashed. In the case where head_blk == 0, we bump curr_cycle 974 * one because the next write starts a new cycle rather than 975 * continuing the cycle of the last good log record. At this 976 * point we have guaranteed that all partial log records have been 977 * accounted for. Therefore, we know that the last good log record 978 * written was complete and ended exactly on the end boundary 979 * of the physical log. 980 */ 981 log->l_prev_block = i; 982 log->l_curr_block = (int)*head_blk; 983 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); 984 if (found == 2) 985 log->l_curr_cycle++; 986 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); 987 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); 988 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle, 989 BBTOB(log->l_curr_block)); 990 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle, 991 BBTOB(log->l_curr_block)); 992 993 /* 994 * Look for unmount record. If we find it, then we know there 995 * was a clean unmount. Since 'i' could be the last block in 996 * the physical log, we convert to a log block before comparing 997 * to the head_blk. 998 * 999 * Save the current tail lsn to use to pass to 1000 * xlog_clear_stale_blocks() below. We won't want to clear the 1001 * unmount record if there is one, so we pass the lsn of the 1002 * unmount record rather than the block after it. 
1003 */ 1004 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 1005 int h_size = be32_to_cpu(rhead->h_size); 1006 int h_version = be32_to_cpu(rhead->h_version); 1007 1008 if ((h_version & XLOG_VERSION_2) && 1009 (h_size > XLOG_HEADER_CYCLE_SIZE)) { 1010 hblks = h_size / XLOG_HEADER_CYCLE_SIZE; 1011 if (h_size % XLOG_HEADER_CYCLE_SIZE) 1012 hblks++; 1013 } else { 1014 hblks = 1; 1015 } 1016 } else { 1017 hblks = 1; 1018 } 1019 after_umount_blk = (i + hblks + (int) 1020 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; 1021 tail_lsn = atomic64_read(&log->l_tail_lsn); 1022 if (*head_blk == after_umount_blk && 1023 be32_to_cpu(rhead->h_num_logops) == 1) { 1024 umount_data_blk = (i + hblks) % log->l_logBBsize; 1025 error = xlog_bread(log, umount_data_blk, 1, bp, &offset); 1026 if (error) 1027 goto done; 1028 1029 op_head = (xlog_op_header_t *)offset; 1030 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { 1031 /* 1032 * Set tail and last sync so that newly written 1033 * log records will point recovery to after the 1034 * current unmount record. 1035 */ 1036 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1037 log->l_curr_cycle, after_umount_blk); 1038 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1039 log->l_curr_cycle, after_umount_blk); 1040 *tail_blk = after_umount_blk; 1041 1042 /* 1043 * Note that the unmount was clean. If the unmount 1044 * was not clean, we need to know this to rebuild the 1045 * superblock counters from the perag headers if we 1046 * have a filesystem using non-persistent counters. 1047 */ 1048 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN; 1049 } 1050 } 1051 1052 /* 1053 * Make sure that there are no blocks in front of the head 1054 * with the same cycle number as the head. This can happen 1055 * because we allow multiple outstanding log writes concurrently, 1056 * and the later writes might make it out before earlier ones. 1057 * 1058 * We use the lsn from before modifying it so that we'll never 1059 * overwrite the unmount record after a clean unmount. 1060 * 1061 * Do this only if we are going to recover the filesystem 1062 * 1063 * NOTE: This used to say "if (!readonly)" 1064 * However on Linux, we can & do recover a read-only filesystem. 1065 * We only skip recovery if NORECOVERY is specified on mount, 1066 * in which case we would not be here. 1067 * 1068 * But... if the -device- itself is readonly, just skip this. 1069 * We can't recover this device anyway, so it won't matter. 1070 */ 1071 if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) 1072 error = xlog_clear_stale_blocks(log, tail_lsn); 1073 1074 done: 1075 xlog_put_bp(bp); 1076 1077 if (error) 1078 xfs_warn(log->l_mp, "failed to locate log tail"); 1079 return error; 1080 } 1081 1082 /* 1083 * Is the log zeroed at all? 1084 * 1085 * The last binary search should be changed to perform an X block read 1086 * once X becomes small enough. You can then search linearly through 1087 * the X blocks. This will cut down on the number of reads we need to do. 1088 * 1089 * If the log is partially zeroed, this routine will pass back the blkno 1090 * of the first block with cycle number 0. It won't have a complete LR 1091 * preceding it. 
1092 * 1093 * Return: 1094 * 0 => the log is completely written to 1095 * -1 => use *blk_no as the first block of the log 1096 * >0 => error has occurred 1097 */ 1098 STATIC int 1099 xlog_find_zeroed( 1100 struct xlog *log, 1101 xfs_daddr_t *blk_no) 1102 { 1103 xfs_buf_t *bp; 1104 xfs_caddr_t offset; 1105 uint first_cycle, last_cycle; 1106 xfs_daddr_t new_blk, last_blk, start_blk; 1107 xfs_daddr_t num_scan_bblks; 1108 int error, log_bbnum = log->l_logBBsize; 1109 1110 *blk_no = 0; 1111 1112 /* check totally zeroed log */ 1113 bp = xlog_get_bp(log, 1); 1114 if (!bp) 1115 return ENOMEM; 1116 error = xlog_bread(log, 0, 1, bp, &offset); 1117 if (error) 1118 goto bp_err; 1119 1120 first_cycle = xlog_get_cycle(offset); 1121 if (first_cycle == 0) { /* completely zeroed log */ 1122 *blk_no = 0; 1123 xlog_put_bp(bp); 1124 return -1; 1125 } 1126 1127 /* check partially zeroed log */ 1128 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset); 1129 if (error) 1130 goto bp_err; 1131 1132 last_cycle = xlog_get_cycle(offset); 1133 if (last_cycle != 0) { /* log completely written to */ 1134 xlog_put_bp(bp); 1135 return 0; 1136 } else if (first_cycle != 1) { 1137 /* 1138 * If the cycle of the last block is zero, the cycle of 1139 * the first block must be 1. If it's not, maybe we're 1140 * not looking at a log... Bail out. 1141 */ 1142 xfs_warn(log->l_mp, 1143 "Log inconsistent or not a log (last==0, first!=1)"); 1144 error = XFS_ERROR(EINVAL); 1145 goto bp_err; 1146 } 1147 1148 /* we have a partially zeroed log */ 1149 last_blk = log_bbnum-1; 1150 if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0))) 1151 goto bp_err; 1152 1153 /* 1154 * Validate the answer. Because there is no way to guarantee that 1155 * the entire log is made up of log records which are the same size, 1156 * we scan over the defined maximum blocks. At this point, the maximum 1157 * is not chosen to mean anything special. XXXmiken 1158 */ 1159 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); 1160 ASSERT(num_scan_bblks <= INT_MAX); 1161 1162 if (last_blk < num_scan_bblks) 1163 num_scan_bblks = last_blk; 1164 start_blk = last_blk - num_scan_bblks; 1165 1166 /* 1167 * We search for any instances of cycle number 0 that occur before 1168 * our current estimate of the head. What we're trying to detect is 1169 * 1 ... | 0 | 1 | 0... 1170 * ^ binary search ends here 1171 */ 1172 if ((error = xlog_find_verify_cycle(log, start_blk, 1173 (int)num_scan_bblks, 0, &new_blk))) 1174 goto bp_err; 1175 if (new_blk != -1) 1176 last_blk = new_blk; 1177 1178 /* 1179 * Potentially backup over partial log record write. We don't need 1180 * to search the end of the log because we know it is zero. 1181 */ 1182 if ((error = xlog_find_verify_log_record(log, start_blk, 1183 &last_blk, 0)) == -1) { 1184 error = XFS_ERROR(EIO); 1185 goto bp_err; 1186 } else if (error) 1187 goto bp_err; 1188 1189 *blk_no = last_blk; 1190 bp_err: 1191 xlog_put_bp(bp); 1192 if (error) 1193 return error; 1194 return -1; 1195 } 1196 1197 /* 1198 * These are simple subroutines used by xlog_clear_stale_blocks() below 1199 * to initialize a buffer full of empty log record headers and write 1200 * them into the log. 
1201 */ 1202 STATIC void 1203 xlog_add_record( 1204 struct xlog *log, 1205 xfs_caddr_t buf, 1206 int cycle, 1207 int block, 1208 int tail_cycle, 1209 int tail_block) 1210 { 1211 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; 1212 1213 memset(buf, 0, BBSIZE); 1214 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); 1215 recp->h_cycle = cpu_to_be32(cycle); 1216 recp->h_version = cpu_to_be32( 1217 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); 1218 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); 1219 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block)); 1220 recp->h_fmt = cpu_to_be32(XLOG_FMT); 1221 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); 1222 } 1223 1224 STATIC int 1225 xlog_write_log_records( 1226 struct xlog *log, 1227 int cycle, 1228 int start_block, 1229 int blocks, 1230 int tail_cycle, 1231 int tail_block) 1232 { 1233 xfs_caddr_t offset; 1234 xfs_buf_t *bp; 1235 int balign, ealign; 1236 int sectbb = log->l_sectBBsize; 1237 int end_block = start_block + blocks; 1238 int bufblks; 1239 int error = 0; 1240 int i, j = 0; 1241 1242 /* 1243 * Greedily allocate a buffer big enough to handle the full 1244 * range of basic blocks to be written. If that fails, try 1245 * a smaller size. We need to be able to write at least a 1246 * log sector, or we're out of luck. 1247 */ 1248 bufblks = 1 << ffs(blocks); 1249 while (bufblks > log->l_logBBsize) 1250 bufblks >>= 1; 1251 while (!(bp = xlog_get_bp(log, bufblks))) { 1252 bufblks >>= 1; 1253 if (bufblks < sectbb) 1254 return ENOMEM; 1255 } 1256 1257 /* We may need to do a read at the start to fill in part of 1258 * the buffer in the starting sector not covered by the first 1259 * write below. 1260 */ 1261 balign = round_down(start_block, sectbb); 1262 if (balign != start_block) { 1263 error = xlog_bread_noalign(log, start_block, 1, bp); 1264 if (error) 1265 goto out_put_bp; 1266 1267 j = start_block - balign; 1268 } 1269 1270 for (i = start_block; i < end_block; i += bufblks) { 1271 int bcount, endcount; 1272 1273 bcount = min(bufblks, end_block - start_block); 1274 endcount = bcount - j; 1275 1276 /* We may need to do a read at the end to fill in part of 1277 * the buffer in the final sector not covered by the write. 1278 * If this is the same sector as the above read, skip it. 1279 */ 1280 ealign = round_down(end_block, sectbb); 1281 if (j == 0 && (start_block + endcount > ealign)) { 1282 offset = bp->b_addr + BBTOB(ealign - start_block); 1283 error = xlog_bread_offset(log, ealign, sectbb, 1284 bp, offset); 1285 if (error) 1286 break; 1287 1288 } 1289 1290 offset = xlog_align(log, start_block, endcount, bp); 1291 for (; j < endcount; j++) { 1292 xlog_add_record(log, offset, cycle, i+j, 1293 tail_cycle, tail_block); 1294 offset += BBSIZE; 1295 } 1296 error = xlog_bwrite(log, start_block, endcount, bp); 1297 if (error) 1298 break; 1299 start_block += endcount; 1300 j = 0; 1301 } 1302 1303 out_put_bp: 1304 xlog_put_bp(bp); 1305 return error; 1306 } 1307 1308 /* 1309 * This routine is called to blow away any incomplete log writes out 1310 * in front of the log head. We do this so that we won't become confused 1311 * if we come up, write only a little bit more, and then crash again. 1312 * If we leave the partial log records out there, this situation could 1313 * cause us to think those partial writes are valid blocks since they 1314 * have the current cycle number. 
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *        n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid)
{
	xlog_recover_t		*trans;

	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
	return NULL;
}

STATIC void
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid   = tid;
	trans->r_lsn	   = lsn;
	INIT_LIST_HEAD(&trans->r_itemq);

	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
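
/*
 * Example (editor's annotation, not original source): if a region is
 * split across two log records, the first fragment arrives via
 * xlog_recover_add_to_trans() below, and the continuation lands here,
 * where kmem_realloc() grows the tail ri_buf entry and the new bytes are
 * appended so the item sees one contiguous region again at replay time.
 */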
/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour.  The rules are:
 *
 *	1. Every item is only logged once in a given transaction.  Hence it
 *	   represents the last logged state of the item.  Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction.  However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table.  Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it.
 *	   For filesystems using the ICREATE transactions, this means
 *	   XFS_LI_ICREATE objects need to get treated the same as inode
 *	   allocation buffers as they create and initialise the buffers
 *	   directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are
 *	   replayed.  This ensures that inodes are completely flushed to the
 *	   inode buffer in a "free" state before we remove the unlinked inode
 *	   list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them.  We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them
 * out from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists.  Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order.  For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
	}
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return 0;
}
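
/*
 * Resulting r_itemq layout (editor's annotation), head to tail:
 * ordinary and XFS_LI_ICREATE buffers, then inode/dquot/EFI/EFD items,
 * then inode unlink buffers, then cancelled buffers - matching the
 * replay-ordering rules described above.
 */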
/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}

/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table.  If it does, return the cancel
 * buffer structure to the caller.
 */
STATIC struct xfs_buf_cancel *
xlog_peek_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table) {
		/* empty table means no cancelled buffers in the log */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return NULL;
	}

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return NULL;
}
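
/*
 * Refcounting example (editor's annotation): a buffer cancelled twice in
 * the log gets one table entry with bc_refcount == 2 from pass 1; each
 * XFS_BLF_CANCEL item seen in pass 2 drops the count via
 * xlog_check_buffer_cancelled() below, and the entry is freed on the last
 * reference so later reuse of those blocks replays normally.
 */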
1810 */ 1811 STATIC int 1812 xlog_check_buffer_cancelled( 1813 struct xlog *log, 1814 xfs_daddr_t blkno, 1815 uint len, 1816 ushort flags) 1817 { 1818 struct xfs_buf_cancel *bcp; 1819 1820 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags); 1821 if (!bcp) 1822 return 0; 1823 1824 /* 1825 * We've got a match, so return 1 so that the recovery of this buffer 1826 * is cancelled. If this buffer is actually a buffer cancel log 1827 * item, then decrement the refcount on the one in the table and 1828 * remove it if this is the last reference. 1829 */ 1830 if (flags & XFS_BLF_CANCEL) { 1831 if (--bcp->bc_refcount == 0) { 1832 list_del(&bcp->bc_list); 1833 kmem_free(bcp); 1834 } 1835 } 1836 return 1; 1837 } 1838 1839 /* 1840 * Perform recovery for a buffer full of inodes. In these buffers, the only 1841 * data which should be recovered is that which corresponds to the 1842 * di_next_unlinked pointers in the on disk inode structures. The rest of the 1843 * data for the inodes is always logged through the inodes themselves rather 1844 * than the inode buffer and is recovered in xlog_recover_inode_pass2(). 1845 * 1846 * The only time when buffers full of inodes are fully recovered is when the 1847 * buffer is full of newly allocated inodes. In this case the buffer will 1848 * not be marked as an inode buffer and so will be sent to 1849 * xlog_recover_do_reg_buffer() below during recovery. 1850 */ 1851 STATIC int 1852 xlog_recover_do_inode_buffer( 1853 struct xfs_mount *mp, 1854 xlog_recover_item_t *item, 1855 struct xfs_buf *bp, 1856 xfs_buf_log_format_t *buf_f) 1857 { 1858 int i; 1859 int item_index = 0; 1860 int bit = 0; 1861 int nbits = 0; 1862 int reg_buf_offset = 0; 1863 int reg_buf_bytes = 0; 1864 int next_unlinked_offset; 1865 int inodes_per_buf; 1866 xfs_agino_t *logged_nextp; 1867 xfs_agino_t *buffer_nextp; 1868 1869 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); 1870 1871 /* 1872 * Post recovery validation only works properly on CRC enabled 1873 * filesystems. 1874 */ 1875 if (xfs_sb_version_hascrc(&mp->m_sb)) 1876 bp->b_ops = &xfs_inode_buf_ops; 1877 1878 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; 1879 for (i = 0; i < inodes_per_buf; i++) { 1880 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + 1881 offsetof(xfs_dinode_t, di_next_unlinked); 1882 1883 while (next_unlinked_offset >= 1884 (reg_buf_offset + reg_buf_bytes)) { 1885 /* 1886 * The next di_next_unlinked field is beyond 1887 * the current logged region. Find the next 1888 * logged region that contains or is beyond 1889 * the current di_next_unlinked field. 1890 */ 1891 bit += nbits; 1892 bit = xfs_next_bit(buf_f->blf_data_map, 1893 buf_f->blf_map_size, bit); 1894 1895 /* 1896 * If there are no more logged regions in the 1897 * buffer, then we're done. 1898 */ 1899 if (bit == -1) 1900 return 0; 1901 1902 nbits = xfs_contig_bits(buf_f->blf_data_map, 1903 buf_f->blf_map_size, bit); 1904 ASSERT(nbits > 0); 1905 reg_buf_offset = bit << XFS_BLF_SHIFT; 1906 reg_buf_bytes = nbits << XFS_BLF_SHIFT; 1907 item_index++; 1908 } 1909 1910 /* 1911 * If the current logged region starts after the current 1912 * di_next_unlinked field, then move on to the next 1913 * di_next_unlinked field.
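 * This simply means that this particular inode's unlinked pointer
 * was not logged, so there is nothing to copy over for it.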
1914 */ 1915 if (next_unlinked_offset < reg_buf_offset) 1916 continue; 1917 1918 ASSERT(item->ri_buf[item_index].i_addr != NULL); 1919 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); 1920 ASSERT((reg_buf_offset + reg_buf_bytes) <= 1921 BBTOB(bp->b_io_length)); 1922 1923 /* 1924 * The current logged region contains a copy of the 1925 * current di_next_unlinked field. Extract its value 1926 * and copy it to the buffer copy. 1927 */ 1928 logged_nextp = item->ri_buf[item_index].i_addr + 1929 next_unlinked_offset - reg_buf_offset; 1930 if (unlikely(*logged_nextp == 0)) { 1931 xfs_alert(mp, 1932 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). " 1933 "Trying to replay bad (0) inode di_next_unlinked field.", 1934 item, bp); 1935 XFS_ERROR_REPORT("xlog_recover_do_inode_buf", 1936 XFS_ERRLEVEL_LOW, mp); 1937 return XFS_ERROR(EFSCORRUPTED); 1938 } 1939 1940 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, 1941 next_unlinked_offset); 1942 *buffer_nextp = *logged_nextp; 1943 1944 /* 1945 * If necessary, recalculate the CRC in the on-disk inode. We 1946 * have to leave the inode in a consistent state for whoever 1947 * reads it next.... 1948 */ 1949 xfs_dinode_calc_crc(mp, (struct xfs_dinode *) 1950 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); 1951 1952 } 1953 1954 return 0; 1955 } 1956 1957 /* 1958 * V5 filesystems know the age of the buffer on disk being recovered. We can 1959 * have newer objects on disk than we are replaying, and so for these cases we 1960 * don't want to replay the current change as that will make the buffer contents 1961 * temporarily invalid on disk. 1962 * 1963 * The magic number might not match the buffer type we are going to recover 1964 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence 1965 * extract the LSN of the existing object in the buffer based on its current 1966 * magic number. If we don't recognise the magic number in the buffer, then 1967 * return an LSN of -1 so that the caller knows it was an unrecognised block and 1968 * so can recover the buffer. 1969 * 1970 * Note: we cannot rely solely on magic number matches to determine that the 1971 * buffer has a valid LSN - we also need to verify that it belongs to this 1972 * filesystem, so we need to extract the object's LSN and compare it to that 1973 * which we read from the superblock. If the UUIDs don't match, then we've got a 1974 * stale metadata block from an old filesystem instance that we need to recover 1975 * over the top of.
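 * For example, a block left over from a previous mkfs can carry a
 * plausible magic number and LSN; only the UUID mismatch tells us it
 * is stale and must be recovered over unconditionally.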
1976 */ 1977 static xfs_lsn_t 1978 xlog_recover_get_buf_lsn( 1979 struct xfs_mount *mp, 1980 struct xfs_buf *bp) 1981 { 1982 __uint32_t magic32; 1983 __uint16_t magic16; 1984 __uint16_t magicda; 1985 void *blk = bp->b_addr; 1986 uuid_t *uuid; 1987 xfs_lsn_t lsn = -1; 1988 1989 /* v4 filesystems always recover immediately */ 1990 if (!xfs_sb_version_hascrc(&mp->m_sb)) 1991 goto recover_immediately; 1992 1993 magic32 = be32_to_cpu(*(__be32 *)blk); 1994 switch (magic32) { 1995 case XFS_ABTB_CRC_MAGIC: 1996 case XFS_ABTC_CRC_MAGIC: 1997 case XFS_ABTB_MAGIC: 1998 case XFS_ABTC_MAGIC: 1999 case XFS_IBT_CRC_MAGIC: 2000 case XFS_IBT_MAGIC: { 2001 struct xfs_btree_block *btb = blk; 2002 2003 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn); 2004 uuid = &btb->bb_u.s.bb_uuid; 2005 break; 2006 } 2007 case XFS_BMAP_CRC_MAGIC: 2008 case XFS_BMAP_MAGIC: { 2009 struct xfs_btree_block *btb = blk; 2010 2011 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); 2012 uuid = &btb->bb_u.l.bb_uuid; 2013 break; 2014 } 2015 case XFS_AGF_MAGIC: 2016 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); 2017 uuid = &((struct xfs_agf *)blk)->agf_uuid; 2018 break; 2019 case XFS_AGFL_MAGIC: 2020 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); 2021 uuid = &((struct xfs_agfl *)blk)->agfl_uuid; 2022 break; 2023 case XFS_AGI_MAGIC: 2024 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); 2025 uuid = &((struct xfs_agi *)blk)->agi_uuid; 2026 break; 2027 case XFS_SYMLINK_MAGIC: 2028 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); 2029 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid; 2030 break; 2031 case XFS_DIR3_BLOCK_MAGIC: 2032 case XFS_DIR3_DATA_MAGIC: 2033 case XFS_DIR3_FREE_MAGIC: 2034 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); 2035 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; 2036 break; 2037 case XFS_ATTR3_RMT_MAGIC: 2038 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 2039 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; 2040 break; 2041 case XFS_SB_MAGIC: 2042 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 2043 uuid = &((struct xfs_dsb *)blk)->sb_uuid; 2044 break; 2045 default: 2046 break; 2047 } 2048 2049 if (lsn != (xfs_lsn_t)-1) { 2050 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) 2051 goto recover_immediately; 2052 return lsn; 2053 } 2054 2055 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); 2056 switch (magicda) { 2057 case XFS_DIR3_LEAF1_MAGIC: 2058 case XFS_DIR3_LEAFN_MAGIC: 2059 case XFS_DA3_NODE_MAGIC: 2060 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); 2061 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid; 2062 break; 2063 default: 2064 break; 2065 } 2066 2067 if (lsn != (xfs_lsn_t)-1) { 2068 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid)) 2069 goto recover_immediately; 2070 return lsn; 2071 } 2072 2073 /* 2074 * We do individual object checks on dquot and inode buffers as they 2075 * have their own individual LSN records. Also, we could have a stale 2076 * buffer here, so we have to at least recognise these buffer types. 2077 * 2078 * A noted complexity here is inode unlinked list processing - it logs 2079 * the inode directly in the buffer, but we don't know which inodes have 2080 * been modified, and there is no global buffer LSN. Hence we need to 2081 * recover all inode buffer types immediately. This problem will be 2082 * fixed by logical logging of the unlinked list modifications.
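 * Hence the 16 bit magic number check below sends both dquot and
 * inode buffers straight to immediate recovery, regardless of any
 * LSNs the individual objects within them may carry.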
2083 */ 2084 magic16 = be16_to_cpu(*(__be16 *)blk); 2085 switch (magic16) { 2086 case XFS_DQUOT_MAGIC: 2087 case XFS_DINODE_MAGIC: 2088 goto recover_immediately; 2089 default: 2090 break; 2091 } 2092 2093 /* unknown buffer contents, recover immediately */ 2094 2095 recover_immediately: 2096 return (xfs_lsn_t)-1; 2097 2098 } 2099 2100 /* 2101 * Validate the recovered buffer is of the correct type and attach the 2102 * appropriate buffer operations to it for writeback. Magic numbers are in a 2103 * few places: 2104 * the first 16 bits of the buffer (inode buffer, dquot buffer), 2105 * the first 32 bits of the buffer (most blocks), 2106 * inside a struct xfs_da_blkinfo at the start of the buffer. 2107 */ 2108 static void 2109 xlog_recover_validate_buf_type( 2110 struct xfs_mount *mp, 2111 struct xfs_buf *bp, 2112 xfs_buf_log_format_t *buf_f) 2113 { 2114 struct xfs_da_blkinfo *info = bp->b_addr; 2115 __uint32_t magic32; 2116 __uint16_t magic16; 2117 __uint16_t magicda; 2118 2119 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); 2120 magic16 = be16_to_cpu(*(__be16 *)bp->b_addr); 2121 magicda = be16_to_cpu(info->magic); 2122 switch (xfs_blft_from_flags(buf_f)) { 2123 case XFS_BLFT_BTREE_BUF: 2124 switch (magic32) { 2125 case XFS_ABTB_CRC_MAGIC: 2126 case XFS_ABTC_CRC_MAGIC: 2127 case XFS_ABTB_MAGIC: 2128 case XFS_ABTC_MAGIC: 2129 bp->b_ops = &xfs_allocbt_buf_ops; 2130 break; 2131 case XFS_IBT_CRC_MAGIC: 2132 case XFS_IBT_MAGIC: 2133 bp->b_ops = &xfs_inobt_buf_ops; 2134 break; 2135 case XFS_BMAP_CRC_MAGIC: 2136 case XFS_BMAP_MAGIC: 2137 bp->b_ops = &xfs_bmbt_buf_ops; 2138 break; 2139 default: 2140 xfs_warn(mp, "Bad btree block magic!"); 2141 ASSERT(0); 2142 break; 2143 } 2144 break; 2145 case XFS_BLFT_AGF_BUF: 2146 if (magic32 != XFS_AGF_MAGIC) { 2147 xfs_warn(mp, "Bad AGF block magic!"); 2148 ASSERT(0); 2149 break; 2150 } 2151 bp->b_ops = &xfs_agf_buf_ops; 2152 break; 2153 case XFS_BLFT_AGFL_BUF: 2154 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2155 break; 2156 if (magic32 != XFS_AGFL_MAGIC) { 2157 xfs_warn(mp, "Bad AGFL block magic!"); 2158 ASSERT(0); 2159 break; 2160 } 2161 bp->b_ops = &xfs_agfl_buf_ops; 2162 break; 2163 case XFS_BLFT_AGI_BUF: 2164 if (magic32 != XFS_AGI_MAGIC) { 2165 xfs_warn(mp, "Bad AGI block magic!"); 2166 ASSERT(0); 2167 break; 2168 } 2169 bp->b_ops = &xfs_agi_buf_ops; 2170 break; 2171 case XFS_BLFT_UDQUOT_BUF: 2172 case XFS_BLFT_PDQUOT_BUF: 2173 case XFS_BLFT_GDQUOT_BUF: 2174 #ifdef CONFIG_XFS_QUOTA 2175 if (magic16 != XFS_DQUOT_MAGIC) { 2176 xfs_warn(mp, "Bad DQUOT block magic!"); 2177 ASSERT(0); 2178 break; 2179 } 2180 bp->b_ops = &xfs_dquot_buf_ops; 2181 #else 2182 xfs_alert(mp, 2183 "Trying to recover dquots without QUOTA support built in!"); 2184 ASSERT(0); 2185 #endif 2186 break; 2187 case XFS_BLFT_DINO_BUF: 2188 /* 2189 * we get here with inode allocation buffers, not buffers that 2190 * track unlinked list changes.
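 * Unlinked list updates are logged with XFS_BLF_INODE_BUF set and are
 * recovered through xlog_recover_do_inode_buffer() instead, so they
 * never reach this validation path.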
2191 */ 2192 if (magic16 != XFS_DINODE_MAGIC) { 2193 xfs_warn(mp, "Bad INODE block magic!"); 2194 ASSERT(0); 2195 break; 2196 } 2197 bp->b_ops = &xfs_inode_buf_ops; 2198 break; 2199 case XFS_BLFT_SYMLINK_BUF: 2200 if (magic32 != XFS_SYMLINK_MAGIC) { 2201 xfs_warn(mp, "Bad symlink block magic!"); 2202 ASSERT(0); 2203 break; 2204 } 2205 bp->b_ops = &xfs_symlink_buf_ops; 2206 break; 2207 case XFS_BLFT_DIR_BLOCK_BUF: 2208 if (magic32 != XFS_DIR2_BLOCK_MAGIC && 2209 magic32 != XFS_DIR3_BLOCK_MAGIC) { 2210 xfs_warn(mp, "Bad dir block magic!"); 2211 ASSERT(0); 2212 break; 2213 } 2214 bp->b_ops = &xfs_dir3_block_buf_ops; 2215 break; 2216 case XFS_BLFT_DIR_DATA_BUF: 2217 if (magic32 != XFS_DIR2_DATA_MAGIC && 2218 magic32 != XFS_DIR3_DATA_MAGIC) { 2219 xfs_warn(mp, "Bad dir data magic!"); 2220 ASSERT(0); 2221 break; 2222 } 2223 bp->b_ops = &xfs_dir3_data_buf_ops; 2224 break; 2225 case XFS_BLFT_DIR_FREE_BUF: 2226 if (magic32 != XFS_DIR2_FREE_MAGIC && 2227 magic32 != XFS_DIR3_FREE_MAGIC) { 2228 xfs_warn(mp, "Bad dir3 free magic!"); 2229 ASSERT(0); 2230 break; 2231 } 2232 bp->b_ops = &xfs_dir3_free_buf_ops; 2233 break; 2234 case XFS_BLFT_DIR_LEAF1_BUF: 2235 if (magicda != XFS_DIR2_LEAF1_MAGIC && 2236 magicda != XFS_DIR3_LEAF1_MAGIC) { 2237 xfs_warn(mp, "Bad dir leaf1 magic!"); 2238 ASSERT(0); 2239 break; 2240 } 2241 bp->b_ops = &xfs_dir3_leaf1_buf_ops; 2242 break; 2243 case XFS_BLFT_DIR_LEAFN_BUF: 2244 if (magicda != XFS_DIR2_LEAFN_MAGIC && 2245 magicda != XFS_DIR3_LEAFN_MAGIC) { 2246 xfs_warn(mp, "Bad dir leafn magic!"); 2247 ASSERT(0); 2248 break; 2249 } 2250 bp->b_ops = &xfs_dir3_leafn_buf_ops; 2251 break; 2252 case XFS_BLFT_DA_NODE_BUF: 2253 if (magicda != XFS_DA_NODE_MAGIC && 2254 magicda != XFS_DA3_NODE_MAGIC) { 2255 xfs_warn(mp, "Bad da node magic!"); 2256 ASSERT(0); 2257 break; 2258 } 2259 bp->b_ops = &xfs_da3_node_buf_ops; 2260 break; 2261 case XFS_BLFT_ATTR_LEAF_BUF: 2262 if (magicda != XFS_ATTR_LEAF_MAGIC && 2263 magicda != XFS_ATTR3_LEAF_MAGIC) { 2264 xfs_warn(mp, "Bad attr leaf magic!"); 2265 ASSERT(0); 2266 break; 2267 } 2268 bp->b_ops = &xfs_attr3_leaf_buf_ops; 2269 break; 2270 case XFS_BLFT_ATTR_RMT_BUF: 2271 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2272 break; 2273 if (magic32 != XFS_ATTR3_RMT_MAGIC) { 2274 xfs_warn(mp, "Bad attr remote magic!"); 2275 ASSERT(0); 2276 break; 2277 } 2278 bp->b_ops = &xfs_attr3_rmt_buf_ops; 2279 break; 2280 case XFS_BLFT_SB_BUF: 2281 if (magic32 != XFS_SB_MAGIC) { 2282 xfs_warn(mp, "Bad SB block magic!"); 2283 ASSERT(0); 2284 break; 2285 } 2286 bp->b_ops = &xfs_sb_buf_ops; 2287 break; 2288 default: 2289 xfs_warn(mp, "Unknown buffer type %d!", 2290 xfs_blft_from_flags(buf_f)); 2291 break; 2292 } 2293 } 2294 2295 /* 2296 * Perform a 'normal' buffer recovery. Each logged region of the 2297 * buffer should be copied over the corresponding region in the 2298 * given buffer. The bitmap in the buf log format structure indicates 2299 * where to place the logged data. 
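 * Each bit in the map covers one XFS_BLF_CHUNK (1 << XFS_BLF_SHIFT,
 * i.e. 128) byte region of the buffer. For example, a dirty run
 * starting at bit 2 with 3 contiguous bits set maps to buffer offset
 * 2 << XFS_BLF_SHIFT = 256 for 3 << XFS_BLF_SHIFT = 384 bytes.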
2300 */ 2301 STATIC void 2302 xlog_recover_do_reg_buffer( 2303 struct xfs_mount *mp, 2304 xlog_recover_item_t *item, 2305 struct xfs_buf *bp, 2306 xfs_buf_log_format_t *buf_f) 2307 { 2308 int i; 2309 int bit; 2310 int nbits; 2311 int error; 2312 2313 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); 2314 2315 bit = 0; 2316 i = 1; /* 0 is the buf format structure */ 2317 while (1) { 2318 bit = xfs_next_bit(buf_f->blf_data_map, 2319 buf_f->blf_map_size, bit); 2320 if (bit == -1) 2321 break; 2322 nbits = xfs_contig_bits(buf_f->blf_data_map, 2323 buf_f->blf_map_size, bit); 2324 ASSERT(nbits > 0); 2325 ASSERT(item->ri_buf[i].i_addr != NULL); 2326 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); 2327 ASSERT(BBTOB(bp->b_io_length) >= 2328 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT)); 2329 2330 /* 2331 * The dirty regions logged in the buffer, even though 2332 * contiguous, may span multiple chunks. This is because the 2333 * dirty region may span a physical page boundary in a buffer 2334 * and hence be split into two separate vectors for writing into 2335 * the log. Hence we need to trim nbits back to the length of 2336 * the current region being copied out of the log. 2337 */ 2338 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT)) 2339 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT; 2340 2341 /* 2342 * Do a sanity check if this is a dquot buffer. Just checking 2343 * the first dquot in the buffer should do. XXX This is 2344 * probably a good thing to do for other buf types also. 2345 */ 2346 error = 0; 2347 if (buf_f->blf_flags & 2348 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2349 if (item->ri_buf[i].i_addr == NULL) { 2350 xfs_alert(mp, 2351 "XFS: NULL dquot in %s.", __func__); 2352 goto next; 2353 } 2354 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) { 2355 xfs_alert(mp, 2356 "XFS: dquot too small (%d) in %s.", 2357 item->ri_buf[i].i_len, __func__); 2358 goto next; 2359 } 2360 error = xfs_dqcheck(mp, item->ri_buf[i].i_addr, 2361 -1, 0, XFS_QMOPT_DOWARN, 2362 "dquot_buf_recover"); 2363 if (error) 2364 goto next; 2365 } 2366 2367 memcpy(xfs_buf_offset(bp, 2368 (uint)bit << XFS_BLF_SHIFT), /* dest */ 2369 item->ri_buf[i].i_addr, /* source */ 2370 nbits << XFS_BLF_SHIFT); /* length */ 2371 next: 2372 i++; 2373 bit += nbits; 2374 } 2375 2376 /* Shouldn't be any more regions */ 2377 ASSERT(i == item->ri_total); 2378 2379 /* 2380 * We can only do post recovery validation on items on CRC enabled 2381 * filesystems as we need to know when the buffer was written to be able 2382 * to determine if we should have replayed the item. If we replay old 2383 * metadata over a newer buffer, then it will enter a temporarily 2384 * inconsistent state resulting in verification failures. Hence for now 2385 * just avoid the verification stage for non-CRC filesystems. 2386 */ 2387 if (xfs_sb_version_hascrc(&mp->m_sb)) 2388 xlog_recover_validate_buf_type(mp, bp, buf_f); 2389 } 2390 2391 /* 2392 * Perform a dquot buffer recovery. 2393 * Simple algorithm: if we have found a QUOTAOFF log item of the same type 2394 * (ie. USR or GRP), then just toss this buffer away; don't recover it. 2395 * Else, treat it as a regular buffer and do recovery.
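 * For example, if pass 1 saw a user quotaoff log item, then
 * l_quotaoffs_flag has XFS_DQ_USER set and any XFS_BLF_UDQUOT_BUF
 * buffers are silently skipped here instead of being replayed.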
2396 */ 2397 STATIC void 2398 xlog_recover_do_dquot_buffer( 2399 struct xfs_mount *mp, 2400 struct xlog *log, 2401 struct xlog_recover_item *item, 2402 struct xfs_buf *bp, 2403 struct xfs_buf_log_format *buf_f) 2404 { 2405 uint type; 2406 2407 trace_xfs_log_recover_buf_dquot_buf(log, buf_f); 2408 2409 /* 2410 * Filesystems are required to send in quota flags at mount time. 2411 */ 2412 if (mp->m_qflags == 0) { 2413 return; 2414 } 2415 2416 type = 0; 2417 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) 2418 type |= XFS_DQ_USER; 2419 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF) 2420 type |= XFS_DQ_PROJ; 2421 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF) 2422 type |= XFS_DQ_GROUP; 2423 /* 2424 * This type of quota was turned off, so ignore this buffer 2425 */ 2426 if (log->l_quotaoffs_flag & type) 2427 return; 2428 2429 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2430 } 2431 2432 /* 2433 * This routine replays a modification made to a buffer at runtime. 2434 * There are actually two types of buffer, regular and inode, which 2435 * are handled differently. Inode buffers are special in that we 2436 * only recover a specific set of data from them, namely 2437 * the inode di_next_unlinked fields. This is because all other inode 2438 * data is actually logged via inode records and any data we replay 2439 * here which overlaps that may be stale. 2440 * 2441 * When meta-data buffers are freed at run time we log a buffer item 2442 * with the XFS_BLF_CANCEL bit set to indicate that previous copies 2443 * of the buffer in the log should not be replayed at recovery time. 2444 * This is so that if the blocks covered by the buffer are reused for 2445 * file data before we crash we don't end up replaying old, freed 2446 * meta-data into a user's file. 2447 * 2448 * To handle the cancellation of buffer log items, we make two passes 2449 * over the log during recovery. During the first we build a table of 2450 * those buffers which have been cancelled, and during the second we 2451 * only replay those buffers which do not have corresponding cancel 2452 * records in the table. See xlog_recover_buffer_pass[1,2] above 2453 * for more details on the implementation of the table of cancel records. 2454 */ 2455 STATIC int 2456 xlog_recover_buffer_pass2( 2457 struct xlog *log, 2458 struct list_head *buffer_list, 2459 struct xlog_recover_item *item, 2460 xfs_lsn_t current_lsn) 2461 { 2462 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 2463 xfs_mount_t *mp = log->l_mp; 2464 xfs_buf_t *bp; 2465 int error; 2466 uint buf_flags; 2467 xfs_lsn_t lsn; 2468 2469 /* 2470 * In this pass we only want to recover all the buffers which have 2471 * not been cancelled and are not cancellation buffers themselves. 2472 */ 2473 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, 2474 buf_f->blf_len, buf_f->blf_flags)) { 2475 trace_xfs_log_recover_buf_cancel(log, buf_f); 2476 return 0; 2477 } 2478 2479 trace_xfs_log_recover_buf_recover(log, buf_f); 2480 2481 buf_flags = 0; 2482 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) 2483 buf_flags |= XBF_UNMAPPED; 2484 2485 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, 2486 buf_flags, NULL); 2487 if (!bp) 2488 return XFS_ERROR(ENOMEM); 2489 error = bp->b_error; 2490 if (error) { 2491 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); 2492 goto out_release; 2493 } 2494 2495 /* 2496 * recover the buffer only if we get an LSN from it and it's less than 2497 * the lsn of the transaction we are replaying.
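 * An LSN of -1 from xlog_recover_get_buf_lsn() means the buffer
 * contents were not recognised (or this is a pre-CRC filesystem), in
 * which case we always replay the change.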
2498 */ 2499 lsn = xlog_recover_get_buf_lsn(mp, bp); 2500 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) 2501 goto out_release; 2502 2503 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { 2504 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); 2505 } else if (buf_f->blf_flags & 2506 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2507 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2508 } else { 2509 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2510 } 2511 if (error) 2512 goto out_release; 2513 2514 /* 2515 * Perform delayed write on the buffer. Asynchronous writes will be 2516 * slower when taking into account all the buffers to be flushed. 2517 * 2518 * Also make sure that only inode buffers with good sizes stay in 2519 * the buffer cache. The kernel moves inodes in buffers of 1 block 2520 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode 2521 * buffers in the log can be a different size if the log was generated 2522 * by an older kernel using unclustered inode buffers or a newer kernel 2523 * running with a different inode cluster size. Regardless, if the 2524 * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE) 2525 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep 2526 * the buffer out of the buffer cache so that the buffer won't 2527 * overlap with future reads of those inodes. 2528 */ 2529 if (XFS_DINODE_MAGIC == 2530 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && 2531 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, 2532 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { 2533 xfs_buf_stale(bp); 2534 error = xfs_bwrite(bp); 2535 } else { 2536 ASSERT(bp->b_target->bt_mount == mp); 2537 bp->b_iodone = xlog_recover_iodone; 2538 xfs_buf_delwri_queue(bp, buffer_list); 2539 } 2540 2541 out_release: 2542 xfs_buf_relse(bp); 2543 return error; 2544 } 2545 2546 /* 2547 * Inode fork owner changes 2548 * 2549 * If we have been told that we have to reparent the inode fork, it's because an 2550 * extent swap operation on a CRC enabled filesystem has been done and we are 2551 * replaying it. We need to walk the BMBT of the appropriate fork and change the 2552 * owners of it. 2553 * 2554 * The complexity here is that we don't have an inode context to work with, so 2555 * after we've replayed the inode we need to instantiate one. This is where the 2556 * fun begins. 2557 * 2558 * We are in the middle of log recovery, so we can't run transactions. That 2559 * means we cannot use cache coherent inode instantiation via xfs_iget(), as 2560 * that will result in the corresponding iput() running the inode through 2561 * xfs_inactive(). If we've just replayed an inode core that changes the link 2562 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run 2563 * transactions (bad!). 2564 * 2565 * So, to avoid this, we instantiate an inode directly from the inode core we've 2566 * just recovered. We have the buffer still locked, and all we really need to 2567 * instantiate is the inode core and the forks being modified. We can do this 2568 * manually, then run the inode btree owner change, and then tear down the 2569 * xfs_inode without having to run any transactions at all. 2570 * 2571 * Also, because we don't have a transaction context available here but need to 2572 * gather all the buffers we modify for writeback, we pass the buffer_list 2573 * to the operation to use.
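 * Note that the inode constructed here is never inserted into the
 * inode cache; it lives only long enough to drive the bmbt owner
 * change and is then torn down again with xfs_inode_free().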
2574 */ 2575 2576 STATIC int 2577 xfs_recover_inode_owner_change( 2578 struct xfs_mount *mp, 2579 struct xfs_dinode *dip, 2580 struct xfs_inode_log_format *in_f, 2581 struct list_head *buffer_list) 2582 { 2583 struct xfs_inode *ip; 2584 int error; 2585 2586 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)); 2587 2588 ip = xfs_inode_alloc(mp, in_f->ilf_ino); 2589 if (!ip) 2590 return ENOMEM; 2591 2592 /* instantiate the inode */ 2593 xfs_dinode_from_disk(&ip->i_d, dip); 2594 ASSERT(ip->i_d.di_version >= 3); 2595 2596 error = xfs_iformat_fork(ip, dip); 2597 if (error) 2598 goto out_free_ip; 2599 2600 2601 if (in_f->ilf_fields & XFS_ILOG_DOWNER) { 2602 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT); 2603 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, 2604 ip->i_ino, buffer_list); 2605 if (error) 2606 goto out_free_ip; 2607 } 2608 2609 if (in_f->ilf_fields & XFS_ILOG_AOWNER) { 2610 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT); 2611 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK, 2612 ip->i_ino, buffer_list); 2613 if (error) 2614 goto out_free_ip; 2615 } 2616 2617 out_free_ip: 2618 xfs_inode_free(ip); 2619 return error; 2620 } 2621 2622 STATIC int 2623 xlog_recover_inode_pass2( 2624 struct xlog *log, 2625 struct list_head *buffer_list, 2626 struct xlog_recover_item *item, 2627 xfs_lsn_t current_lsn) 2628 { 2629 xfs_inode_log_format_t *in_f; 2630 xfs_mount_t *mp = log->l_mp; 2631 xfs_buf_t *bp; 2632 xfs_dinode_t *dip; 2633 int len; 2634 xfs_caddr_t src; 2635 xfs_caddr_t dest; 2636 int error; 2637 int attr_index; 2638 uint fields; 2639 xfs_icdinode_t *dicp; 2640 uint isize; 2641 int need_free = 0; 2642 2643 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { 2644 in_f = item->ri_buf[0].i_addr; 2645 } else { 2646 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP); 2647 need_free = 1; 2648 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); 2649 if (error) 2650 goto error; 2651 } 2652 2653 /* 2654 * Inode buffers can be freed; look out for that case, 2655 * and do not replay the inode. 2656 */ 2657 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno, 2658 in_f->ilf_len, 0)) { 2659 error = 0; 2660 trace_xfs_log_recover_inode_cancel(log, in_f); 2661 goto error; 2662 } 2663 trace_xfs_log_recover_inode_recover(log, in_f); 2664 2665 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, 2666 &xfs_inode_buf_ops); 2667 if (!bp) { 2668 error = ENOMEM; 2669 goto error; 2670 } 2671 error = bp->b_error; 2672 if (error) { 2673 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)"); 2674 goto out_release; 2675 } 2676 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); 2677 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset); 2678 2679 /* 2680 * Make sure the place we're flushing out to really looks 2681 * like an inode!
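 * That is, the on-disk location must already carry XFS_DINODE_MAGIC
 * before we overwrite it with the logged inode core.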
2682 */ 2683 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) { 2684 xfs_alert(mp, 2685 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", 2686 __func__, dip, bp, in_f->ilf_ino); 2687 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", 2688 XFS_ERRLEVEL_LOW, mp); 2689 error = EFSCORRUPTED; 2690 goto out_release; 2691 } 2692 dicp = item->ri_buf[1].i_addr; 2693 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { 2694 xfs_alert(mp, 2695 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld", 2696 __func__, item, in_f->ilf_ino); 2697 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", 2698 XFS_ERRLEVEL_LOW, mp); 2699 error = EFSCORRUPTED; 2700 goto out_release; 2701 } 2702 2703 /* 2704 * If the inode has an LSN in it, recover the inode only if it's less 2705 * than the lsn of the transaction we are replaying. Note: we still 2706 * need to replay an owner change even though the inode is more recent 2707 * than the transaction as there is no guarantee that all the btree 2708 * blocks are more recent than this transaction, too. 2709 */ 2710 if (dip->di_version >= 3) { 2711 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); 2712 2713 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 2714 trace_xfs_log_recover_inode_skip(log, in_f); 2715 error = 0; 2716 goto out_owner_change; 2717 } 2718 } 2719 2720 /* 2721 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes 2722 * are transactional and if ordering is necessary we can determine that 2723 * more accurately by the LSN field in the v3 inode core. Don't trust 2724 * the inode versions as we might be changing them here - use the 2725 * superblock flag to determine whether we need to look at di_flushiter 2726 * to skip replay when the on disk inode is newer than the log one. 2727 */ 2728 if (!xfs_sb_version_hascrc(&mp->m_sb) && 2729 dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) { 2730 /* 2731 * Deal with the wrap case, DI_MAX_FLUSH is less 2732 * than smaller numbers 2733 */ 2734 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && 2735 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) { 2736 /* do nothing */ 2737 } else { 2738 trace_xfs_log_recover_inode_skip(log, in_f); 2739 error = 0; 2740 goto out_release; 2741 } 2742 } 2743 2744 /* Take the opportunity to reset the flush iteration count */ 2745 dicp->di_flushiter = 0; 2746 2747 if (unlikely(S_ISREG(dicp->di_mode))) { 2748 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2749 (dicp->di_format != XFS_DINODE_FMT_BTREE)) { 2750 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", 2751 XFS_ERRLEVEL_LOW, mp, dicp); 2752 xfs_alert(mp, 2753 "%s: Bad regular inode log record, rec ptr 0x%p, " 2754 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2755 __func__, item, dip, bp, in_f->ilf_ino); 2756 error = EFSCORRUPTED; 2757 goto out_release; 2758 } 2759 } else if (unlikely(S_ISDIR(dicp->di_mode))) { 2760 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2761 (dicp->di_format != XFS_DINODE_FMT_BTREE) && 2762 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { 2763 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", 2764 XFS_ERRLEVEL_LOW, mp, dicp); 2765 xfs_alert(mp, 2766 "%s: Bad dir inode log record, rec ptr 0x%p, " 2767 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2768 __func__, item, dip, bp, in_f->ilf_ino); 2769 error = EFSCORRUPTED; 2770 goto out_release; 2771 } 2772 } 2773 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) { 2774 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", 2775 XFS_ERRLEVEL_LOW, mp, dicp); 2776 xfs_alert(mp, 2777 "%s: Bad inode log
record, rec ptr 0x%p, dino ptr 0x%p, " 2778 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 2779 __func__, item, dip, bp, in_f->ilf_ino, 2780 dicp->di_nextents + dicp->di_anextents, 2781 dicp->di_nblocks); 2782 error = EFSCORRUPTED; 2783 goto out_release; 2784 } 2785 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { 2786 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", 2787 XFS_ERRLEVEL_LOW, mp, dicp); 2788 xfs_alert(mp, 2789 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 2790 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, 2791 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); 2792 error = EFSCORRUPTED; 2793 goto out_release; 2794 } 2795 isize = xfs_icdinode_size(dicp->di_version); 2796 if (unlikely(item->ri_buf[1].i_len > isize)) { 2797 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", 2798 XFS_ERRLEVEL_LOW, mp, dicp); 2799 xfs_alert(mp, 2800 "%s: Bad inode log record length %d, rec ptr 0x%p", 2801 __func__, item->ri_buf[1].i_len, item); 2802 error = EFSCORRUPTED; 2803 goto out_release; 2804 } 2805 2806 /* The core is in in-core format */ 2807 xfs_dinode_to_disk(dip, dicp); 2808 2809 /* the rest is in on-disk format */ 2810 if (item->ri_buf[1].i_len > isize) { 2811 memcpy((char *)dip + isize, 2812 item->ri_buf[1].i_addr + isize, 2813 item->ri_buf[1].i_len - isize); 2814 } 2815 2816 fields = in_f->ilf_fields; 2817 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { 2818 case XFS_ILOG_DEV: 2819 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); 2820 break; 2821 case XFS_ILOG_UUID: 2822 memcpy(XFS_DFORK_DPTR(dip), 2823 &in_f->ilf_u.ilfu_uuid, 2824 sizeof(uuid_t)); 2825 break; 2826 } 2827 2828 if (in_f->ilf_size == 2) 2829 goto out_owner_change; 2830 len = item->ri_buf[2].i_len; 2831 src = item->ri_buf[2].i_addr; 2832 ASSERT(in_f->ilf_size <= 4); 2833 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); 2834 ASSERT(!(fields & XFS_ILOG_DFORK) || 2835 (len == in_f->ilf_dsize)); 2836 2837 switch (fields & XFS_ILOG_DFORK) { 2838 case XFS_ILOG_DDATA: 2839 case XFS_ILOG_DEXT: 2840 memcpy(XFS_DFORK_DPTR(dip), src, len); 2841 break; 2842 2843 case XFS_ILOG_DBROOT: 2844 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, 2845 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip), 2846 XFS_DFORK_DSIZE(dip, mp)); 2847 break; 2848 2849 default: 2850 /* 2851 * There are no data fork flags set. 2852 */ 2853 ASSERT((fields & XFS_ILOG_DFORK) == 0); 2854 break; 2855 } 2856 2857 /* 2858 * If we logged any attribute data, recover it. There may or 2859 * may not have been any other non-core data logged in this 2860 * transaction. 
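 * Hence the attribute fork data is either region 2 or region 3 of the
 * log item, depending on whether a data fork region was logged too.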
2861 */ 2862 if (in_f->ilf_fields & XFS_ILOG_AFORK) { 2863 if (in_f->ilf_fields & XFS_ILOG_DFORK) { 2864 attr_index = 3; 2865 } else { 2866 attr_index = 2; 2867 } 2868 len = item->ri_buf[attr_index].i_len; 2869 src = item->ri_buf[attr_index].i_addr; 2870 ASSERT(len == in_f->ilf_asize); 2871 2872 switch (in_f->ilf_fields & XFS_ILOG_AFORK) { 2873 case XFS_ILOG_ADATA: 2874 case XFS_ILOG_AEXT: 2875 dest = XFS_DFORK_APTR(dip); 2876 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); 2877 memcpy(dest, src, len); 2878 break; 2879 2880 case XFS_ILOG_ABROOT: 2881 dest = XFS_DFORK_APTR(dip); 2882 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, 2883 len, (xfs_bmdr_block_t *)dest, 2884 XFS_DFORK_ASIZE(dip, mp)); 2885 break; 2886 2887 default: 2888 xfs_warn(log->l_mp, "%s: Invalid flag", __func__); 2889 ASSERT(0); 2890 error = EIO; 2891 goto out_release; 2892 } 2893 } 2894 2895 out_owner_change: 2896 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) 2897 error = xfs_recover_inode_owner_change(mp, dip, in_f, 2898 buffer_list); 2899 /* re-generate the checksum. */ 2900 xfs_dinode_calc_crc(log->l_mp, dip); 2901 2902 ASSERT(bp->b_target->bt_mount == mp); 2903 bp->b_iodone = xlog_recover_iodone; 2904 xfs_buf_delwri_queue(bp, buffer_list); 2905 2906 out_release: 2907 xfs_buf_relse(bp); 2908 error: 2909 if (need_free) 2910 kmem_free(in_f); 2911 return XFS_ERROR(error); 2912 } 2913 2914 /* 2915 * Recover QUOTAOFF records. We simply make a note of it in the xlog 2916 * structure, so that we know not to do any dquot item or dquot buffer recovery 2917 * of that type. 2918 */ 2919 STATIC int 2920 xlog_recover_quotaoff_pass1( 2921 struct xlog *log, 2922 struct xlog_recover_item *item) 2923 { 2924 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; 2925 ASSERT(qoff_f); 2926 2927 /* 2928 * The logitem format's flag tells us if this was user quotaoff, 2929 * group/project quotaoff or both. 2930 */ 2931 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) 2932 log->l_quotaoffs_flag |= XFS_DQ_USER; 2933 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) 2934 log->l_quotaoffs_flag |= XFS_DQ_PROJ; 2935 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) 2936 log->l_quotaoffs_flag |= XFS_DQ_GROUP; 2937 2938 return (0); 2939 } 2940 2941 /* 2942 * Recover a dquot record 2943 */ 2944 STATIC int 2945 xlog_recover_dquot_pass2( 2946 struct xlog *log, 2947 struct list_head *buffer_list, 2948 struct xlog_recover_item *item, 2949 xfs_lsn_t current_lsn) 2950 { 2951 xfs_mount_t *mp = log->l_mp; 2952 xfs_buf_t *bp; 2953 struct xfs_disk_dquot *ddq, *recddq; 2954 int error; 2955 xfs_dq_logformat_t *dq_f; 2956 uint type; 2957 2958 2959 /* 2960 * Filesystems are required to send in quota flags at mount time. 2961 */ 2962 if (mp->m_qflags == 0) 2963 return (0); 2964 2965 recddq = item->ri_buf[1].i_addr; 2966 if (recddq == NULL) { 2967 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); 2968 return XFS_ERROR(EIO); 2969 } 2970 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { 2971 xfs_alert(log->l_mp, "dquot too small (%d) in %s.", 2972 item->ri_buf[1].i_len, __func__); 2973 return XFS_ERROR(EIO); 2974 } 2975 2976 /* 2977 * This type of quota was turned off, so ignore this record. 2978 */ 2979 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); 2980 ASSERT(type); 2981 if (log->l_quotaoffs_flag & type) 2982 return (0); 2983 2984 /* 2985 * At this point we know that quota was _not_ turned off. 2986 * Since the mount flags are not indicating to us otherwise, this 2987 * must mean that quota is on, and the dquot needs to be replayed.
2988 * Remember that we may not have fully recovered the superblock yet, 2989 * so we can't do the usual trick of looking at the SB quota bits. 2990 * 2991 * The other possibility, of course, is that the quota subsystem was 2992 * removed since the last mount - ENOSYS. 2993 */ 2994 dq_f = item->ri_buf[0].i_addr; 2995 ASSERT(dq_f); 2996 error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2997 "xlog_recover_dquot_pass2 (log copy)"); 2998 if (error) 2999 return XFS_ERROR(EIO); 3000 ASSERT(dq_f->qlf_len == 1); 3001 3002 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, 3003 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, 3004 NULL); 3005 if (error) 3006 return error; 3007 3008 ASSERT(bp); 3009 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); 3010 3011 /* 3012 * At least the magic num portion should be on disk because this 3013 * was among a chunk of dquots created earlier, and we did some 3014 * minimal initialization then. 3015 */ 3016 error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 3017 "xlog_recover_dquot_pass2"); 3018 if (error) { 3019 xfs_buf_relse(bp); 3020 return XFS_ERROR(EIO); 3021 } 3022 3023 /* 3024 * If the dquot has an LSN in it, recover the dquot only if it's less 3025 * than the lsn of the transaction we are replaying. 3026 */ 3027 if (xfs_sb_version_hascrc(&mp->m_sb)) { 3028 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq; 3029 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); 3030 3031 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 3032 goto out_release; 3033 } 3034 } 3035 3036 memcpy(ddq, recddq, item->ri_buf[1].i_len); 3037 if (xfs_sb_version_hascrc(&mp->m_sb)) { 3038 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), 3039 XFS_DQUOT_CRC_OFF); 3040 } 3041 3042 ASSERT(dq_f->qlf_size == 2); 3043 ASSERT(bp->b_target->bt_mount == mp); 3044 bp->b_iodone = xlog_recover_iodone; 3045 xfs_buf_delwri_queue(bp, buffer_list); 3046 3047 out_release: 3048 xfs_buf_relse(bp); 3049 return 0; 3050 } 3051 3052 /* 3053 * This routine is called to create an in-core extent free intent 3054 * item from the efi format structure which was logged on disk. 3055 * It allocates an in-core efi, copies the extents from the format 3056 * structure into it, and adds the efi to the AIL with the given 3057 * LSN. 3058 */ 3059 STATIC int 3060 xlog_recover_efi_pass2( 3061 struct xlog *log, 3062 struct xlog_recover_item *item, 3063 xfs_lsn_t lsn) 3064 { 3065 int error; 3066 xfs_mount_t *mp = log->l_mp; 3067 xfs_efi_log_item_t *efip; 3068 xfs_efi_log_format_t *efi_formatp; 3069 3070 efi_formatp = item->ri_buf[0].i_addr; 3071 3072 efip = xfs_efi_init(mp, efi_formatp->efi_nextents); 3073 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]), 3074 &(efip->efi_format)))) { 3075 xfs_efi_item_free(efip); 3076 return error; 3077 } 3078 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents); 3079 3080 spin_lock(&log->l_ailp->xa_lock); 3081 /* 3082 * xfs_trans_ail_update() drops the AIL lock. 3083 */ 3084 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn); 3085 return 0; 3086 } 3087 3088 3089 /* 3090 * This routine is called when an efd format structure is found in 3091 * a committed transaction in the log. Its purpose is to cancel 3092 * the corresponding efi if it was still in the log. To do this 3093 * it searches the AIL for the efi with an id equal to that in the 3094 * efd format structure. If we find it, we remove the efi from the 3095 * AIL and free it.
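 * If no matching efi is found in the AIL then there is nothing left
 * to cancel and the efd is simply ignored.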
3096 */ 3097 STATIC int 3098 xlog_recover_efd_pass2( 3099 struct xlog *log, 3100 struct xlog_recover_item *item) 3101 { 3102 xfs_efd_log_format_t *efd_formatp; 3103 xfs_efi_log_item_t *efip = NULL; 3104 xfs_log_item_t *lip; 3105 __uint64_t efi_id; 3106 struct xfs_ail_cursor cur; 3107 struct xfs_ail *ailp = log->l_ailp; 3108 3109 efd_formatp = item->ri_buf[0].i_addr; 3110 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + 3111 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || 3112 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + 3113 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t))))); 3114 efi_id = efd_formatp->efd_efi_id; 3115 3116 /* 3117 * Search for the efi with the id in the efd format structure 3118 * in the AIL. 3119 */ 3120 spin_lock(&ailp->xa_lock); 3121 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 3122 while (lip != NULL) { 3123 if (lip->li_type == XFS_LI_EFI) { 3124 efip = (xfs_efi_log_item_t *)lip; 3125 if (efip->efi_format.efi_id == efi_id) { 3126 /* 3127 * xfs_trans_ail_delete() drops the 3128 * AIL lock. 3129 */ 3130 xfs_trans_ail_delete(ailp, lip, 3131 SHUTDOWN_CORRUPT_INCORE); 3132 xfs_efi_item_free(efip); 3133 spin_lock(&ailp->xa_lock); 3134 break; 3135 } 3136 } 3137 lip = xfs_trans_ail_cursor_next(ailp, &cur); 3138 } 3139 xfs_trans_ail_cursor_done(ailp, &cur); 3140 spin_unlock(&ailp->xa_lock); 3141 3142 return 0; 3143 } 3144 3145 /* 3146 * This routine is called when an inode create format structure is found in a 3147 * committed transaction in the log. Its purpose is to initialise the inodes 3148 * being allocated on disk. This requires us to get inode cluster buffers that 3149 * match the range to be initialised, stamped with inode templates and written 3150 * by delayed write so that subsequent modifications will hit the cached buffer 3151 * and only need writing out at the end of recovery.
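 * This mirrors what xfs_ialloc_inode_init() does for a freshly
 * allocated inode cluster at runtime; recovery reuses that same
 * helper below to stamp the buffers.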
3152 */ 3153 STATIC int 3154 xlog_recover_do_icreate_pass2( 3155 struct xlog *log, 3156 struct list_head *buffer_list, 3157 xlog_recover_item_t *item) 3158 { 3159 struct xfs_mount *mp = log->l_mp; 3160 struct xfs_icreate_log *icl; 3161 xfs_agnumber_t agno; 3162 xfs_agblock_t agbno; 3163 unsigned int count; 3164 unsigned int isize; 3165 xfs_agblock_t length; 3166 3167 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; 3168 if (icl->icl_type != XFS_LI_ICREATE) { 3169 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); 3170 return EINVAL; 3171 } 3172 3173 if (icl->icl_size != 1) { 3174 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); 3175 return EINVAL; 3176 } 3177 3178 agno = be32_to_cpu(icl->icl_ag); 3179 if (agno >= mp->m_sb.sb_agcount) { 3180 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); 3181 return EINVAL; 3182 } 3183 agbno = be32_to_cpu(icl->icl_agbno); 3184 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { 3185 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); 3186 return EINVAL; 3187 } 3188 isize = be32_to_cpu(icl->icl_isize); 3189 if (isize != mp->m_sb.sb_inodesize) { 3190 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); 3191 return EINVAL; 3192 } 3193 count = be32_to_cpu(icl->icl_count); 3194 if (!count) { 3195 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); 3196 return EINVAL; 3197 } 3198 length = be32_to_cpu(icl->icl_length); 3199 if (!length || length >= mp->m_sb.sb_agblocks) { 3200 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); 3201 return EINVAL; 3202 } 3203 3204 /* existing allocation is a fixed value */ 3205 ASSERT(count == XFS_IALLOC_INODES(mp)); 3206 ASSERT(length == XFS_IALLOC_BLOCKS(mp)); 3207 if (count != XFS_IALLOC_INODES(mp) || 3208 length != XFS_IALLOC_BLOCKS(mp)) { 3209 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); 3210 return EINVAL; 3211 } 3212 3213 /* 3214 * Inode buffers can be freed. Do not replay the inode initialisation as 3215 * we could be overwriting something written after this inode buffer was 3216 * cancelled. 3217 * 3218 * XXX: we need to iterate all buffers and only init those that are not 3219 * cancelled. I think that a more fine-grained factoring of 3220 * xfs_ialloc_inode_init may be appropriate here to enable this to be 3221 * done easily. 3222 */ 3223 if (xlog_check_buffer_cancelled(log, 3224 XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0)) 3225 return 0; 3226 3227 xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length, 3228 be32_to_cpu(icl->icl_gen)); 3229 return 0; 3230 } 3231 3232 /* 3233 * Free up any resources allocated by the transaction. 3234 * 3235 * Remember that EFIs, EFDs, and IUNLINKs are handled later. 3236 */ 3237 STATIC void 3238 xlog_recover_free_trans( 3239 struct xlog_recover *trans) 3240 { 3241 xlog_recover_item_t *item, *n; 3242 int i; 3243 3244 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { 3245 /* Free the regions in the item.
*/ 3246 list_del(&item->ri_list); 3247 for (i = 0; i < item->ri_cnt; i++) 3248 kmem_free(item->ri_buf[i].i_addr); 3249 /* Free the item itself */ 3250 kmem_free(item->ri_buf); 3251 kmem_free(item); 3252 } 3253 /* Free the transaction recover structure */ 3254 kmem_free(trans); 3255 } 3256 3257 STATIC void 3258 xlog_recover_buffer_ra_pass2( 3259 struct xlog *log, 3260 struct xlog_recover_item *item) 3261 { 3262 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr; 3263 struct xfs_mount *mp = log->l_mp; 3264 3265 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno, 3266 buf_f->blf_len, buf_f->blf_flags)) { 3267 return; 3268 } 3269 3270 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno, 3271 buf_f->blf_len, NULL); 3272 } 3273 3274 STATIC void 3275 xlog_recover_inode_ra_pass2( 3276 struct xlog *log, 3277 struct xlog_recover_item *item) 3278 { 3279 struct xfs_inode_log_format ilf_buf; 3280 struct xfs_inode_log_format *ilfp; 3281 struct xfs_mount *mp = log->l_mp; 3282 int error; 3283 3284 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { 3285 ilfp = item->ri_buf[0].i_addr; 3286 } else { 3287 ilfp = &ilf_buf; 3288 memset(ilfp, 0, sizeof(*ilfp)); 3289 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp); 3290 if (error) 3291 return; 3292 } 3293 3294 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0)) 3295 return; 3296 3297 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno, 3298 ilfp->ilf_len, &xfs_inode_buf_ra_ops); 3299 } 3300 3301 STATIC void 3302 xlog_recover_dquot_ra_pass2( 3303 struct xlog *log, 3304 struct xlog_recover_item *item) 3305 { 3306 struct xfs_mount *mp = log->l_mp; 3307 struct xfs_disk_dquot *recddq; 3308 struct xfs_dq_logformat *dq_f; 3309 uint type; 3310 3311 3312 if (mp->m_qflags == 0) 3313 return; 3314 3315 recddq = item->ri_buf[1].i_addr; 3316 if (recddq == NULL) 3317 return; 3318 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) 3319 return; 3320 3321 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); 3322 ASSERT(type); 3323 if (log->l_quotaoffs_flag & type) 3324 return; 3325 3326 dq_f = item->ri_buf[0].i_addr; 3327 ASSERT(dq_f); 3328 ASSERT(dq_f->qlf_len == 1); 3329 3330 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, 3331 XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL); 3332 } 3333 3334 STATIC void 3335 xlog_recover_ra_pass2( 3336 struct xlog *log, 3337 struct xlog_recover_item *item) 3338 { 3339 switch (ITEM_TYPE(item)) { 3340 case XFS_LI_BUF: 3341 xlog_recover_buffer_ra_pass2(log, item); 3342 break; 3343 case XFS_LI_INODE: 3344 xlog_recover_inode_ra_pass2(log, item); 3345 break; 3346 case XFS_LI_DQUOT: 3347 xlog_recover_dquot_ra_pass2(log, item); 3348 break; 3349 case XFS_LI_EFI: 3350 case XFS_LI_EFD: 3351 case XFS_LI_QUOTAOFF: 3352 default: 3353 break; 3354 } 3355 } 3356 3357 STATIC int 3358 xlog_recover_commit_pass1( 3359 struct xlog *log, 3360 struct xlog_recover *trans, 3361 struct xlog_recover_item *item) 3362 { 3363 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); 3364 3365 switch (ITEM_TYPE(item)) { 3366 case XFS_LI_BUF: 3367 return xlog_recover_buffer_pass1(log, item); 3368 case XFS_LI_QUOTAOFF: 3369 return xlog_recover_quotaoff_pass1(log, item); 3370 case XFS_LI_INODE: 3371 case XFS_LI_EFI: 3372 case XFS_LI_EFD: 3373 case XFS_LI_DQUOT: 3374 case XFS_LI_ICREATE: 3375 /* nothing to do in pass 1 */ 3376 return 0; 3377 default: 3378 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 3379 __func__, ITEM_TYPE(item)); 3380 ASSERT(0); 3381 return XFS_ERROR(EIO); 
3382 } 3383 } 3384 3385 STATIC int 3386 xlog_recover_commit_pass2( 3387 struct xlog *log, 3388 struct xlog_recover *trans, 3389 struct list_head *buffer_list, 3390 struct xlog_recover_item *item) 3391 { 3392 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); 3393 3394 switch (ITEM_TYPE(item)) { 3395 case XFS_LI_BUF: 3396 return xlog_recover_buffer_pass2(log, buffer_list, item, 3397 trans->r_lsn); 3398 case XFS_LI_INODE: 3399 return xlog_recover_inode_pass2(log, buffer_list, item, 3400 trans->r_lsn); 3401 case XFS_LI_EFI: 3402 return xlog_recover_efi_pass2(log, item, trans->r_lsn); 3403 case XFS_LI_EFD: 3404 return xlog_recover_efd_pass2(log, item); 3405 case XFS_LI_DQUOT: 3406 return xlog_recover_dquot_pass2(log, buffer_list, item, 3407 trans->r_lsn); 3408 case XFS_LI_ICREATE: 3409 return xlog_recover_do_icreate_pass2(log, buffer_list, item); 3410 case XFS_LI_QUOTAOFF: 3411 /* nothing to do in pass2 */ 3412 return 0; 3413 default: 3414 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 3415 __func__, ITEM_TYPE(item)); 3416 ASSERT(0); 3417 return XFS_ERROR(EIO); 3418 } 3419 } 3420 3421 STATIC int 3422 xlog_recover_items_pass2( 3423 struct xlog *log, 3424 struct xlog_recover *trans, 3425 struct list_head *buffer_list, 3426 struct list_head *item_list) 3427 { 3428 struct xlog_recover_item *item; 3429 int error = 0; 3430 3431 list_for_each_entry(item, item_list, ri_list) { 3432 error = xlog_recover_commit_pass2(log, trans, 3433 buffer_list, item); 3434 if (error) 3435 return error; 3436 } 3437 3438 return error; 3439 } 3440 3441 /* 3442 * Perform the transaction. 3443 * 3444 * If the transaction modifies a buffer or inode, do it now. Otherwise, 3445 * EFIs and EFDs get queued up by adding entries into the AIL for them. 3446 */ 3447 STATIC int 3448 xlog_recover_commit_trans( 3449 struct xlog *log, 3450 struct xlog_recover *trans, 3451 int pass) 3452 { 3453 int error = 0; 3454 int error2; 3455 int items_queued = 0; 3456 struct xlog_recover_item *item; 3457 struct xlog_recover_item *next; 3458 LIST_HEAD (buffer_list); 3459 LIST_HEAD (ra_list); 3460 LIST_HEAD (done_list); 3461 3462 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100 3463 3464 hlist_del(&trans->r_list); 3465 3466 error = xlog_recover_reorder_trans(log, trans, pass); 3467 if (error) 3468 return error; 3469 3470 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) { 3471 switch (pass) { 3472 case XLOG_RECOVER_PASS1: 3473 error = xlog_recover_commit_pass1(log, trans, item); 3474 break; 3475 case XLOG_RECOVER_PASS2: 3476 xlog_recover_ra_pass2(log, item); 3477 list_move_tail(&item->ri_list, &ra_list); 3478 items_queued++; 3479 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) { 3480 error = xlog_recover_items_pass2(log, trans, 3481 &buffer_list, &ra_list); 3482 list_splice_tail_init(&ra_list, &done_list); 3483 items_queued = 0; 3484 } 3485 3486 break; 3487 default: 3488 ASSERT(0); 3489 } 3490 3491 if (error) 3492 goto out; 3493 } 3494 3495 out: 3496 if (!list_empty(&ra_list)) { 3497 if (!error) 3498 error = xlog_recover_items_pass2(log, trans, 3499 &buffer_list, &ra_list); 3500 list_splice_tail_init(&ra_list, &done_list); 3501 } 3502 3503 if (!list_empty(&done_list)) 3504 list_splice_init(&done_list, &trans->r_itemq); 3505 3506 xlog_recover_free_trans(trans); 3507 3508 error2 = xfs_buf_delwri_submit(&buffer_list); 3509 return error ? 
error : error2; 3510 } 3511 3512 STATIC int 3513 xlog_recover_unmount_trans( 3514 struct xlog *log, 3515 struct xlog_recover *trans) 3516 { 3517 /* Do nothing now */ 3518 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 3519 return 0; 3520 } 3521 3522 /* 3523 * There are two valid states of the r_state field. 0 indicates that the 3524 * transaction structure is in a normal state. We have either seen the 3525 * start of the transaction or the last operation we added was not a partial 3526 * operation. If the last operation we added to the transaction was a 3527 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. 3528 * 3529 * NOTE: skip LRs with 0 data length. 3530 */ 3531 STATIC int 3532 xlog_recover_process_data( 3533 struct xlog *log, 3534 struct hlist_head rhash[], 3535 struct xlog_rec_header *rhead, 3536 xfs_caddr_t dp, 3537 int pass) 3538 { 3539 xfs_caddr_t lp; 3540 int num_logops; 3541 xlog_op_header_t *ohead; 3542 xlog_recover_t *trans; 3543 xlog_tid_t tid; 3544 int error; 3545 unsigned long hash; 3546 uint flags; 3547 3548 lp = dp + be32_to_cpu(rhead->h_len); 3549 num_logops = be32_to_cpu(rhead->h_num_logops); 3550 3551 /* check the log format matches our own - else we can't recover */ 3552 if (xlog_header_check_recover(log->l_mp, rhead)) 3553 return (XFS_ERROR(EIO)); 3554 3555 while ((dp < lp) && num_logops) { 3556 ASSERT(dp + sizeof(xlog_op_header_t) <= lp); 3557 ohead = (xlog_op_header_t *)dp; 3558 dp += sizeof(xlog_op_header_t); 3559 if (ohead->oh_clientid != XFS_TRANSACTION && 3560 ohead->oh_clientid != XFS_LOG) { 3561 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", 3562 __func__, ohead->oh_clientid); 3563 ASSERT(0); 3564 return (XFS_ERROR(EIO)); 3565 } 3566 tid = be32_to_cpu(ohead->oh_tid); 3567 hash = XLOG_RHASH(tid); 3568 trans = xlog_recover_find_tid(&rhash[hash], tid); 3569 if (trans == NULL) { /* not found; add new tid */ 3570 if (ohead->oh_flags & XLOG_START_TRANS) 3571 xlog_recover_new_tid(&rhash[hash], tid, 3572 be64_to_cpu(rhead->h_lsn)); 3573 } else { 3574 if (dp + be32_to_cpu(ohead->oh_len) > lp) { 3575 xfs_warn(log->l_mp, "%s: bad length 0x%x", 3576 __func__, be32_to_cpu(ohead->oh_len)); 3577 WARN_ON(1); 3578 return (XFS_ERROR(EIO)); 3579 } 3580 flags = ohead->oh_flags & ~XLOG_END_TRANS; 3581 if (flags & XLOG_WAS_CONT_TRANS) 3582 flags &= ~XLOG_CONTINUE_TRANS; 3583 switch (flags) { 3584 case XLOG_COMMIT_TRANS: 3585 error = xlog_recover_commit_trans(log, 3586 trans, pass); 3587 break; 3588 case XLOG_UNMOUNT_TRANS: 3589 error = xlog_recover_unmount_trans(log, trans); 3590 break; 3591 case XLOG_WAS_CONT_TRANS: 3592 error = xlog_recover_add_to_cont_trans(log, 3593 trans, dp, 3594 be32_to_cpu(ohead->oh_len)); 3595 break; 3596 case XLOG_START_TRANS: 3597 xfs_warn(log->l_mp, "%s: bad transaction", 3598 __func__); 3599 ASSERT(0); 3600 error = XFS_ERROR(EIO); 3601 break; 3602 case 0: 3603 case XLOG_CONTINUE_TRANS: 3604 error = xlog_recover_add_to_trans(log, trans, 3605 dp, be32_to_cpu(ohead->oh_len)); 3606 break; 3607 default: 3608 xfs_warn(log->l_mp, "%s: bad flag 0x%x", 3609 __func__, flags); 3610 ASSERT(0); 3611 error = XFS_ERROR(EIO); 3612 break; 3613 } 3614 if (error) 3615 return error; 3616 } 3617 dp += be32_to_cpu(ohead->oh_len); 3618 num_logops--; 3619 } 3620 return 0; 3621 } 3622 3623 /* 3624 * Process an extent free intent item that was recovered from 3625 * the log. We need to free the extents that it describes. 
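 * The frees run inside a real transaction, and a matching EFD is
 * logged for the extents so that the intent is not processed a second
 * time if we crash again during this phase of recovery.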
3626 */ 3627 STATIC int 3628 xlog_recover_process_efi( 3629 xfs_mount_t *mp, 3630 xfs_efi_log_item_t *efip) 3631 { 3632 xfs_efd_log_item_t *efdp; 3633 xfs_trans_t *tp; 3634 int i; 3635 int error = 0; 3636 xfs_extent_t *extp; 3637 xfs_fsblock_t startblock_fsb; 3638 3639 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)); 3640 3641 /* 3642 * First check the validity of the extents described by the 3643 * EFI. If any are bad, then assume that all are bad and 3644 * just toss the EFI. 3645 */ 3646 for (i = 0; i < efip->efi_format.efi_nextents; i++) { 3647 extp = &(efip->efi_format.efi_extents[i]); 3648 startblock_fsb = XFS_BB_TO_FSB(mp, 3649 XFS_FSB_TO_DADDR(mp, extp->ext_start)); 3650 if ((startblock_fsb == 0) || 3651 (extp->ext_len == 0) || 3652 (startblock_fsb >= mp->m_sb.sb_dblocks) || 3653 (extp->ext_len >= mp->m_sb.sb_agblocks)) { 3654 /* 3655 * This will pull the EFI from the AIL and 3656 * free the memory associated with it. 3657 */ 3658 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); 3659 xfs_efi_release(efip, efip->efi_format.efi_nextents); 3660 return XFS_ERROR(EIO); 3661 } 3662 } 3663 3664 tp = xfs_trans_alloc(mp, 0); 3665 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0); 3666 if (error) 3667 goto abort_error; 3668 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); 3669 3670 for (i = 0; i < efip->efi_format.efi_nextents; i++) { 3671 extp = &(efip->efi_format.efi_extents[i]); 3672 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len); 3673 if (error) 3674 goto abort_error; 3675 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, 3676 extp->ext_len); 3677 } 3678 3679 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); 3680 error = xfs_trans_commit(tp, 0); 3681 return error; 3682 3683 abort_error: 3684 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 3685 return error; 3686 } 3687 3688 /* 3689 * When this is called, all of the EFIs which did not have 3690 * corresponding EFDs should be in the AIL. What we do now 3691 * is free the extents associated with each one. 3692 * 3693 * Since we process the EFIs in normal transactions, they 3694 * will be removed at some point after the commit. This prevents 3695 * us from just walking down the list processing each one. 3696 * We'll use a flag in the EFI to skip those that we've already 3697 * processed and use the AIL iteration mechanism's generation 3698 * count to try to speed this up at least a bit. 3699 * 3700 * When we start, we know that the EFIs are the only things in 3701 * the AIL. As we process them, however, other items are added 3702 * to the AIL. Since everything added to the AIL must come after 3703 * everything already in the AIL, we stop processing as soon as 3704 * we see something other than an EFI in the AIL. 3705 */ 3706 STATIC int 3707 xlog_recover_process_efis( 3708 struct xlog *log) 3709 { 3710 xfs_log_item_t *lip; 3711 xfs_efi_log_item_t *efip; 3712 int error = 0; 3713 struct xfs_ail_cursor cur; 3714 struct xfs_ail *ailp; 3715 3716 ailp = log->l_ailp; 3717 spin_lock(&ailp->xa_lock); 3718 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 3719 while (lip != NULL) { 3720 /* 3721 * We're done when we see something other than an EFI. 3722 * There should be no EFIs left in the AIL now. 3723 */ 3724 if (lip->li_type != XFS_LI_EFI) { 3725 #ifdef DEBUG 3726 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) 3727 ASSERT(lip->li_type != XFS_LI_EFI); 3728 #endif 3729 break; 3730 } 3731 3732 /* 3733 * Skip EFIs that we've already processed. 
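 * These are the ones already marked with the XFS_EFI_RECOVERED flag.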
/*
 * When this is called, all of the EFIs which did not have
 * corresponding EFDs should be in the AIL.  What we do now
 * is free the extents associated with each one.
 *
 * Since we process the EFIs in normal transactions, they
 * will be removed at some point after the commit.  This prevents
 * us from just walking down the list processing each one.
 * We'll use a flag in the EFI to skip those that we've already
 * processed and use the AIL iteration mechanism's generation
 * count to try to speed this up at least a bit.
 *
 * When we start, we know that the EFIs are the only things in
 * the AIL.  As we process them, however, other items are added
 * to the AIL.  Since everything added to the AIL must come after
 * everything already in the AIL, we stop processing as soon as
 * we see something other than an EFI in the AIL.
 */
STATIC int
xlog_recover_process_efis(
	struct xlog		*log)
{
	xfs_log_item_t		*lip;
	xfs_efi_log_item_t	*efip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an EFI.
		 * There should be no EFIs left in the AIL now.
		 */
		if (lip->li_type != XFS_LI_EFI) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(lip->li_type != XFS_LI_EFI);
#endif
			break;
		}

		/*
		 * Skip EFIs that we've already processed.
		 */
		efip = (xfs_efi_log_item_t *)lip;
		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
			lip = xfs_trans_ail_cursor_next(ailp, &cur);
			continue;
		}

		spin_unlock(&ailp->xa_lock);
		error = xlog_recover_process_efi(log->l_mp, efip);
		spin_lock(&ailp->xa_lock);
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}

/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
	if (error)
		goto out_abort;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	agi = XFS_BUF_TO_AGI(agibp);
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp, 0);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}

STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on-disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	/* set up for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	IRELE(ip);
	return agino;

 fail_iput:
	IRELE(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
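/*
 * Illustrative aside (not compiled): each AGI bucket heads a singly linked
 * list of unlinked inodes, chained through di_next_unlinked and terminated
 * by NULLAGINO.  A minimal sketch of that traversal pattern, with a
 * hypothetical ex_next() standing in for the iget/imap_to_bp sequence above:
 */
#if 0
#include <stdint.h>

#define EX_NULLAGINO	((uint32_t)-1)

/* hypothetical: return the next agino in the chain, or EX_NULLAGINO */
extern uint32_t ex_next(uint32_t agino);

static unsigned int
ex_walk_bucket(uint32_t head)
{
	unsigned int	n = 0;
	uint32_t	agino = head;

	while (agino != EX_NULLAGINO) {	/* sentinel ends the chain */
		agino = ex_next(agino);
		n++;
	}
	return n;
}
#endif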
/*
 * xlog_iunlink_recover
 *
 * This is called during recovery to process any inodes which
 * we unlinked but not freed when the system crashed.  These
 * inodes will be on the lists in the AGI blocks.  What we do
 * here is scan all the AGIs and fully truncate and free any
 * inodes found on the lists.  Each inode is removed from the
 * lists when it has been fully truncated and is freed.  The
 * freeing of the inode and its removal from the list must be
 * atomic.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the ag's we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer.  We keep
		 * a buffer reference, though, so that it stays pinned in
		 * memory while we need the buffer.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
			}
		}
		xfs_buf_rele(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}
/*
 * Unpack the log buffer data and crc check it. If the check fails, issue a
 * warning if and only if the CRC in the header is non-zero. This makes the
 * check an advisory warning, and the zero CRC check will prevent failure
 * warnings from being emitted when upgrading the kernel from one that does not
 * add CRCs by default.
 *
 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
 * corruption failure.
 */
STATIC int
xlog_unpack_data_crc(
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	struct xlog		*log)
{
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
	if (crc != rhead->h_crc) {
		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
					le32_to_cpu(rhead->h_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If we've detected a log record corruption, then we can't
		 * recover past this point. Abort recovery if we are enforcing
		 * CRC protection by punting an error back up the stack.
		 */
		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
			return EFSCORRUPTED;
	}

	return 0;
}

STATIC int
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	struct xlog		*log)
{
	int			i, j, k;
	int			error;

	error = xlog_unpack_data_crc(rhead, dp, log);
	if (error)
		return error;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	return 0;
}

STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return XFS_ERROR(EIO);
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}
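/*
 * Illustrative aside (not compiled): the advisory-CRC policy implemented in
 * xlog_unpack_data_crc() boils down to a small decision table.  A
 * self-contained sketch, with crcs_enforced standing in for
 * xfs_sb_version_hascrc() and a hypothetical ex_warn_crc_mismatch() for the
 * alert:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

extern void ex_warn_crc_mismatch(uint32_t stored, uint32_t computed);

/* Returns 0 on acceptance, nonzero when the record must be rejected. */
static int
ex_check_record_crc(uint32_t stored, uint32_t computed, bool crcs_enforced)
{
	if (stored == computed)
		return 0;		/* match: nothing to do */
	/*
	 * Warn only if the writer actually stamped a CRC, or if this
	 * filesystem enforces CRCs; a zero stored CRC from an old kernel
	 * is expected and stays silent.
	 */
	if (stored != 0 || crcs_enforced)
		ex_warn_crc_mismatch(stored, computed);
	return crcs_enforced ? -1 : 0;	/* fatal only when enforced */
}
#endif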
/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass)
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_caddr_t		offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;
		h_size = be32_to_cpu(rhead->h_size);
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	if (tail_blk <= head_blk) {
		for (blk_no = tail_blk; blk_no < head_blk; ) {
			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
			if (error)
				goto bread_err2;

			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			/* blocks in data section */
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
					   &offset);
			if (error)
				goto bread_err2;

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log,
						rhash, rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	} else {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery as above.
		 */
		blk_no = tail_blk;
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp->b_addr;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_offset(log, 0,
						wrapped_hblks, hbp,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp->b_addr;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_offset(log, 0,
						bblks - split_bblks, dbp,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;

		/* read first part of physical log */
		while (blk_no < head_blk) {
			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
			if (error)
				goto bread_err2;

			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
					   &offset);
			if (error)
				goto bread_err2;

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);
	return error;
}
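/*
 * Illustrative aside (not compiled): the wrap-around reads above split an
 * I/O of nbblks blocks starting at blk_no into at most two pieces when it
 * crosses the physical end of the log.  The arithmetic, in isolation:
 */
#if 0
#include <stdint.h>

struct ex_split {
	int first_len;	/* blocks read from blk_no up to the log end */
	int second_len;	/* blocks read from block 0 of the log */
};

/* Assumes 0 <= blk_no < log_size and 0 < nbblks <= log_size. */
static struct ex_split
ex_split_read(int64_t blk_no, int nbblks, int64_t log_size)
{
	struct ex_split s;

	if (blk_no + nbblks <= log_size) {
		s.first_len = nbblks;	/* fits without wrapping */
		s.second_len = 0;
	} else {
		s.first_len = (int)(log_size - blk_no);
		s.second_len = nbblks - s.first_len;
	}
	return s;
}
#endif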
/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error, i;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(struct list_head),
						 KM_SLEEP);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2);
#ifdef DEBUG
	if (!error) {
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
		return EIO;
	}

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * frees or iunlinks, we can free up the entire log and set the
	 * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
	 * to be the lsn of the last known good LR on disk.  If there are
	 * extent frees or iunlinks they will have some entries in the AIL;
	 * so we look at the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(log->l_mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock and reverify it.
	 */
	bp = xfs_getsb(log->l_mp, 0);
	XFS_BUF_UNDONE(bp);
	ASSERT(!(XFS_BUF_ISWRITE(bp)));
	XFS_BUF_READ(bp);
	XFS_BUF_UNASYNC(bp);
	bp->b_ops = &xfs_sb_buf_ops;

	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
		xfs_buf_relse(bp);
		return XFS_ERROR(EIO);
	}

	xfs_buf_iorequest(bp);
	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_buf_ioerror_alert(bp, __func__);
		ASSERT(0);
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &log->l_mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
	ASSERT(xfs_sb_good_version(sbp));
	xfs_buf_relse(bp);

	/* We've re-read the superblock so re-initialize per-cpu counters */
	xfs_icsb_reinit_counters(log->l_mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
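/*
 * Illustrative aside (not compiled): the two-pass scheme above amounts to
 * "collect cancellations, then replay everything not cancelled".  A minimal
 * sketch of that control flow over a hypothetical ex_item stream, with the
 * ex_* cancel-set helpers declared but left abstract:
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct ex_item {
	bool is_cancel;		/* a cancel record for some target */
	int  target;		/* id of the item it cancels / replays */
};

/* hypothetical cancel-set helpers keyed by target id */
extern void ex_mark_cancelled(int target);
extern bool ex_is_cancelled(int target);
extern void ex_replay(const struct ex_item *item);

static void
ex_two_pass_recover(const struct ex_item *items, size_t n)
{
	size_t	i;

	/* pass 1: record which targets were cancelled */
	for (i = 0; i < n; i++)
		if (items[i].is_cancel)
			ex_mark_cancelled(items[i].target);

	/* pass 2: replay only items that were not cancelled */
	for (i = 0; i < n; i++)
		if (!items[i].is_cancel && !ex_is_cancelled(items[i].target))
			ex_replay(&items[i]);
}
#endif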
/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
		return error;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know
		 * the log is dirty so check if there are any unknown log
		 * features in what we need to recover. If there are unknown
		 * features (e.g. unsupported transactions), then simply
		 * reject the attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.\n"
"The log can not be fully and/or safely recovered by this kernel.\n"
"Please recover the log on a kernel that supports the unknown features.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			return EINVAL;
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;
		error = xlog_recover_process_efis(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover EFIs");
			return error;
		}
		/*
		 * Sync the log to get all the EFIs out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}


#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif	/* DEBUG */