// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"

#define BLK_AVG(blk1, blk2)     ((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
        struct xlog     *,
        xfs_daddr_t     *);
STATIC int
xlog_clear_stale_blocks(
        struct xlog     *,
        xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
        struct xlog *);
#else
#define xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             bbcount)
{
        if (blk_no < 0 || blk_no >= log->l_logBBsize)
                return false;
        if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
                return false;
        return true;
}

/*
 * Allocate a buffer to hold log data. The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
        struct xlog     *log,
        int             nbblks)
{
        int             align_mask = xfs_buftarg_dma_alignment(log->l_targ);

        /*
         * Pass log block 0 since we don't have an addr yet, buffer will be
         * verified on read.
         */
        if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                return NULL;
        }

        /*
         * We do log I/O in units of log sectors (a power-of-2 multiple of the
         * basic block size), so we round up the requested size to accommodate
         * the basic blocks required for complete log sectors.
         *
         * In addition, the buffer may be used for a non-sector-aligned block
         * offset, in which case an I/O of the requested size could extend
         * beyond the end of the buffer. If the requested size is only 1 basic
         * block it will never straddle a sector boundary, so this won't be an
         * issue. Nor will this be a problem if the log I/O is done in basic
         * blocks (sector size 1). But otherwise we extend the buffer by one
         * extra log sector to ensure there's space to accommodate this
         * possibility.
         */
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);
        return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
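
/*
 * For example, on a log device with 4k sectors (l_sectBBsize == 8), a
 * request for nbblks == 10 becomes 10 + 8 = 18 and is then rounded up to
 * 24 basic blocks, i.e. a 12k allocation: enough to hold 10 blocks of
 * data starting at any of the 8 possible block offsets within a sector.
 */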

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
        struct xlog     *log,
        xfs_daddr_t     blk_no)
{
        return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}

static int
xlog_do_io(
        struct xlog             *log,
        xfs_daddr_t             blk_no,
        unsigned int            nbblks,
        char                    *data,
        unsigned int            op)
{
        int                     error;

        if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
                xfs_warn(log->l_mp,
                         "Invalid log block/length (0x%llx, 0x%x) for buffer",
                         blk_no, nbblks);
                return -EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);
        ASSERT(nbblks > 0);

        error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
                        BBTOB(nbblks), data, op);
        if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
                xfs_alert(log->l_mp,
                          "log recovery %s I/O error at daddr 0x%llx len %d error %d",
                          op == REQ_OP_WRITE ? "write" : "read",
                          blk_no, nbblks, error);
        }
        return error;
}

STATIC int
xlog_bread_noalign(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data)
{
        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data,
        char            **offset)
{
        int             error;

        error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
        if (!error)
                *offset = data + xlog_align(log, blk_no);
        return error;
}

STATIC int
xlog_bwrite(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        char            *data)
{
        return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}
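
/*
 * To illustrate the alignment handled above: with 4k sectors
 * (l_sectBBsize == 8), xlog_bread(log, 21, 2, data, &offset) issues a
 * sector-aligned read covering blocks 16-23 and sets *offset to
 * data + BBTOB(21 & 7) = data + 2560, the in-buffer address of block 21.
 */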

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
        xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
                xfs_warn(mp,
        "dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                return -EFSCORRUPTED;
        }
        if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
                                           &head->h_fs_uuid))) {
                xfs_warn(mp,
        "dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                return -EFSCORRUPTED;
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        if (uuid_is_null(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is null, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xfs_warn(mp, "null uuid in log - IRIX style log");
        } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
                                                  &head->h_fs_uuid))) {
                xfs_warn(mp, "log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                return -EFSCORRUPTED;
        }
        return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
        struct xlog     *log,
        char            *buffer,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        char            *offset;
        xfs_daddr_t     mid_blk;
        xfs_daddr_t     end_blk;
        uint            mid_cycle;
        int             error;

        end_blk = *last_blk;
        mid_blk = BLK_AVG(first_blk, end_blk);
        while (mid_blk != first_blk && mid_blk != end_blk) {
                error = xlog_bread(log, mid_blk, 1, buffer, &offset);
                if (error)
                        return error;
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle)
                        end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
                else
                        first_blk = mid_blk; /* first_half_cycle == mid_cycle */
                mid_blk = BLK_AVG(first_blk, end_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
               (mid_blk == end_blk && mid_blk-1 == first_blk));

        *last_blk = end_blk;

        return 0;
}
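
/*
 * A worked example of the search above: suppose blocks 0-7 hold cycle
 * numbers { 2, 2, 2, 1, 1, 1, 1, 1 } and we search for cycle 1 with
 * first_blk = 0 and *last_blk = 7.  The midpoints visited are 3 (cycle 1,
 * move end_blk down), 1 (cycle 2, move first_blk up) and 2 (cycle 2,
 * move first_blk up), at which point first_blk + 1 == end_blk and
 * *last_blk returns 3, the first block stamped with cycle 1.
 */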

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
        struct xlog     *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        char            *buffer;
        xfs_daddr_t     bufblks;
        char            *buf = NULL;
        int             error = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks we'll be examining.  If that fails,
         * try a smaller size.  We need to be able to read at least
         * a log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(nbblks);
        while (bufblks > log->l_logBBsize)
                bufblks >>= 1;
        while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < log->l_sectBBsize)
                        return -ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                error = xlog_bread(log, i, bcount, buffer, &buf);
                if (error)
                        goto out;

                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        kmem_free(buffer);
        return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        struct xlog             *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        char                    *buffer;
        char                    *offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        buffer = xlog_alloc_buffer(log, num_blks);
        if (!buffer) {
                buffer = xlog_alloc_buffer(log, 1);
                if (!buffer)
                        return -ENOMEM;
                smallmem = 1;
        } else {
                error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
                if (error)
                        goto out;
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xfs_warn(log->l_mp,
                "Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = -EFSCORRUPTED;
                        goto out;
                }

                if (smallmem) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out;
                }

                head = (xlog_rec_header_t *)offset;

                if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = 1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        kmem_free(buffer);
        return error;
}
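
/*
 * For instance, with a v2 log whose records use a single header block
 * (h_size == XLOG_HEADER_CYCLE_SIZE, so xhdrs == 1), a complete record of
 * h_len == 16k found at block i must satisfy *last_blk - i ==
 * BTOBB(16384) + 1 == 33 blocks; anything else means *last_blk landed
 * inside the record and is pulled back to the record header at block i.
 */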

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        struct xlog     *log,
        xfs_daddr_t     *return_head_blk)
{
        char            *buffer;
        char            *offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        error = xlog_find_zeroed(log, &first_blk);
        if (error < 0) {
                xfs_warn(log->l_mp, "empty log check failed");
                return error;
        }
        if (error == 1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /*
                         * Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there.
                         */
                        xfs_warn(log->l_mp, "totally zeroed log");
                }

                return 0;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;

        error = xlog_bread(log, 0, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        error = xlog_bread(log, last_blk, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.
         * Instead, we set it to log_bbnum which is an invalid block number,
         * but this value makes the math correct.  If head_blk doesn't change
         * through all the tests below, *head_blk is set to zero at the very
         * end rather than log_bbnum.  In a sense, log_bbnum and zero are the
         * same block in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ... | x
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *                               v binary search stopped here
                 *        x + 1 ... | x | x + 1 | x ... | x
                 *                   ^ but we want to locate this spot
                 * or
                 *        <---------> less than scan distance
                 *        x + 1 ... | x ... | x - 1 | x
                 *                           ^ we want to locate this spot
                 */
                stop_on_cycle = last_half_cycle;
                error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
                                last_half_cycle);
                if (error)
                        goto out_free_buffer;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                       ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks >= head_blk);
                start_blk = log_bbnum - (num_scan_bblks - head_blk);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto validate_head;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto out_free_buffer;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

validate_head:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
                if (error == 1)
                        error = -EIO;
                if (error)
                        goto out_free_buffer;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
                if (error < 0)
                        goto out_free_buffer;
                if (error == 1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - (num_scan_bblks - head_blk);
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum - start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        error = xlog_find_verify_log_record(log, start_blk,
                                                        &new_blk, (int)head_blk);
                        if (error == 1)
                                error = -EIO;
                        if (error)
                                goto out_free_buffer;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto out_free_buffer;
        }

        kmem_free(buffer);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

out_free_buffer:
        kmem_free(buffer);
        if (error)
                xfs_warn(log->l_mp, "failed to find log head");
        return error;
}
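
/*
 * The two helpers below scan for record headers in either direction.  A
 * typical use, as in xlog_find_tail(), is to locate the last complete
 * record header at or before an already-discovered head:
 *
 *      found = xlog_rseek_logrec_hdr(log, head_blk, head_blk, 1, buffer,
 *                                    &rhead_blk, &rhead, &wrapped);
 *
 * found == 1 on return means rhead points at that header, and wrapped
 * reports that it was found by wrapping around to the physical end of
 * the log.
 */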

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             tail_blk,
        int                     count,
        char                    *buffer,
        xfs_daddr_t             *rblk,
        struct xlog_rec_header  **rhead,
        bool                    *wrapped)
{
        int                     i;
        int                     error;
        int                     found = 0;
        char                    *offset = NULL;
        xfs_daddr_t             end_blk;

        *wrapped = false;

        /*
         * Walk backwards from the head block until we hit the tail or the
         * first block in the log.
         */
        end_blk = head_blk > tail_blk ? tail_blk : 0;
        for (i = (int) head_blk - 1; i >= end_blk; i--) {
                error = xlog_bread(log, i, 1, buffer, &offset);
                if (error)
                        goto out_error;

                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        *rblk = i;
                        *rhead = (struct xlog_rec_header *) offset;
                        if (++found == count)
                                break;
                }
        }

        /*
         * If we haven't hit the tail block or the log record header count,
         * start looking again from the end of the physical log.  Note that
         * callers can pass head == tail if the tail is not yet known.
         */
        if (tail_blk >= head_blk && found != count) {
                for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out_error;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                *wrapped = true;
                                *rblk = i;
                                *rhead = (struct xlog_rec_header *) offset;
                                if (++found == count)
                                        break;
                        }
                }
        }

        return found;

out_error:
        return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is
 * the number of records encountered or a negative error code. The log block
 * and buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             tail_blk,
        int                     count,
        char                    *buffer,
        xfs_daddr_t             *rblk,
        struct xlog_rec_header  **rhead,
        bool                    *wrapped)
{
        int                     i;
        int                     error;
        int                     found = 0;
        char                    *offset = NULL;
        xfs_daddr_t             end_blk;

        *wrapped = false;

        /*
         * Walk forward from the tail block until we hit the head or the last
         * block in the log.
         */
        end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
        for (i = (int) tail_blk; i <= end_blk; i++) {
                error = xlog_bread(log, i, 1, buffer, &offset);
                if (error)
                        goto out_error;

                if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        *rblk = i;
                        *rhead = (struct xlog_rec_header *) offset;
                        if (++found == count)
                                break;
                }
        }

        /*
         * If we haven't hit the head block or the log record header count,
         * start looking again from the start of the physical log.
         */
        if (tail_blk > head_blk && found != count) {
                for (i = 0; i < (int) head_blk; i++) {
                        error = xlog_bread(log, i, 1, buffer, &offset);
                        if (error)
                                goto out_error;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                *wrapped = true;
                                *rblk = i;
                                *rhead = (struct xlog_rec_header *) offset;
                                if (++found == count)
                                        break;
                        }
                }
        }

        return found;

out_error:
        return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
        struct xlog     *log,
        xfs_daddr_t     head_blk,
        xfs_daddr_t     tail_blk)
{
        if (head_blk < tail_blk)
                return tail_blk - head_blk;

        return tail_blk + (log->l_logBBsize - head_blk);
}
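
/*
 * For example, in a 100 block log: head_blk 10 and tail_blk 30 give a
 * distance of 20 blocks, while head_blk 90 and tail_blk 10 wrap and give
 * 10 + (100 - 90) = 20 blocks of unused space as well.
 */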

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in
 * the log with garbage. This is not a coherency problem because the tail must
 * have been pushed before it can be overwritten, but appears as log
 * corruption to recovery because we have no way to know the tail was updated
 * if the subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        xfs_daddr_t             *tail_blk,
        int                     hsize)
{
        struct xlog_rec_header  *thead;
        char                    *buffer;
        xfs_daddr_t             first_bad;
        int                     error = 0;
        bool                    wrapped;
        xfs_daddr_t             tmp_tail;
        xfs_daddr_t             orig_tail = *tail_blk;

        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;

        /*
         * Make sure the tail points to a record (returns positive count on
         * success).
         */
        error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
                        &tmp_tail, &thead, &wrapped);
        if (error < 0)
                goto out;
        if (*tail_blk != tmp_tail)
                *tail_blk = tmp_tail;

        /*
         * Run a CRC check from the tail to the head. We can't just check
         * MAX_ICLOGS records past the tail because the tail may point to stale
         * blocks cleared during the search for the head/tail. These blocks are
         * overwritten with zero-length records and thus record count is not a
         * reliable indicator of the iclog state before a crash.
         */
        first_bad = 0;
        error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                                      XLOG_RECOVER_CRCPASS, &first_bad);
        while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
                int     tail_distance;

                /*
                 * Is corruption within range of the head? If so, retry from
                 * the next record. Otherwise return an error.
                 */
                tail_distance = xlog_tail_distance(log, head_blk, first_bad);
                if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
                        break;

                /* skip to the next record; returns positive count on success */
                error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
                                buffer, &tmp_tail, &thead, &wrapped);
                if (error < 0)
                        goto out;

                *tail_blk = tmp_tail;
                first_bad = 0;
                error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                                              XLOG_RECOVER_CRCPASS, &first_bad);
        }

        if (!error && *tail_blk != orig_tail)
                xfs_warn(log->l_mp,
                "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
                         orig_tail, *tail_blk);
out:
        kmem_free(buffer);
        return error;
}
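
/*
 * To put a number on the retry window above: with XLOG_MAX_ICLOGS == 8
 * and 32k iclogs (hsize == 32768), a CRC failure within
 * BTOBB(8 * 32768) == 512 basic blocks of the head is treated as a
 * possible tail overwrite and retried; anything further out is reported
 * as corruption.
 */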

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last
 * good record in the log and the tail is updated from that record and
 * verified.
 */
STATIC int
xlog_verify_head(
        struct xlog             *log,
        xfs_daddr_t             *head_blk,      /* in/out: unverified head */
        xfs_daddr_t             *tail_blk,      /* out: tail block */
        char                    *buffer,
        xfs_daddr_t             *rhead_blk,     /* start blk of last record */
        struct xlog_rec_header  **rhead,        /* ptr to last record */
        bool                    *wrapped)       /* last rec. wraps phys. log */
{
        struct xlog_rec_header  *tmp_rhead;
        char                    *tmp_buffer;
        xfs_daddr_t             first_bad;
        xfs_daddr_t             tmp_rhead_blk;
        int                     found;
        int                     error;
        bool                    tmp_wrapped;

        /*
         * Check the head of the log for torn writes. Search backwards from the
         * head until we hit the tail or the maximum number of log record I/Os
         * that could have been in flight at one time. Use a temporary buffer
         * so we don't trash the rhead/buffer pointers from the caller.
         */
        tmp_buffer = xlog_alloc_buffer(log, 1);
        if (!tmp_buffer)
                return -ENOMEM;
        error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
                                      XLOG_MAX_ICLOGS, tmp_buffer,
                                      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
        kmem_free(tmp_buffer);
        if (error < 0)
                return error;

        /*
         * Now run a CRC verification pass over the records starting at the
         * block found above to the current head. If a CRC failure occurs, the
         * log block of the first bad record is saved in first_bad.
         */
        error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
                                      XLOG_RECOVER_CRCPASS, &first_bad);
        if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
                /*
                 * We've hit a potential torn write. Reset the error and warn
                 * about it.
                 */
                error = 0;
                xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
                         first_bad, *head_blk);

                /*
                 * Get the header block and buffer pointer for the last good
                 * record before the bad record.
                 *
                 * Note that xlog_find_tail() clears the blocks at the new head
                 * (i.e., the records with invalid CRC) if the cycle number
                 * matches the current cycle.
                 */
                found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
                                buffer, rhead_blk, rhead, wrapped);
                if (found < 0)
                        return found;
                if (found == 0)         /* XXX: right thing to do here? */
                        return -EIO;

                /*
                 * Reset the head block to the starting block of the first bad
                 * log record and set the tail block based on the last good
                 * record.
                 *
                 * Bail out if the updated head/tail match as this indicates
                 * possible corruption outside of the acceptable
                 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
                 */
                *head_blk = first_bad;
                *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
                if (*head_blk == *tail_blk) {
                        ASSERT(0);
                        return 0;
                }
        }
        if (error)
                return error;

        return xlog_verify_tail(log, *head_blk, tail_blk,
                                be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
        struct xlog             *log,
        xfs_daddr_t             bno)
{
        int                     mod;

        div_s64_rem(bno, log->l_logBBsize, &mod);
        return mod;
}
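
/*
 * For example, with l_logBBsize == 1000, a computed block number of 1005
 * (e.g. a record header block plus its length that runs past the end of
 * the physical log) wraps to block 5.
 */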

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
        struct xlog             *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk,
        struct xlog_rec_header  *rhead,
        xfs_daddr_t             rhead_blk,
        char                    *buffer,
        bool                    *clean)
{
        struct xlog_op_header   *op_head;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        int                     hblks;
        int                     error;
        char                    *offset;

        *clean = false;

        /*
         * Look for unmount record. If we find it, then we know there was a
         * clean unmount. Since 'i' could be the last block in the physical
         * log, we convert to a log block before comparing to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below. We won't want to clear the unmount
         * record if there is one, so we pass the lsn of the unmount record
         * rather than the block after it.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                int     h_size = be32_to_cpu(rhead->h_size);
                int     h_version = be32_to_cpu(rhead->h_version);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }

        after_umount_blk = xlog_wrap_logbno(log,
                        rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
                error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
                if (error)
                        return error;

                op_head = (struct xlog_op_header *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written log
                         * records will point recovery to after the current
                         * unmount record.
                         */
                        xlog_assign_atomic_lsn(&log->l_tail_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        *tail_blk = after_umount_blk;

                        *clean = true;
                }
        }

        return 0;
}
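
/*
 * As a concrete case: an unmount record is a single-logop record, so for
 * a log with a one block record header (hblks == 1) and h_len == 512, the
 * record occupies rhead_blk (the header) and rhead_blk + 1 (the unmount
 * logop), and the head must sit at after_umount_blk == rhead_blk + 2,
 * modulo the log size, for the log to be considered clean.
 */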

static void
xlog_set_state(
        struct xlog             *log,
        xfs_daddr_t             head_blk,
        struct xlog_rec_header  *rhead,
        xfs_daddr_t             rhead_blk,
        bool                    bump_cycle)
{
        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * by one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = rhead_blk;
        log->l_curr_block = (int)head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
        if (bump_cycle)
                log->l_curr_cycle++;
        atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
        atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
        xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));
        xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));
}
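
/*
 * The LSNs manipulated here pack a cycle number and a block number into
 * one 64 bit value: the cycle in the upper 32 bits and the block in the
 * lower 32, so CYCLE_LSN() and BLOCK_LSN() can recover the pieces.  For
 * example, cycle 5 at block 100 is (5ULL << 32) | 100.
 */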

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
        struct xlog             *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        char                    *offset = NULL;
        char                    *buffer;
        int                     error;
        xfs_daddr_t             rhead_blk;
        xfs_lsn_t               tail_lsn;
        bool                    wrapped = false;
        bool                    clean = false;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;
        ASSERT(*head_blk < INT_MAX);

        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                error = xlog_bread(log, 0, 1, buffer, &offset);
                if (error)
                        goto done;

                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto done;
                }
        }

        /*
         * Search backwards through the log looking for the log record header
         * block. This wraps all the way back around to the head so something
         * is seriously wrong if we can't find it.
         */
        error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
                                      &rhead_blk, &rhead, &wrapped);
        if (error < 0)
                goto done;
        if (!error) {
                xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
                error = -EFSCORRUPTED;
                goto done;
        }
        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

        /*
         * Set the log state based on the current head record.
         */
        xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
        tail_lsn = atomic64_read(&log->l_tail_lsn);

        /*
         * Look for an unmount record at the head of the log. This sets the log
         * state to determine whether recovery is necessary.
         */
        error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
                                       rhead_blk, buffer, &clean);
        if (error)
                goto done;

        /*
         * Verify the log head if the log is not clean (e.g., we have anything
         * but an unmount record at the head). This uses CRC verification to
         * detect and trim torn writes. If discovered, CRC failures are
         * considered torn writes and the log head is trimmed accordingly.
         *
         * Note that we can only run CRC verification when the log is dirty
         * because there's no guarantee that the log data behind an unmount
         * record is compatible with the current architecture.
         */
        if (!clean) {
                xfs_daddr_t     orig_head = *head_blk;

                error = xlog_verify_head(log, head_blk, tail_blk, buffer,
                                         &rhead_blk, &rhead, &wrapped);
                if (error)
                        goto done;

                /* update in-core state again if the head changed */
                if (*head_blk != orig_head) {
                        xlog_set_state(log, *head_blk, rhead, rhead_blk,
                                       wrapped);
                        tail_lsn = atomic64_read(&log->l_tail_lsn);
                        error = xlog_check_unmount_rec(log, head_blk, tail_blk,
                                                       rhead, rhead_blk, buffer,
                                                       &clean);
                        if (error)
                                goto done;
                }
        }

        /*
         * Note that the unmount was clean. If the unmount was not clean, we
         * need to know this to rebuild the superblock counters from the perag
         * headers if we have a filesystem using non-persistent counters.
         */
        if (clean)
                log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_targ))
                error = xlog_clear_stale_blocks(log, tail_lsn);

done:
        kmem_free(buffer);

        if (error)
                xfs_warn(log->l_mp, "failed to locate log tail");
        return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      1  => use *blk_no as the first block of the log
 *      <0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
        struct xlog     *log,
        xfs_daddr_t     *blk_no)
{
        char            *buffer;
        char            *offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
                return -ENOMEM;
        error = xlog_bread(log, 0, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                kmem_free(buffer);
                return 1;
        }

        /* check partially zeroed log */
        error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
        if (error)
                goto out_free_buffer;

        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                kmem_free(buffer);
                return 0;
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
        if (error)
                goto out_free_buffer;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the
         * maximum is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto out_free_buffer;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
        if (error == 1)
                error = -EIO;
        if (error)
                goto out_free_buffer;

        *blk_no = last_blk;
out_free_buffer:
        kmem_free(buffer);
        if (error)
                return error;
        return 1;
}
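
/*
 * For example, a freshly made log holding just the mkfs unmount record
 * has non-zero cycle numbers in its first couple of blocks only, e.g.
 * { 1, 1, 0, ..., 0 }: block 0 is non-zero, the last block is zero, and
 * the routine would return 1 with *blk_no set to 2, the first zeroed
 * block and therefore the head of the log.
 */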

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        struct xlog             *log,
        char                    *buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
        recp->h_cycle = cpu_to_be32(cycle);
        recp->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
        recp->h_fmt = cpu_to_be32(XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
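
/*
 * The writer below works in whole log sectors, so unaligned ranges need a
 * read-modify-write.  For instance, clearing blocks 21-99 on a log with 8
 * basic block (4k) sectors first reads back the sector holding blocks
 * 16-23 (so blocks 16-20 keep their contents) and, when the end of the
 * range is similarly unaligned, reads back the final sector as well
 * before stamping the dummy headers and writing the buffer out.
 */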

STATIC int
xlog_write_log_records(
        struct xlog     *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        char            *offset;
        char            *buffer;
        int             balign, ealign;
        int             sectbb = log->l_sectBBsize;
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks to be written.  If that fails, try
         * a smaller size.  We need to be able to write at least a
         * log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(blocks);
        while (bufblks > log->l_logBBsize)
                bufblks >>= 1;
        while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < sectbb)
                        return -ENOMEM;
        }

        /*
         * We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = round_down(start_block, sectbb);
        if (balign != start_block) {
                error = xlog_bread_noalign(log, start_block, 1, buffer);
                if (error)
                        goto out_free_buffer;

                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /*
                 * We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = round_down(end_block, sectbb);
                if (j == 0 && (start_block + endcount > ealign)) {
                        error = xlog_bread_noalign(log, ealign, sectbb,
                                        buffer + BBTOB(ealign - start_block));
                        if (error)
                                break;

                }

                offset = buffer + xlog_align(log, start_block);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, buffer);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }

out_free_buffer:
        kmem_free(buffer);
        return error;
}
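
/*
 * A worked example for the routine below: in a 1000 block log with the
 * head at cycle 5, block 990 and the tail at cycle 5, block 10, the
 * distance from head to tail is 10 + (1000 - 990) = 20 blocks.  If the
 * maximum outstanding I/O allows it, blocks 990-999 are stamped with
 * cycle 4 (the "n - 1" pattern) and blocks 0-9 with cycle 5, so a
 * subsequent head search cannot mistake stale blocks for the head.
 */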

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        struct xlog     *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (XFS_IS_CORRUPT(log->l_mp,
                                   head_block < tail_block ||
                                   head_block >= log->l_logBBsize))
                        return -EFSCORRUPTED;
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (XFS_IS_CORRUPT(log->l_mp,
                                   head_block >= tail_block ||
                                   head_cycle != tail_cycle + 1))
                        return -EFSCORRUPTED;
                tail_distance = tail_block - head_block;
        }

        /*
         * If the head is right up against the tail, we can't clear
         * anything.
         */
        if (tail_distance <= 0) {
                ASSERT(tail_distance == 0);
                return 0;
        }

        max_distance = XLOG_TOTAL_REC_SHIFT(log);
        /*
         * Take the smaller of the maximum amount of outstanding I/O
         * we could have and the distance to the tail to clear out.
         * We take the smaller so that we don't overwrite the tail and
         * we don't waste all day writing from the head to the tail
         * for no reason.
         */
        max_distance = min(max_distance, tail_distance);

        if ((head_block + max_distance) <= log->l_logBBsize) {
                /*
                 * We can stomp all the blocks we need to without
                 * wrapping around the end of the log.  Just do it
                 * in a single write.  Use the cycle number of the
                 * current cycle minus one so that the log will look like:
                 *     n ... | n - 1 ...
                 */
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, max_distance, tail_cycle,
                                tail_block);
                if (error)
                        return error;
        } else {
                /*
                 * We need to wrap around the end of the physical log in
                 * order to clear all the blocks.  Do it in two separate
                 * I/Os.  The first write should be from the head to the
                 * end of the physical log, and it should use the current
                 * cycle number minus one just like above.
                 */
                distance = log->l_logBBsize - head_block;
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, distance, tail_cycle,
                                tail_block);

                if (error)
                        return error;

                /*
                 * Now write the blocks at the start of the physical log.
                 * This writes the remainder of the blocks we want to clear.
                 * It uses the current cycle number since we're now on the
                 * same cycle as the head so that we get:
                 *    n ... n ... | n - 1 ...
                 *    ^^^^^ blocks we're writing
                 */
                distance = max_distance - (log->l_logBBsize - head_block);
                error = xlog_write_log_records(log, head_cycle, 0, distance,
                                tail_cycle, tail_block);
                if (error)
                        return error;
        }

        return 0;
}

/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.
 */
void
xlog_recover_release_intent(
        struct xlog             *log,
        unsigned short          intent_type,
        uint64_t                intent_id)
{
        struct xfs_ail_cursor   cur;
        struct xfs_log_item     *lip;
        struct xfs_ail          *ailp = log->l_ailp;

        spin_lock(&ailp->ail_lock);
        for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
             lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
                if (lip->li_type != intent_type)
                        continue;
                if (!lip->li_ops->iop_match(lip, intent_id))
                        continue;

                spin_unlock(&ailp->ail_lock);
                lip->li_ops->iop_release(lip);
                spin_lock(&ailp->ail_lock);
                break;
        }

        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->ail_lock);
}
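
/*
 * Recovery of an intent-done item is the typical caller here: for
 * example, replaying an EFD would release the EFI whose ID matches, as
 * in xlog_recover_release_intent(log, XFS_LI_EFI, efi_id), dropping the
 * intent from the AIL so it is not redone.
 */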
1828 * This ensures that inodes are completely flushed to the inode buffer 1829 * in a "free" state before we remove the unlinked inode list pointer. 1830 * 1831 * Hence the ordering needs to be inode allocation buffers first, inode items 1832 * second, inode unlink buffers third and cancelled buffers last. 1833 * 1834 * But there's a problem with that - we can't tell an inode allocation buffer 1835 * apart from a regular buffer, so we can't separate them. We can, however, 1836 * tell an inode unlink buffer from the others, and so we can separate them out 1837 * from all the other buffers and move them to last. 1838 * 1839 * Hence, 4 lists, in order from head to tail: 1840 * - buffer_list for all buffers except cancelled/inode unlink buffers 1841 * - item_list for all non-buffer items 1842 * - inode_buffer_list for inode unlink buffers 1843 * - cancel_list for the cancelled buffers 1844 * 1845 * Note that we add objects to the tail of the lists so that first-to-last 1846 * ordering is preserved within the lists. Adding objects to the head of the 1847 * list means when we traverse from the head we walk them in last-to-first 1848 * order. For cancelled buffers and inode unlink buffers this doesn't matter, 1849 * but for all other items there may be specific ordering that we need to 1850 * preserve. 1851 */ 1852 STATIC int 1853 xlog_recover_reorder_trans( 1854 struct xlog *log, 1855 struct xlog_recover *trans, 1856 int pass) 1857 { 1858 struct xlog_recover_item *item, *n; 1859 int error = 0; 1860 LIST_HEAD(sort_list); 1861 LIST_HEAD(cancel_list); 1862 LIST_HEAD(buffer_list); 1863 LIST_HEAD(inode_buffer_list); 1864 LIST_HEAD(item_list); 1865 1866 list_splice_init(&trans->r_itemq, &sort_list); 1867 list_for_each_entry_safe(item, n, &sort_list, ri_list) { 1868 enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST; 1869 1870 item->ri_ops = xlog_find_item_ops(item); 1871 if (!item->ri_ops) { 1872 xfs_warn(log->l_mp, 1873 "%s: unrecognized type of log operation (%d)", 1874 __func__, ITEM_TYPE(item)); 1875 ASSERT(0); 1876 /* 1877 * return the remaining items to the transaction 1878 * item list so they can be freed by the caller.
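 * (The transaction itself, and these returned items with it, are then torn
 * down by xlog_recover_free_trans() when the error reaches
 * xlog_recovery_process_trans().)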
1879 */ 1880 if (!list_empty(&sort_list)) 1881 list_splice_init(&sort_list, &trans->r_itemq); 1882 error = -EFSCORRUPTED; 1883 break; 1884 } 1885 1886 if (item->ri_ops->reorder) 1887 fate = item->ri_ops->reorder(item); 1888 1889 switch (fate) { 1890 case XLOG_REORDER_BUFFER_LIST: 1891 list_move_tail(&item->ri_list, &buffer_list); 1892 break; 1893 case XLOG_REORDER_CANCEL_LIST: 1894 trace_xfs_log_recover_item_reorder_head(log, 1895 trans, item, pass); 1896 list_move(&item->ri_list, &cancel_list); 1897 break; 1898 case XLOG_REORDER_INODE_BUFFER_LIST: 1899 list_move(&item->ri_list, &inode_buffer_list); 1900 break; 1901 case XLOG_REORDER_ITEM_LIST: 1902 trace_xfs_log_recover_item_reorder_tail(log, 1903 trans, item, pass); 1904 list_move_tail(&item->ri_list, &item_list); 1905 break; 1906 } 1907 } 1908 1909 ASSERT(list_empty(&sort_list)); 1910 if (!list_empty(&buffer_list)) 1911 list_splice(&buffer_list, &trans->r_itemq); 1912 if (!list_empty(&item_list)) 1913 list_splice_tail(&item_list, &trans->r_itemq); 1914 if (!list_empty(&inode_buffer_list)) 1915 list_splice_tail(&inode_buffer_list, &trans->r_itemq); 1916 if (!list_empty(&cancel_list)) 1917 list_splice_tail(&cancel_list, &trans->r_itemq); 1918 return error; 1919 } 1920 1921 void 1922 xlog_buf_readahead( 1923 struct xlog *log, 1924 xfs_daddr_t blkno, 1925 uint len, 1926 const struct xfs_buf_ops *ops) 1927 { 1928 if (!xlog_is_buffer_cancelled(log, blkno, len)) 1929 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops); 1930 } 1931 1932 STATIC int 1933 xlog_recover_items_pass2( 1934 struct xlog *log, 1935 struct xlog_recover *trans, 1936 struct list_head *buffer_list, 1937 struct list_head *item_list) 1938 { 1939 struct xlog_recover_item *item; 1940 int error = 0; 1941 1942 list_for_each_entry(item, item_list, ri_list) { 1943 trace_xfs_log_recover_item_recover(log, trans, item, 1944 XLOG_RECOVER_PASS2); 1945 1946 if (item->ri_ops->commit_pass2) 1947 error = item->ri_ops->commit_pass2(log, buffer_list, 1948 item, trans->r_lsn); 1949 if (error) 1950 return error; 1951 } 1952 1953 return error; 1954 } 1955 1956 /* 1957 * Perform the transaction. 1958 * 1959 * If the transaction modifies a buffer or inode, do it now. Otherwise, 1960 * EFIs and EFDs get queued up by adding entries into the AIL for them. 
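 * (In pass 2, items are queued up in batches of XLOG_RECOVER_COMMIT_QUEUE_MAX:
 * ->ra_pass2 readahead is issued as each item is queued, and the queued batch
 * is only committed via ->commit_pass2 once it is full or the transaction has
 * no more items.)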
1961 */ 1962 STATIC int 1963 xlog_recover_commit_trans( 1964 struct xlog *log, 1965 struct xlog_recover *trans, 1966 int pass, 1967 struct list_head *buffer_list) 1968 { 1969 int error = 0; 1970 int items_queued = 0; 1971 struct xlog_recover_item *item; 1972 struct xlog_recover_item *next; 1973 LIST_HEAD (ra_list); 1974 LIST_HEAD (done_list); 1975 1976 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100 1977 1978 hlist_del_init(&trans->r_list); 1979 1980 error = xlog_recover_reorder_trans(log, trans, pass); 1981 if (error) 1982 return error; 1983 1984 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) { 1985 trace_xfs_log_recover_item_recover(log, trans, item, pass); 1986 1987 switch (pass) { 1988 case XLOG_RECOVER_PASS1: 1989 if (item->ri_ops->commit_pass1) 1990 error = item->ri_ops->commit_pass1(log, item); 1991 break; 1992 case XLOG_RECOVER_PASS2: 1993 if (item->ri_ops->ra_pass2) 1994 item->ri_ops->ra_pass2(log, item); 1995 list_move_tail(&item->ri_list, &ra_list); 1996 items_queued++; 1997 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) { 1998 error = xlog_recover_items_pass2(log, trans, 1999 buffer_list, &ra_list); 2000 list_splice_tail_init(&ra_list, &done_list); 2001 items_queued = 0; 2002 } 2003 2004 break; 2005 default: 2006 ASSERT(0); 2007 } 2008 2009 if (error) 2010 goto out; 2011 } 2012 2013 out: 2014 if (!list_empty(&ra_list)) { 2015 if (!error) 2016 error = xlog_recover_items_pass2(log, trans, 2017 buffer_list, &ra_list); 2018 list_splice_tail_init(&ra_list, &done_list); 2019 } 2020 2021 if (!list_empty(&done_list)) 2022 list_splice_init(&done_list, &trans->r_itemq); 2023 2024 return error; 2025 } 2026 2027 STATIC void 2028 xlog_recover_add_item( 2029 struct list_head *head) 2030 { 2031 struct xlog_recover_item *item; 2032 2033 item = kmem_zalloc(sizeof(struct xlog_recover_item), 0); 2034 INIT_LIST_HEAD(&item->ri_list); 2035 list_add_tail(&item->ri_list, head); 2036 } 2037 2038 STATIC int 2039 xlog_recover_add_to_cont_trans( 2040 struct xlog *log, 2041 struct xlog_recover *trans, 2042 char *dp, 2043 int len) 2044 { 2045 struct xlog_recover_item *item; 2046 char *ptr, *old_ptr; 2047 int old_len; 2048 2049 /* 2050 * If the transaction is empty, the header was split across this and the 2051 * previous record. Copy the rest of the header. 2052 */ 2053 if (list_empty(&trans->r_itemq)) { 2054 ASSERT(len <= sizeof(struct xfs_trans_header)); 2055 if (len > sizeof(struct xfs_trans_header)) { 2056 xfs_warn(log->l_mp, "%s: bad header length", __func__); 2057 return -EFSCORRUPTED; 2058 } 2059 2060 xlog_recover_add_item(&trans->r_itemq); 2061 ptr = (char *)&trans->r_theader + 2062 sizeof(struct xfs_trans_header) - len; 2063 memcpy(ptr, dp, len); 2064 return 0; 2065 } 2066 2067 /* take the tail entry */ 2068 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, 2069 ri_list); 2070 2071 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; 2072 old_len = item->ri_buf[item->ri_cnt-1].i_len; 2073 2074 ptr = krealloc(old_ptr, len + old_len, GFP_KERNEL | __GFP_NOFAIL); 2075 memcpy(&ptr[old_len], dp, len); 2076 item->ri_buf[item->ri_cnt-1].i_len += len; 2077 item->ri_buf[item->ri_cnt-1].i_addr = ptr; 2078 trace_xfs_log_recover_item_add_cont(log, trans, item, 0); 2079 return 0; 2080 } 2081 2082 /* 2083 * The next region to add is the start of a new region. It could be 2084 * a whole region or it could be the first part of a new region. Because 2085 * of this, the assumption here is that the type and size fields of all 2086 * format structures fit into the first 32 bits of the structure. 
2087 * 2088 * This works because all regions must be 32 bit aligned. Therefore, we 2089 * either have both fields or we have neither field. In the case we have 2090 * neither field, the data part of the region is zero length. We only have 2091 * a log_op_header and can throw away the header since a new one will appear 2092 * later. If we have at least 4 bytes, then we can determine how many regions 2093 * will appear in the current log item. 2094 */ 2095 STATIC int 2096 xlog_recover_add_to_trans( 2097 struct xlog *log, 2098 struct xlog_recover *trans, 2099 char *dp, 2100 int len) 2101 { 2102 struct xfs_inode_log_format *in_f; /* any will do */ 2103 struct xlog_recover_item *item; 2104 char *ptr; 2105 2106 if (!len) 2107 return 0; 2108 if (list_empty(&trans->r_itemq)) { 2109 /* we need to catch log corruptions here */ 2110 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { 2111 xfs_warn(log->l_mp, "%s: bad header magic number", 2112 __func__); 2113 ASSERT(0); 2114 return -EFSCORRUPTED; 2115 } 2116 2117 if (len > sizeof(struct xfs_trans_header)) { 2118 xfs_warn(log->l_mp, "%s: bad header length", __func__); 2119 ASSERT(0); 2120 return -EFSCORRUPTED; 2121 } 2122 2123 /* 2124 * The transaction header can be arbitrarily split across op 2125 * records. If we don't have the whole thing here, copy what we 2126 * do have and handle the rest in the next record. 2127 */ 2128 if (len == sizeof(struct xfs_trans_header)) 2129 xlog_recover_add_item(&trans->r_itemq); 2130 memcpy(&trans->r_theader, dp, len); 2131 return 0; 2132 } 2133 2134 ptr = kmem_alloc(len, 0); 2135 memcpy(ptr, dp, len); 2136 in_f = (struct xfs_inode_log_format *)ptr; 2137 2138 /* take the tail entry */ 2139 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, 2140 ri_list); 2141 if (item->ri_total != 0 && 2142 item->ri_total == item->ri_cnt) { 2143 /* tail item is in use, get a new one */ 2144 xlog_recover_add_item(&trans->r_itemq); 2145 item = list_entry(trans->r_itemq.prev, 2146 struct xlog_recover_item, ri_list); 2147 } 2148 2149 if (item->ri_total == 0) { /* first region to be added */ 2150 if (in_f->ilf_size == 0 || 2151 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { 2152 xfs_warn(log->l_mp, 2153 "bad number of regions (%d) in inode log format", 2154 in_f->ilf_size); 2155 ASSERT(0); 2156 kmem_free(ptr); 2157 return -EFSCORRUPTED; 2158 } 2159 2160 item->ri_total = in_f->ilf_size; 2161 item->ri_buf = 2162 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t), 2163 0); 2164 } 2165 2166 if (item->ri_total <= item->ri_cnt) { 2167 xfs_warn(log->l_mp, 2168 "log item region count (%d) overflowed size (%d)", 2169 item->ri_cnt, item->ri_total); 2170 ASSERT(0); 2171 kmem_free(ptr); 2172 return -EFSCORRUPTED; 2173 } 2174 2175 /* Description region is ri_buf[0] */ 2176 item->ri_buf[item->ri_cnt].i_addr = ptr; 2177 item->ri_buf[item->ri_cnt].i_len = len; 2178 item->ri_cnt++; 2179 trace_xfs_log_recover_item_add(log, trans, item, 0); 2180 return 0; 2181 } 2182 2183 /* 2184 * Free up any resources allocated by the transaction 2185 * 2186 * Remember that EFIs, EFDs, and IUNLINKs are handled later. 2187 */ 2188 STATIC void 2189 xlog_recover_free_trans( 2190 struct xlog_recover *trans) 2191 { 2192 struct xlog_recover_item *item, *n; 2193 int i; 2194 2195 hlist_del_init(&trans->r_list); 2196 2197 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { 2198 /* Free the regions in the item. 
*/ 2199 list_del(&item->ri_list); 2200 for (i = 0; i < item->ri_cnt; i++) 2201 kmem_free(item->ri_buf[i].i_addr); 2202 /* Free the item itself */ 2203 kmem_free(item->ri_buf); 2204 kmem_free(item); 2205 } 2206 /* Free the transaction recover structure */ 2207 kmem_free(trans); 2208 } 2209 2210 /* 2211 * On error or completion, trans is freed. 2212 */ 2213 STATIC int 2214 xlog_recovery_process_trans( 2215 struct xlog *log, 2216 struct xlog_recover *trans, 2217 char *dp, 2218 unsigned int len, 2219 unsigned int flags, 2220 int pass, 2221 struct list_head *buffer_list) 2222 { 2223 int error = 0; 2224 bool freeit = false; 2225 2226 /* mask off ophdr transaction container flags */ 2227 flags &= ~XLOG_END_TRANS; 2228 if (flags & XLOG_WAS_CONT_TRANS) 2229 flags &= ~XLOG_CONTINUE_TRANS; 2230 2231 /* 2232 * Callees must not free the trans structure. We'll decide if we need to 2233 * free it or not based on the operation being done and its result. 2234 */ 2235 switch (flags) { 2236 /* expected flag values */ 2237 case 0: 2238 case XLOG_CONTINUE_TRANS: 2239 error = xlog_recover_add_to_trans(log, trans, dp, len); 2240 break; 2241 case XLOG_WAS_CONT_TRANS: 2242 error = xlog_recover_add_to_cont_trans(log, trans, dp, len); 2243 break; 2244 case XLOG_COMMIT_TRANS: 2245 error = xlog_recover_commit_trans(log, trans, pass, 2246 buffer_list); 2247 /* success or fail, we are now done with this transaction. */ 2248 freeit = true; 2249 break; 2250 2251 /* unexpected flag values */ 2252 case XLOG_UNMOUNT_TRANS: 2253 /* just skip trans */ 2254 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 2255 freeit = true; 2256 break; 2257 case XLOG_START_TRANS: 2258 default: 2259 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags); 2260 ASSERT(0); 2261 error = -EFSCORRUPTED; 2262 break; 2263 } 2264 if (error || freeit) 2265 xlog_recover_free_trans(trans); 2266 return error; 2267 } 2268 2269 /* 2270 * Lookup the transaction recovery structure associated with the ID in the 2271 * current ophdr. If the transaction doesn't exist and the start flag is set in 2272 * the ophdr, then allocate a new transaction for future ID matches to find. 2273 * Either way, return what we found during the lookup - an existing transaction 2274 * or nothing. 2275 */ 2276 STATIC struct xlog_recover * 2277 xlog_recover_ophdr_to_trans( 2278 struct hlist_head rhash[], 2279 struct xlog_rec_header *rhead, 2280 struct xlog_op_header *ohead) 2281 { 2282 struct xlog_recover *trans; 2283 xlog_tid_t tid; 2284 struct hlist_head *rhp; 2285 2286 tid = be32_to_cpu(ohead->oh_tid); 2287 rhp = &rhash[XLOG_RHASH(tid)]; 2288 hlist_for_each_entry(trans, rhp, r_list) { 2289 if (trans->r_log_tid == tid) 2290 return trans; 2291 } 2292 2293 /* 2294 * skip over non-start transaction headers - we could be 2295 * processing slack space before the next transaction starts 2296 */ 2297 if (!(ohead->oh_flags & XLOG_START_TRANS)) 2298 return NULL; 2299 2300 ASSERT(be32_to_cpu(ohead->oh_len) == 0); 2301 2302 /* 2303 * This is a new transaction so allocate a new recovery container to 2304 * hold the recovery ops that will follow. 2305 */ 2306 trans = kmem_zalloc(sizeof(struct xlog_recover), 0); 2307 trans->r_log_tid = tid; 2308 trans->r_lsn = be64_to_cpu(rhead->h_lsn); 2309 INIT_LIST_HEAD(&trans->r_itemq); 2310 INIT_HLIST_NODE(&trans->r_list); 2311 hlist_add_head(&trans->r_list, rhp); 2312 2313 /* 2314 * Nothing more to do for this ophdr. Items to be added to this new 2315 * transaction will be in subsequent ophdr containers.
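 * (Returning NULL tells the caller there is nothing further to do for this
 * start ophdr; subsequent ophdrs carrying the same tid will find the new
 * transaction through the hash lookup above.)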
2316 */ 2317 return NULL; 2318 } 2319 2320 STATIC int 2321 xlog_recover_process_ophdr( 2322 struct xlog *log, 2323 struct hlist_head rhash[], 2324 struct xlog_rec_header *rhead, 2325 struct xlog_op_header *ohead, 2326 char *dp, 2327 char *end, 2328 int pass, 2329 struct list_head *buffer_list) 2330 { 2331 struct xlog_recover *trans; 2332 unsigned int len; 2333 int error; 2334 2335 /* Do we understand who wrote this op? */ 2336 if (ohead->oh_clientid != XFS_TRANSACTION && 2337 ohead->oh_clientid != XFS_LOG) { 2338 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", 2339 __func__, ohead->oh_clientid); 2340 ASSERT(0); 2341 return -EFSCORRUPTED; 2342 } 2343 2344 /* 2345 * Check the ophdr contains all the data it is supposed to contain. 2346 */ 2347 len = be32_to_cpu(ohead->oh_len); 2348 if (dp + len > end) { 2349 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len); 2350 WARN_ON(1); 2351 return -EFSCORRUPTED; 2352 } 2353 2354 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead); 2355 if (!trans) { 2356 /* nothing to do, so skip over this ophdr */ 2357 return 0; 2358 } 2359 2360 /* 2361 * The recovered buffer queue is drained only once we know that all 2362 * recovery items for the current LSN have been processed. This is 2363 * required because: 2364 * 2365 * - Buffer write submission updates the metadata LSN of the buffer. 2366 * - Log recovery skips items with a metadata LSN >= the current LSN of 2367 * the recovery item. 2368 * - Separate recovery items against the same metadata buffer can share 2369 * a current LSN. I.e., consider that the LSN of a recovery item is 2370 * defined as the starting LSN of the first record in which its 2371 * transaction appears, that a record can hold multiple transactions, 2372 * and/or that a transaction can span multiple records. 2373 * 2374 * In other words, we are allowed to submit a buffer from log recovery 2375 * once per current LSN. Otherwise, we may incorrectly skip recovery 2376 * items and cause corruption. 2377 * 2378 * We don't know up front whether buffers are updated multiple times per 2379 * LSN. Therefore, track the current LSN of each commit log record as it 2380 * is processed and drain the queue when it changes. Use commit records 2381 * because they are ordered correctly by the logging code. 2382 */ 2383 if (log->l_recovery_lsn != trans->r_lsn && 2384 ohead->oh_flags & XLOG_COMMIT_TRANS) { 2385 error = xfs_buf_delwri_submit(buffer_list); 2386 if (error) 2387 return error; 2388 log->l_recovery_lsn = trans->r_lsn; 2389 } 2390 2391 return xlog_recovery_process_trans(log, trans, dp, len, 2392 ohead->oh_flags, pass, buffer_list); 2393 } 2394 2395 /* 2396 * There are two valid states of the r_state field. 0 indicates that the 2397 * transaction structure is in a normal state. We have either seen the 2398 * start of the transaction or the last operation we added was not a partial 2399 * operation. If the last operation we added to the transaction was a 2400 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. 2401 * 2402 * NOTE: skip LRs with 0 data length. 
2403 */ 2404 STATIC int 2405 xlog_recover_process_data( 2406 struct xlog *log, 2407 struct hlist_head rhash[], 2408 struct xlog_rec_header *rhead, 2409 char *dp, 2410 int pass, 2411 struct list_head *buffer_list) 2412 { 2413 struct xlog_op_header *ohead; 2414 char *end; 2415 int num_logops; 2416 int error; 2417 2418 end = dp + be32_to_cpu(rhead->h_len); 2419 num_logops = be32_to_cpu(rhead->h_num_logops); 2420 2421 /* check the log format matches our own - else we can't recover */ 2422 if (xlog_header_check_recover(log->l_mp, rhead)) 2423 return -EIO; 2424 2425 trace_xfs_log_recover_record(log, rhead, pass); 2426 while ((dp < end) && num_logops) { 2427 2428 ohead = (struct xlog_op_header *)dp; 2429 dp += sizeof(*ohead); 2430 ASSERT(dp <= end); 2431 2432 /* errors will abort recovery */ 2433 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead, 2434 dp, end, pass, buffer_list); 2435 if (error) 2436 return error; 2437 2438 dp += be32_to_cpu(ohead->oh_len); 2439 num_logops--; 2440 } 2441 return 0; 2442 } 2443 2444 /* Take all the collected deferred ops and finish them in order. */ 2445 static int 2446 xlog_finish_defer_ops( 2447 struct xfs_trans *parent_tp) 2448 { 2449 struct xfs_mount *mp = parent_tp->t_mountp; 2450 struct xfs_trans *tp; 2451 int64_t freeblks; 2452 uint resblks; 2453 int error; 2454 2455 /* 2456 * We're finishing the defer_ops that accumulated as a result of 2457 * recovering unfinished intent items during log recovery. We 2458 * reserve an itruncate transaction because it is the largest 2459 * permanent transaction type. Since we're the only user of the fs 2460 * right now, take 93% (15/16) of the available free blocks. Use 2461 * weird math to avoid a 64-bit division. 2462 */ 2463 freeblks = percpu_counter_sum(&mp->m_fdblocks); 2464 if (freeblks <= 0) 2465 return -ENOSPC; 2466 resblks = min_t(int64_t, UINT_MAX, freeblks); 2467 resblks = (resblks * 15) >> 4; 2468 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks, 2469 0, XFS_TRANS_RESERVE, &tp); 2470 if (error) 2471 return error; 2472 /* transfer all collected dfops to this transaction */ 2473 xfs_defer_move(tp, parent_tp); 2474 2475 return xfs_trans_commit(tp); 2476 } 2477 2478 /* Is this log item a deferred action intent? */ 2479 static inline bool xlog_item_is_intent(struct xfs_log_item *lip) 2480 { 2481 return lip->li_ops->iop_recover != NULL && 2482 lip->li_ops->iop_match != NULL; 2483 } 2484 2485 /* 2486 * When this is called, all of the log intent items which did not have 2487 * corresponding log done items should be in the AIL. What we do now 2488 * is update the data structures associated with each one. 2489 * 2490 * Since we process the log intent items in normal transactions, they 2491 * will be removed at some point after the commit. This prevents us 2492 * from just walking down the list processing each one. We'll use a 2493 * flag in the intent item to skip those that we've already processed 2494 * and use the AIL iteration mechanism's generation count to try to 2495 * speed this up at least a bit. 2496 * 2497 * When we start, we know that the intents are the only things in the 2498 * AIL. As we process them, however, other items are added to the 2499 * AIL. 
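 * (Any new intents created while recovering an existing intent are carried as
 * deferred ops, transferred to the parent transaction, and finished at the
 * very end by xlog_finish_defer_ops().)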
2500 */ 2501 STATIC int 2502 xlog_recover_process_intents( 2503 struct xlog *log) 2504 { 2505 struct xfs_trans *parent_tp; 2506 struct xfs_ail_cursor cur; 2507 struct xfs_log_item *lip; 2508 struct xfs_ail *ailp; 2509 int error; 2510 #if defined(DEBUG) || defined(XFS_WARN) 2511 xfs_lsn_t last_lsn; 2512 #endif 2513 2514 /* 2515 * The intent recovery handlers commit transactions to complete recovery 2516 * for individual intents, but any new deferred operations that are 2517 * queued during that process are held off until the very end. The 2518 * purpose of this transaction is to serve as a container for deferred 2519 * operations. Each intent recovery handler must transfer dfops here 2520 * before its local transaction commits, and we'll finish the entire 2521 * list below. 2522 */ 2523 error = xfs_trans_alloc_empty(log->l_mp, &parent_tp); 2524 if (error) 2525 return error; 2526 2527 ailp = log->l_ailp; 2528 spin_lock(&ailp->ail_lock); 2529 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 2530 #if defined(DEBUG) || defined(XFS_WARN) 2531 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); 2532 #endif 2533 while (lip != NULL) { 2534 /* 2535 * We're done when we see something other than an intent. 2536 * There should be no intents left in the AIL now. 2537 */ 2538 if (!xlog_item_is_intent(lip)) { 2539 #ifdef DEBUG 2540 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) 2541 ASSERT(!xlog_item_is_intent(lip)); 2542 #endif 2543 break; 2544 } 2545 2546 /* 2547 * We should never see a redo item with a LSN higher than 2548 * the last transaction we found in the log at the start 2549 * of recovery. 2550 */ 2551 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0); 2552 2553 /* 2554 * NOTE: If your intent processing routine can create more 2555 * deferred ops, you /must/ attach them to the transaction in 2556 * this routine or else those subsequent intents will get 2557 * replayed in the wrong order! 2558 */ 2559 if (!test_and_set_bit(XFS_LI_RECOVERED, &lip->li_flags)) { 2560 spin_unlock(&ailp->ail_lock); 2561 error = lip->li_ops->iop_recover(lip, parent_tp); 2562 spin_lock(&ailp->ail_lock); 2563 } 2564 if (error) 2565 goto out; 2566 lip = xfs_trans_ail_cursor_next(ailp, &cur); 2567 } 2568 out: 2569 xfs_trans_ail_cursor_done(&cur); 2570 spin_unlock(&ailp->ail_lock); 2571 if (!error) 2572 error = xlog_finish_defer_ops(parent_tp); 2573 xfs_trans_cancel(parent_tp); 2574 2575 return error; 2576 } 2577 2578 /* 2579 * A cancel occurs when the mount has failed and we're bailing out. 2580 * Release all pending log intent items so they don't pin the AIL. 2581 */ 2582 STATIC void 2583 xlog_recover_cancel_intents( 2584 struct xlog *log) 2585 { 2586 struct xfs_log_item *lip; 2587 struct xfs_ail_cursor cur; 2588 struct xfs_ail *ailp; 2589 2590 ailp = log->l_ailp; 2591 spin_lock(&ailp->ail_lock); 2592 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 2593 while (lip != NULL) { 2594 /* 2595 * We're done when we see something other than an intent. 2596 * There should be no intents left in the AIL now. 
2597 */ 2598 if (!xlog_item_is_intent(lip)) { 2599 #ifdef DEBUG 2600 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) 2601 ASSERT(!xlog_item_is_intent(lip)); 2602 #endif 2603 break; 2604 } 2605 2606 spin_unlock(&ailp->ail_lock); 2607 lip->li_ops->iop_release(lip); 2608 spin_lock(&ailp->ail_lock); 2609 lip = xfs_trans_ail_cursor_next(ailp, &cur); 2610 } 2611 2612 xfs_trans_ail_cursor_done(&cur); 2613 spin_unlock(&ailp->ail_lock); 2614 } 2615 2616 /* 2617 * This routine performs a transaction to null out a bad inode pointer 2618 * in an agi unlinked inode hash bucket. 2619 */ 2620 STATIC void 2621 xlog_recover_clear_agi_bucket( 2622 xfs_mount_t *mp, 2623 xfs_agnumber_t agno, 2624 int bucket) 2625 { 2626 xfs_trans_t *tp; 2627 xfs_agi_t *agi; 2628 xfs_buf_t *agibp; 2629 int offset; 2630 int error; 2631 2632 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp); 2633 if (error) 2634 goto out_error; 2635 2636 error = xfs_read_agi(mp, tp, agno, &agibp); 2637 if (error) 2638 goto out_abort; 2639 2640 agi = agibp->b_addr; 2641 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); 2642 offset = offsetof(xfs_agi_t, agi_unlinked) + 2643 (sizeof(xfs_agino_t) * bucket); 2644 xfs_trans_log_buf(tp, agibp, offset, 2645 (offset + sizeof(xfs_agino_t) - 1)); 2646 2647 error = xfs_trans_commit(tp); 2648 if (error) 2649 goto out_error; 2650 return; 2651 2652 out_abort: 2653 xfs_trans_cancel(tp); 2654 out_error: 2655 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno); 2656 return; 2657 } 2658 2659 STATIC xfs_agino_t 2660 xlog_recover_process_one_iunlink( 2661 struct xfs_mount *mp, 2662 xfs_agnumber_t agno, 2663 xfs_agino_t agino, 2664 int bucket) 2665 { 2666 struct xfs_buf *ibp; 2667 struct xfs_dinode *dip; 2668 struct xfs_inode *ip; 2669 xfs_ino_t ino; 2670 int error; 2671 2672 ino = XFS_AGINO_TO_INO(mp, agno, agino); 2673 error = xfs_iget(mp, NULL, ino, 0, 0, &ip); 2674 if (error) 2675 goto fail; 2676 2677 /* 2678 * Get the on disk inode to find the next inode in the bucket. 2679 */ 2680 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0); 2681 if (error) 2682 goto fail_iput; 2683 2684 xfs_iflags_clear(ip, XFS_IRECOVERY); 2685 ASSERT(VFS_I(ip)->i_nlink == 0); 2686 ASSERT(VFS_I(ip)->i_mode != 0); 2687 2688 /* setup for the next pass */ 2689 agino = be32_to_cpu(dip->di_next_unlinked); 2690 xfs_buf_relse(ibp); 2691 2692 /* 2693 * Prevent any DMAPI event from being sent when the reference on 2694 * the inode is dropped. 2695 */ 2696 ip->i_d.di_dmevmask = 0; 2697 2698 xfs_irele(ip); 2699 return agino; 2700 2701 fail_iput: 2702 xfs_irele(ip); 2703 fail: 2704 /* 2705 * We can't read in the inode this bucket points to, or this inode 2706 * is messed up. Just ditch this bucket of inodes. We will lose 2707 * some inodes and space, but at least we won't hang. 2708 * 2709 * Call xlog_recover_clear_agi_bucket() to perform a transaction to 2710 * clear the inode pointer in the bucket. 2711 */ 2712 xlog_recover_clear_agi_bucket(mp, agno, bucket); 2713 return NULLAGINO; 2714 } 2715 2716 /* 2717 * Recover AGI unlinked lists 2718 * 2719 * This is called during recovery to process any inodes which we unlinked but 2720 * not freed when the system crashed. These inodes will be on the lists in the 2721 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free 2722 * any inodes found on the lists. Each inode is removed from the lists when it 2723 * has been fully truncated and is freed. The freeing of the inode and its 2724 * removal from the list must be atomic. 
2725 * 2726 * If everything we touch in the agi processing loop is already in memory, this 2727 * loop can hold the cpu for a long time. It runs without lock contention, 2728 * memory allocation contention, the need to wait for IO, etc, and so will run 2729 * until we either run out of inodes to process, run low on memory or we run out 2730 * of log space. 2731 * 2732 * This behaviour is bad for latency on single CPU and non-preemptible kernels, 2733 * and can prevent other filesystem work (such as CIL pushes) from running. This 2734 * can lead to deadlocks if the recovery process runs out of log reservation 2735 * space. Hence we need to yield the CPU when there is other kernel work 2736 * scheduled on this CPU to ensure other scheduled work can run without undue 2737 * latency. 2738 */ 2739 STATIC void 2740 xlog_recover_process_iunlinks( 2741 struct xlog *log) 2742 { 2743 xfs_mount_t *mp; 2744 xfs_agnumber_t agno; 2745 xfs_agi_t *agi; 2746 xfs_buf_t *agibp; 2747 xfs_agino_t agino; 2748 int bucket; 2749 int error; 2750 2751 mp = log->l_mp; 2752 2753 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 2754 /* 2755 * Find the agi for this ag. 2756 */ 2757 error = xfs_read_agi(mp, NULL, agno, &agibp); 2758 if (error) { 2759 /* 2760 * AGI is b0rked. Don't process it. 2761 * 2762 * We should probably mark the filesystem as corrupt 2763 * after we've recovered all the ag's we can.... 2764 */ 2765 continue; 2766 } 2767 /* 2768 * Unlock the buffer so that it can be acquired in the normal 2769 * course of the transaction to truncate and free each inode. 2770 * Because we are not racing with anyone else here for the AGI 2771 * buffer, we don't even need to hold it locked to read the 2772 * initial unlinked bucket entries out of the buffer. We keep a 2773 * buffer reference, though, so that it stays pinned in memory 2774 * while we need the buffer. 2775 */ 2776 agi = agibp->b_addr; 2777 xfs_buf_unlock(agibp); 2778 2779 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { 2780 agino = be32_to_cpu(agi->agi_unlinked[bucket]); 2781 while (agino != NULLAGINO) { 2782 agino = xlog_recover_process_one_iunlink(mp, 2783 agno, agino, bucket); 2784 cond_resched(); 2785 } 2786 } 2787 xfs_buf_rele(agibp); 2788 } 2789 } 2790 2791 STATIC void 2792 xlog_unpack_data( 2793 struct xlog_rec_header *rhead, 2794 char *dp, 2795 struct xlog *log) 2796 { 2797 int i, j, k; 2798 2799 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && 2800 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { 2801 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; 2802 dp += BBSIZE; 2803 } 2804 2805 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 2806 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead; 2807 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { 2808 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 2809 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 2810 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; 2811 dp += BBSIZE; 2812 } 2813 } 2814 } 2815 2816 /* 2817 * CRC check, unpack and process a log record. 2818 */ 2819 STATIC int 2820 xlog_recover_process( 2821 struct xlog *log, 2822 struct hlist_head rhash[], 2823 struct xlog_rec_header *rhead, 2824 char *dp, 2825 int pass, 2826 struct list_head *buffer_list) 2827 { 2828 __le32 old_crc = rhead->h_crc; 2829 __le32 crc; 2830 2831 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); 2832 2833 /* 2834 * Nothing else to do if this is a CRC verification pass. Just return 2835 * if this is a record with a non-zero crc.
Unfortunately, mkfs always 2836 * sets old_crc to 0 so we must consider this valid even on v5 supers. 2837 * Otherwise, return EFSBADCRC on failure so the callers up the stack 2838 * know precisely what failed. 2839 */ 2840 if (pass == XLOG_RECOVER_CRCPASS) { 2841 if (old_crc && crc != old_crc) 2842 return -EFSBADCRC; 2843 return 0; 2844 } 2845 2846 /* 2847 * We're in the normal recovery path. Issue a warning if and only if the 2848 * CRC in the header is non-zero. This is an advisory warning and the 2849 * zero CRC check prevents warnings from being emitted when upgrading 2850 * the kernel from one that does not add CRCs by default. 2851 */ 2852 if (crc != old_crc) { 2853 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) { 2854 xfs_alert(log->l_mp, 2855 "log record CRC mismatch: found 0x%x, expected 0x%x.", 2856 le32_to_cpu(old_crc), 2857 le32_to_cpu(crc)); 2858 xfs_hex_dump(dp, 32); 2859 } 2860 2861 /* 2862 * If the filesystem is CRC enabled, this mismatch becomes a 2863 * fatal log corruption failure. 2864 */ 2865 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) { 2866 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); 2867 return -EFSCORRUPTED; 2868 } 2869 } 2870 2871 xlog_unpack_data(rhead, dp, log); 2872 2873 return xlog_recover_process_data(log, rhash, rhead, dp, pass, 2874 buffer_list); 2875 } 2876 2877 STATIC int 2878 xlog_valid_rec_header( 2879 struct xlog *log, 2880 struct xlog_rec_header *rhead, 2881 xfs_daddr_t blkno) 2882 { 2883 int hlen; 2884 2885 if (XFS_IS_CORRUPT(log->l_mp, 2886 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) 2887 return -EFSCORRUPTED; 2888 if (XFS_IS_CORRUPT(log->l_mp, 2889 (!rhead->h_version || 2890 (be32_to_cpu(rhead->h_version) & 2891 (~XLOG_VERSION_OKBITS))))) { 2892 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", 2893 __func__, be32_to_cpu(rhead->h_version)); 2894 return -EFSCORRUPTED; 2895 } 2896 2897 /* LR body must have data or it wouldn't have been written */ 2898 hlen = be32_to_cpu(rhead->h_len); 2899 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX)) 2900 return -EFSCORRUPTED; 2901 if (XFS_IS_CORRUPT(log->l_mp, 2902 blkno > log->l_logBBsize || blkno > INT_MAX)) 2903 return -EFSCORRUPTED; 2904 return 0; 2905 } 2906 2907 /* 2908 * Read the log from tail to head and process the log records found. 2909 * Handle the two cases where the tail and head are in the same cycle 2910 * and where the active portion of the log wraps around the end of 2911 * the physical log separately. The pass parameter is passed through 2912 * to the routines called to process the data and is not looked at 2913 * here. 2914 */ 2915 STATIC int 2916 xlog_do_recovery_pass( 2917 struct xlog *log, 2918 xfs_daddr_t head_blk, 2919 xfs_daddr_t tail_blk, 2920 int pass, 2921 xfs_daddr_t *first_bad) /* out: first bad log rec */ 2922 { 2923 xlog_rec_header_t *rhead; 2924 xfs_daddr_t blk_no, rblk_no; 2925 xfs_daddr_t rhead_blk; 2926 char *offset; 2927 char *hbp, *dbp; 2928 int error = 0, h_size, h_len; 2929 int error2 = 0; 2930 int bblks, split_bblks; 2931 int hblks, split_hblks, wrapped_hblks; 2932 int i; 2933 struct hlist_head rhash[XLOG_RHASH_SIZE]; 2934 LIST_HEAD (buffer_list); 2935 2936 ASSERT(head_blk != tail_blk); 2937 blk_no = rhead_blk = tail_blk; 2938 2939 for (i = 0; i < XLOG_RHASH_SIZE; i++) 2940 INIT_HLIST_HEAD(&rhash[i]); 2941 2942 /* 2943 * Read the header of the tail block and get the iclog buffer size from 2944 * h_size. Use this to tell how many sectors make up the log header. 
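 * (Each 512 byte header block carries cycle data for XLOG_HEADER_CYCLE_SIZE
 * (32k) worth of log data, so, for example, a 256k iclog needs 256k / 32k = 8
 * header blocks.)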
2945 */ 2946 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 2947 /* 2948 * When using variable length iclogs, read first sector of 2949 * iclog header and extract the header size from it. Get a 2950 * new hbp that is the correct size. 2951 */ 2952 hbp = xlog_alloc_buffer(log, 1); 2953 if (!hbp) 2954 return -ENOMEM; 2955 2956 error = xlog_bread(log, tail_blk, 1, hbp, &offset); 2957 if (error) 2958 goto bread_err1; 2959 2960 rhead = (xlog_rec_header_t *)offset; 2961 error = xlog_valid_rec_header(log, rhead, tail_blk); 2962 if (error) 2963 goto bread_err1; 2964 2965 /* 2966 * xfsprogs has a bug where record length is based on lsunit but 2967 * h_size (iclog size) is hardcoded to 32k. Now that we 2968 * unconditionally CRC verify the unmount record, this means the 2969 * log buffer can be too small for the record and cause an 2970 * overrun. 2971 * 2972 * Detect this condition here. Use lsunit for the buffer size as 2973 * long as this looks like the mkfs case. Otherwise, return an 2974 * error to avoid a buffer overrun. 2975 */ 2976 h_size = be32_to_cpu(rhead->h_size); 2977 h_len = be32_to_cpu(rhead->h_len); 2978 if (h_len > h_size) { 2979 if (h_len <= log->l_mp->m_logbsize && 2980 be32_to_cpu(rhead->h_num_logops) == 1) { 2981 xfs_warn(log->l_mp, 2982 "invalid iclog size (%d bytes), using lsunit (%d bytes)", 2983 h_size, log->l_mp->m_logbsize); 2984 h_size = log->l_mp->m_logbsize; 2985 } else { 2986 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, 2987 log->l_mp); 2988 error = -EFSCORRUPTED; 2989 goto bread_err1; 2990 } 2991 } 2992 2993 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) && 2994 (h_size > XLOG_HEADER_CYCLE_SIZE)) { 2995 hblks = h_size / XLOG_HEADER_CYCLE_SIZE; 2996 if (h_size % XLOG_HEADER_CYCLE_SIZE) 2997 hblks++; 2998 kmem_free(hbp); 2999 hbp = xlog_alloc_buffer(log, hblks); 3000 } else { 3001 hblks = 1; 3002 } 3003 } else { 3004 ASSERT(log->l_sectBBsize == 1); 3005 hblks = 1; 3006 hbp = xlog_alloc_buffer(log, 1); 3007 h_size = XLOG_BIG_RECORD_BSIZE; 3008 } 3009 3010 if (!hbp) 3011 return -ENOMEM; 3012 dbp = xlog_alloc_buffer(log, BTOBB(h_size)); 3013 if (!dbp) { 3014 kmem_free(hbp); 3015 return -ENOMEM; 3016 } 3017 3018 memset(rhash, 0, sizeof(rhash)); 3019 if (tail_blk > head_blk) { 3020 /* 3021 * Perform recovery around the end of the physical log. 3022 * When the head is not on the same cycle number as the tail, 3023 * we can't do a sequential recovery. 
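 * (For example, with a 1000 block log, tail_blk 900 and head_blk 100: the loop
 * below walks records from block 900 to the physical end of the log, blk_no
 * then wraps back to 0, and the second loop further down picks up the records
 * in front of the head.)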
3024 */ 3025 while (blk_no < log->l_logBBsize) { 3026 /* 3027 * Check for header wrapping around physical end-of-log 3028 */ 3029 offset = hbp; 3030 split_hblks = 0; 3031 wrapped_hblks = 0; 3032 if (blk_no + hblks <= log->l_logBBsize) { 3033 /* Read header in one read */ 3034 error = xlog_bread(log, blk_no, hblks, hbp, 3035 &offset); 3036 if (error) 3037 goto bread_err2; 3038 } else { 3039 /* This LR is split across physical log end */ 3040 if (blk_no != log->l_logBBsize) { 3041 /* some data before physical log end */ 3042 ASSERT(blk_no <= INT_MAX); 3043 split_hblks = log->l_logBBsize - (int)blk_no; 3044 ASSERT(split_hblks > 0); 3045 error = xlog_bread(log, blk_no, 3046 split_hblks, hbp, 3047 &offset); 3048 if (error) 3049 goto bread_err2; 3050 } 3051 3052 /* 3053 * Note: this black magic still works with 3054 * large sector sizes (non-512) only because: 3055 * - we increased the buffer size originally 3056 * by 1 sector giving us enough extra space 3057 * for the second read; 3058 * - the log start is guaranteed to be sector 3059 * aligned; 3060 * - we read the log end (LR header start) 3061 * _first_, then the log start (LR header end) 3062 * - order is important. 3063 */ 3064 wrapped_hblks = hblks - split_hblks; 3065 error = xlog_bread_noalign(log, 0, 3066 wrapped_hblks, 3067 offset + BBTOB(split_hblks)); 3068 if (error) 3069 goto bread_err2; 3070 } 3071 rhead = (xlog_rec_header_t *)offset; 3072 error = xlog_valid_rec_header(log, rhead, 3073 split_hblks ? blk_no : 0); 3074 if (error) 3075 goto bread_err2; 3076 3077 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3078 blk_no += hblks; 3079 3080 /* 3081 * Read the log record data in multiple reads if it 3082 * wraps around the end of the log. Note that if the 3083 * header already wrapped, blk_no could point past the 3084 * end of the log. The record data is contiguous in 3085 * that case. 3086 */ 3087 if (blk_no + bblks <= log->l_logBBsize || 3088 blk_no >= log->l_logBBsize) { 3089 rblk_no = xlog_wrap_logbno(log, blk_no); 3090 error = xlog_bread(log, rblk_no, bblks, dbp, 3091 &offset); 3092 if (error) 3093 goto bread_err2; 3094 } else { 3095 /* This log record is split across the 3096 * physical end of log */ 3097 offset = dbp; 3098 split_bblks = 0; 3099 if (blk_no != log->l_logBBsize) { 3100 /* some data is before the physical 3101 * end of log */ 3102 ASSERT(!wrapped_hblks); 3103 ASSERT(blk_no <= INT_MAX); 3104 split_bblks = 3105 log->l_logBBsize - (int)blk_no; 3106 ASSERT(split_bblks > 0); 3107 error = xlog_bread(log, blk_no, 3108 split_bblks, dbp, 3109 &offset); 3110 if (error) 3111 goto bread_err2; 3112 } 3113 3114 /* 3115 * Note: this black magic still works with 3116 * large sector sizes (non-512) only because: 3117 * - we increased the buffer size originally 3118 * by 1 sector giving us enough extra space 3119 * for the second read; 3120 * - the log start is guaranteed to be sector 3121 * aligned; 3122 * - we read the log end (LR header start) 3123 * _first_, then the log start (LR header end) 3124 * - order is important. 
3125 */ 3126 error = xlog_bread_noalign(log, 0, 3127 bblks - split_bblks, 3128 offset + BBTOB(split_bblks)); 3129 if (error) 3130 goto bread_err2; 3131 } 3132 3133 error = xlog_recover_process(log, rhash, rhead, offset, 3134 pass, &buffer_list); 3135 if (error) 3136 goto bread_err2; 3137 3138 blk_no += bblks; 3139 rhead_blk = blk_no; 3140 } 3141 3142 ASSERT(blk_no >= log->l_logBBsize); 3143 blk_no -= log->l_logBBsize; 3144 rhead_blk = blk_no; 3145 } 3146 3147 /* read first part of physical log */ 3148 while (blk_no < head_blk) { 3149 error = xlog_bread(log, blk_no, hblks, hbp, &offset); 3150 if (error) 3151 goto bread_err2; 3152 3153 rhead = (xlog_rec_header_t *)offset; 3154 error = xlog_valid_rec_header(log, rhead, blk_no); 3155 if (error) 3156 goto bread_err2; 3157 3158 /* blocks in data section */ 3159 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3160 error = xlog_bread(log, blk_no+hblks, bblks, dbp, 3161 &offset); 3162 if (error) 3163 goto bread_err2; 3164 3165 error = xlog_recover_process(log, rhash, rhead, offset, pass, 3166 &buffer_list); 3167 if (error) 3168 goto bread_err2; 3169 3170 blk_no += bblks + hblks; 3171 rhead_blk = blk_no; 3172 } 3173 3174 bread_err2: 3175 kmem_free(dbp); 3176 bread_err1: 3177 kmem_free(hbp); 3178 3179 /* 3180 * Submit buffers that have been added from the last record processed, 3181 * regardless of error status. 3182 */ 3183 if (!list_empty(&buffer_list)) 3184 error2 = xfs_buf_delwri_submit(&buffer_list); 3185 3186 if (error && first_bad) 3187 *first_bad = rhead_blk; 3188 3189 /* 3190 * Transactions are freed at commit time but transactions without commit 3191 * records on disk are never committed. Free any that may be left in the 3192 * hash table. 3193 */ 3194 for (i = 0; i < XLOG_RHASH_SIZE; i++) { 3195 struct hlist_node *tmp; 3196 struct xlog_recover *trans; 3197 3198 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list) 3199 xlog_recover_free_trans(trans); 3200 } 3201 3202 return error ? error : error2; 3203 } 3204 3205 /* 3206 * Do the recovery of the log. We actually do this in two phases. 3207 * The two passes are necessary in order to implement the function 3208 * of cancelling a record written into the log. The first pass 3209 * determines those things which have been cancelled, and the 3210 * second pass replays log items normally except for those which 3211 * have been cancelled. The handling of the replay and cancellations 3212 * takes place in the log item type specific routines. 3213 * 3214 * The table of items which have cancel records in the log is allocated 3215 * and freed at this level, since only here do we know when all of 3216 * the log recovery has been completed. 3217 */ 3218 STATIC int 3219 xlog_do_log_recovery( 3220 struct xlog *log, 3221 xfs_daddr_t head_blk, 3222 xfs_daddr_t tail_blk) 3223 { 3224 int error, i; 3225 3226 ASSERT(head_blk != tail_blk); 3227 3228 /* 3229 * First do a pass to find all of the cancelled buf log items. 3230 * Store them in the buf_cancel_table for use in the second pass. 3231 */ 3232 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * 3233 sizeof(struct list_head), 3234 0); 3235 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3236 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); 3237 3238 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3239 XLOG_RECOVER_PASS1, NULL); 3240 if (error != 0) { 3241 kmem_free(log->l_buf_cancel_table); 3242 log->l_buf_cancel_table = NULL; 3243 return error; 3244 } 3245 /* 3246 * Then do a second pass to actually recover the items in the log. 
3247 * When it is complete free the table of buf cancel items. 3248 */ 3249 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3250 XLOG_RECOVER_PASS2, NULL); 3251 #ifdef DEBUG 3252 if (!error) { 3253 int i; 3254 3255 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3256 ASSERT(list_empty(&log->l_buf_cancel_table[i])); 3257 } 3258 #endif /* DEBUG */ 3259 3260 kmem_free(log->l_buf_cancel_table); 3261 log->l_buf_cancel_table = NULL; 3262 3263 return error; 3264 } 3265 3266 /* 3267 * Do the actual recovery 3268 */ 3269 STATIC int 3270 xlog_do_recover( 3271 struct xlog *log, 3272 xfs_daddr_t head_blk, 3273 xfs_daddr_t tail_blk) 3274 { 3275 struct xfs_mount *mp = log->l_mp; 3276 struct xfs_buf *bp = mp->m_sb_bp; 3277 struct xfs_sb *sbp = &mp->m_sb; 3278 int error; 3279 3280 trace_xfs_log_recover(log, head_blk, tail_blk); 3281 3282 /* 3283 * First replay the images in the log. 3284 */ 3285 error = xlog_do_log_recovery(log, head_blk, tail_blk); 3286 if (error) 3287 return error; 3288 3289 /* 3290 * If IO errors happened during recovery, bail out. 3291 */ 3292 if (XFS_FORCED_SHUTDOWN(mp)) 3293 return -EIO; 3294 3295 /* 3296 * We now update the tail_lsn since much of the recovery has completed 3297 * and there may be space available to use. If there were no extent 3298 * or iunlinks, we can free up the entire log and set the tail_lsn to 3299 * be the last_sync_lsn. This was set in xlog_find_tail to be the 3300 * lsn of the last known good LR on disk. If there are extent frees 3301 * or iunlinks they will have some entries in the AIL; so we look at 3302 * the AIL to determine how to set the tail_lsn. 3303 */ 3304 xlog_assign_tail_lsn(mp); 3305 3306 /* 3307 * Now that we've finished replaying all buffer and inode updates, 3308 * re-read the superblock and reverify it. 3309 */ 3310 xfs_buf_lock(bp); 3311 xfs_buf_hold(bp); 3312 error = _xfs_buf_read(bp, XBF_READ); 3313 if (error) { 3314 if (!XFS_FORCED_SHUTDOWN(mp)) { 3315 xfs_buf_ioerror_alert(bp, __this_address); 3316 ASSERT(0); 3317 } 3318 xfs_buf_relse(bp); 3319 return error; 3320 } 3321 3322 /* Convert superblock from on-disk format */ 3323 xfs_sb_from_disk(sbp, bp->b_addr); 3324 xfs_buf_relse(bp); 3325 3326 /* re-initialise in-core superblock and geometry structures */ 3327 xfs_reinit_percpu_counters(mp); 3328 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); 3329 if (error) { 3330 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error); 3331 return error; 3332 } 3333 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); 3334 3335 xlog_recover_check_summary(log); 3336 3337 /* Normal transactions can now occur */ 3338 log->l_flags &= ~XLOG_ACTIVE_RECOVERY; 3339 return 0; 3340 } 3341 3342 /* 3343 * Perform recovery and re-initialize some log variables in xlog_find_tail. 3344 * 3345 * Return error or zero. 3346 */ 3347 int 3348 xlog_recover( 3349 struct xlog *log) 3350 { 3351 xfs_daddr_t head_blk, tail_blk; 3352 int error; 3353 3354 /* find the tail of the log */ 3355 error = xlog_find_tail(log, &head_blk, &tail_blk); 3356 if (error) 3357 return error; 3358 3359 /* 3360 * The superblock was read before the log was available and thus the LSN 3361 * could not be verified. Check the superblock LSN against the current 3362 * LSN now that it's known. 3363 */ 3364 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) && 3365 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn)) 3366 return -EINVAL; 3367 3368 if (tail_blk != head_blk) { 3369 /* There used to be a comment here: 3370 * 3371 * disallow recovery on read-only mounts. 
note -- mount 3372 * checks for ENOSPC and turns it into an intelligent 3373 * error message. 3374 * ...but this is no longer true. Now, unless you specify 3375 * NORECOVERY (in which case this function would never be 3376 * called), we just go ahead and recover. We do this all 3377 * under the vfs layer, so we can get away with it unless 3378 * the device itself is read-only, in which case we fail. 3379 */ 3380 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { 3381 return error; 3382 } 3383 3384 /* 3385 * Version 5 superblock log feature mask validation. We know the 3386 * log is dirty so check if there are any unknown log features 3387 * in what we need to recover. If there are unknown features 3388 * (e.g. unsupported transactions), then simply reject the 3389 * attempt at recovery before touching anything. 3390 */ 3391 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 && 3392 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb, 3393 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) { 3394 xfs_warn(log->l_mp, 3395 "Superblock has unknown incompatible log features (0x%x) enabled.", 3396 (log->l_mp->m_sb.sb_features_log_incompat & 3397 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)); 3398 xfs_warn(log->l_mp, 3399 "The log can not be fully and/or safely recovered by this kernel."); 3400 xfs_warn(log->l_mp, 3401 "Please recover the log on a kernel that supports the unknown features."); 3402 return -EINVAL; 3403 } 3404 3405 /* 3406 * Delay log recovery if the debug hook is set. This is debug 3407 * instrumentation to coordinate simulation of I/O failures with 3408 * log recovery. 3409 */ 3410 if (xfs_globals.log_recovery_delay) { 3411 xfs_notice(log->l_mp, 3412 "Delaying log recovery for %d seconds.", 3413 xfs_globals.log_recovery_delay); 3414 msleep(xfs_globals.log_recovery_delay * 1000); 3415 } 3416 3417 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", 3418 log->l_mp->m_logname ? log->l_mp->m_logname 3419 : "internal"); 3420 3421 error = xlog_do_recover(log, head_blk, tail_blk); 3422 log->l_flags |= XLOG_RECOVERY_NEEDED; 3423 } 3424 return error; 3425 } 3426 3427 /* 3428 * In the first part of recovery we replay inodes and buffers and build 3429 * up the list of extent free items which need to be processed. Here 3430 * we process the extent free items and clean up the on disk unlinked 3431 * inode lists. This is separated from the first part of recovery so 3432 * that the root and real-time bitmap inodes can be read in from disk in 3433 * between the two stages. This is necessary so that we can free space 3434 * in the real-time portion of the file system. 3435 */ 3436 int 3437 xlog_recover_finish( 3438 struct xlog *log) 3439 { 3440 /* 3441 * Now we're ready to do the transactions needed for the 3442 * rest of recovery. Start with completing all the extent 3443 * free intent records and then process the unlinked inode 3444 * lists. At this point, we essentially run in normal mode 3445 * except that we're still performing recovery actions 3446 * rather than accepting new requests. 3447 */ 3448 if (log->l_flags & XLOG_RECOVERY_NEEDED) { 3449 int error; 3450 error = xlog_recover_process_intents(log); 3451 if (error) { 3452 xfs_alert(log->l_mp, "Failed to recover intents"); 3453 return error; 3454 } 3455 3456 /* 3457 * Sync the log to get all the intents out of the AIL. 3458 * This isn't absolutely necessary, but it helps in 3459 * case the unlink transactions would have problems 3460 * pushing the intents out of the way.
3461 */ 3462 xfs_log_force(log->l_mp, XFS_LOG_SYNC); 3463 3464 xlog_recover_process_iunlinks(log); 3465 3466 xlog_recover_check_summary(log); 3467 3468 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", 3469 log->l_mp->m_logname ? log->l_mp->m_logname 3470 : "internal"); 3471 log->l_flags &= ~XLOG_RECOVERY_NEEDED; 3472 } else { 3473 xfs_info(log->l_mp, "Ending clean mount"); 3474 } 3475 return 0; 3476 } 3477 3478 void 3479 xlog_recover_cancel( 3480 struct xlog *log) 3481 { 3482 if (log->l_flags & XLOG_RECOVERY_NEEDED) 3483 xlog_recover_cancel_intents(log); 3484 } 3485 3486 #if defined(DEBUG) 3487 /* 3488 * Read all of the agf and agi counters and check that they 3489 * are consistent with the superblock counters. 3490 */ 3491 STATIC void 3492 xlog_recover_check_summary( 3493 struct xlog *log) 3494 { 3495 xfs_mount_t *mp; 3496 xfs_buf_t *agfbp; 3497 xfs_buf_t *agibp; 3498 xfs_agnumber_t agno; 3499 uint64_t freeblks; 3500 uint64_t itotal; 3501 uint64_t ifree; 3502 int error; 3503 3504 mp = log->l_mp; 3505 3506 freeblks = 0LL; 3507 itotal = 0LL; 3508 ifree = 0LL; 3509 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 3510 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); 3511 if (error) { 3512 xfs_alert(mp, "%s agf read failed agno %d error %d", 3513 __func__, agno, error); 3514 } else { 3515 struct xfs_agf *agfp = agfbp->b_addr; 3516 3517 freeblks += be32_to_cpu(agfp->agf_freeblks) + 3518 be32_to_cpu(agfp->agf_flcount); 3519 xfs_buf_relse(agfbp); 3520 } 3521 3522 error = xfs_read_agi(mp, NULL, agno, &agibp); 3523 if (error) { 3524 xfs_alert(mp, "%s agi read failed agno %d error %d", 3525 __func__, agno, error); 3526 } else { 3527 struct xfs_agi *agi = agibp->b_addr; 3528 3529 itotal += be32_to_cpu(agi->agi_count); 3530 ifree += be32_to_cpu(agi->agi_freecount); 3531 xfs_buf_relse(agibp); 3532 } 3533 } 3534 } 3535 #endif /* DEBUG */ 3536