// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"
#include "xfs_health.h"

kmem_zone_t	*xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(
	struct xlog_in_core	*iclog);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size);
STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

static int
xfs_log_cover(struct xfs_mount *);

static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int		tmp;
		int		cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}
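/*
 * Worked example of the grant head arithmetic above (illustrative only,
 * not part of the original file). A grant head packs a cycle count and
 * a byte offset into one 64-bit value so it can be updated with a
 * single cmpxchg. Assuming a log of l_logsize = 1000 bytes:
 *
 *   - xlog_grant_add_space() of 300 bytes at (cycle 2, space 900):
 *     tmp = 1000 - 900 = 100, which is <= 300, so the head wraps to
 *     (cycle 3, space 200).
 *   - xlog_grant_sub_space() of 950 bytes at (cycle 3, space 200):
 *     space goes negative (-750), so it wraps back to
 *     (cycle 2, space 250).
 *
 * The cmpxchg loop retries if another CPU moved the head between the
 * read and the update.
 */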
STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;
	bool			woken_task = false;

	list_for_each_entry(tic, &head->waiters, t_queue) {

		/*
		 * There is a chance that the size of the CIL checkpoints in
		 * progress at the last AIL push target calculation resulted in
		 * limiting the target to the log head (l_last_sync_lsn) at the
		 * time. This may not reflect where the log head is now as the
		 * CIL checkpoints may have completed.
		 *
		 * Hence when we are woken here, it may be the head of the log
		 * that has moved rather than the tail. As the tail didn't
		 * move, there still won't be space available for the
		 * reservation we require. However, if the AIL has already
		 * pushed to the target defined by the old log head location,
		 * we will hang here waiting for something else to update the
		 * AIL push target.
		 *
		 * Therefore, if there isn't space to wake the first waiter on
		 * the grant head, we need to push the AIL again to ensure the
		 * target reflects both the current log tail and log head
		 * position before we wait for the tail to move again.
		 */

		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes) {
			if (!woken_task)
				xlog_grant_push_ail(log, need_bytes);
			return false;
		}

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
		woken_task = true;
	}

	return true;
}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}
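/*
 * Illustrative example of xlog_ticket_reservation() (not part of the
 * original file): a permanent ticket with t_unit_res = 100000 bytes and
 * t_cnt = 3 must find 300000 bytes at the reserve head, because the
 * reserve head accounts for all remaining transaction rolls up front.
 * The write head only ever needs a single unit (100000 bytes), because
 * a regrant replenishes one transaction's worth of space at a time.
 */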
/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock-free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters; if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}

bool
xfs_log_writable(
	struct xfs_mount	*mp)
{
	/*
	 * Do not write to the log on norecovery mounts, if the data or log
	 * devices are read-only, or if the filesystem is shutdown. Read-only
	 * mounts allow internal writes for log recovery and unmount purposes,
	 * so don't restrict that case.
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		return false;
	if (xfs_readonly_buftarg(mp->m_ddev_targp))
		return false;
	if (xfs_readonly_buftarg(mp->m_log->l_targ))
		return false;
	if (XFS_FORCED_SHUTDOWN(mp))
		return false;
	return true;
}
/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation. By wasting space in each
 * reservation, we prevent over allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	uint8_t			client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent);
	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
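/*
 * Illustrative summary of how the two grant heads move (not part of the
 * original file). A fresh xfs_log_reserve() moves *both* heads forward
 * by the full reservation, while xfs_log_regrant() only tops up the
 * write head:
 *
 *   xfs_log_reserve(unit_res = U, cnt = N):
 *       reserve head += U * N    (space for every planned roll)
 *       write head   += U * N
 *
 *   xfs_log_regrant() on a later roll (t_cnt == 0):
 *       write head   += U        (one more transaction's worth)
 *
 * The reserve head therefore tracks the worst-case space a permanent
 * transaction may still consume, while the write head tracks space for
 * writes actually being formatted into the log.
 */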
static bool
__xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	lockdep_assert_held(&log->l_icloglock);

	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		/* update tail before writing to iclog */
		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);

		iclog->ic_state = XLOG_STATE_SYNCING;
		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
		xlog_verify_tail_lsn(log, iclog, tail_lsn);
		/* cycle incremented when incrementing curr_block */
		return true;
	}

	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
	return false;
}

/*
 * Flush the iclog to disk if this is the last reference to the given iclog
 * and it is in the WANT_SYNC state.
 */
static int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	lockdep_assert_held(&log->l_icloglock);

	if (iclog->ic_state == XLOG_STATE_IOERROR)
		return -EIO;

	if (atomic_dec_and_test(&iclog->ic_refcnt) &&
	    __xlog_state_release_iclog(log, iclog)) {
		spin_unlock(&log->l_icloglock);
		xlog_sync(log, iclog);
		spin_lock(&log->l_icloglock);
	}

	return 0;
}

void
xfs_log_release_iclog(
	struct xlog_in_core	*iclog)
{
	struct xlog		*log = iclog->ic_log;
	bool			sync = false;

	if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
		if (iclog->ic_state != XLOG_STATE_IOERROR)
			sync = __xlog_state_release_iclog(log, iclog);
		spin_unlock(&log->l_icloglock);
	}

	if (sync)
		xlog_sync(log, iclog);
}
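/*
 * Sketch of the iclog transitions driven by the release paths above
 * (illustrative only; only the transitions visible in this file are
 * shown):
 *
 *   ACTIVE ---------------------> WANT_SYNC --------------> SYNCING
 *          xlog_state_switch_iclogs()   __xlog_state_release_iclog()
 *                                       stamps h_tail_lsn; the last
 *                                       reference then triggers
 *                                       xlog_sync() to submit the I/O
 *
 * Dropping the final reference while the iclog is still ACTIVE is a
 * no-op: the buffer stays available for further writes.
 */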
/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	bool		fatal = xfs_sb_version_hascrc(&mp->m_sb);
	int		error = 0;
	int		min_logfsbs;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small, which would lead to unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails. This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logsunit > 1 &&
		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
		xfs_warn(mp,
		"log stripe unit %u bytes must be a multiple of block size",
			 mp->m_sb.sb_logsunit);
		error = -EINVAL;
		fatal = true;
	}
	if (error) {
		/*
		 * Log check errors are always fatal on v5; or whenever bad
		 * metadata leads to a crash.
		 */
		if (fatal) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(mp->m_log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}
/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	int			error = 0;
	bool			readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
	bool			recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	} else if (readonly) {
		/* Allow unlinked processing to proceed */
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
	}

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes.  Turn it off immediately after recovery finishes
	 * so that we don't leak the quota inodes if subsequent mount
	 * activities fail.
	 *
	 * We let all inodes involved in redo item processing end up on
	 * the LRU instead of being evicted immediately so that if we do
	 * something to an unlinked inode, the irele won't cause
	 * premature truncation and freeing of the inode, which results
	 * in log recovery failure.  We have to evict the unreferenced
	 * lru inodes after clearing SB_ACTIVE because we don't
	 * otherwise clean up the lru if there's a subsequent failure in
	 * xfs_mountfs, which leads to us leaking the inodes if nothing
	 * else (e.g. quotacheck) references the inodes before the
	 * mount failure occurs.
	 */
	mp->m_super->s_flags |= SB_ACTIVE;
	error = xlog_recover_finish(mp->m_log);
	if (!error)
		xfs_log_work_queue(mp);
	mp->m_super->s_flags &= ~SB_ACTIVE;
	evict_inodes(mp->m_super);

	/*
	 * Drain the buffer LRU after log recovery. This is required for v4
	 * filesystems to avoid leaving around buffers with NULL verifier ops,
	 * but we do it unconditionally to make sure we're always in a clean
	 * cache state after mount.
	 *
	 * Don't push in the error case because the AIL may have pending intents
	 * that aren't removed until recovery is cancelled.
	 */
	if (!error && recovered) {
		xfs_log_force(mp, XFS_LOG_SYNC);
		xfs_ail_push_all_sync(mp->m_ail);
	}
	xfs_buftarg_drain(mp->m_ddev_targp);

	if (readonly)
		mp->m_flags |= XFS_MOUNT_RDONLY;

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
void
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);
}

/*
 * Wait for the iclog to be written to disk, or return an error if the log has
 * been shut down.
 */
static int
xlog_wait_on_iclog(
	struct xlog_in_core	*iclog)
		__releases(iclog->ic_log->l_icloglock)
{
	struct xlog *log = iclog->ic_log;

	if (!XLOG_FORCED_SHUTDOWN(log) &&
	    iclog->ic_state != XLOG_STATE_ACTIVE &&
	    iclog->ic_state != XLOG_STATE_DIRTY) {
		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	} else {
		spin_unlock(&log->l_icloglock);
	}

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;
	return 0;
}
/*
 * Write out an unmount record using the ticket provided. We have to account for
 * the data space used in the unmount ticket as this write is not done from a
 * transaction context that has already done the accounting for us.
 */
static int
xlog_write_unmount_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	xfs_lsn_t		*lsn,
	uint			flags)
{
	struct xfs_unmount_log_format ulf = {
		.magic = XLOG_UNMOUNT_TYPE,
	};
	struct xfs_log_iovec reg = {
		.i_addr = &ulf,
		.i_len = sizeof(ulf),
		.i_type = XLOG_REG_TYPE_UNMOUNT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	/* account for space used by record data */
	ticket->t_curr_res -= sizeof(ulf);
	return xlog_write(log, &vec, ticket, lsn, NULL, flags, false);
}

/*
 * Mark the filesystem clean by writing an unmount record to the head of the
 * log.
 */
static void
xlog_unmount_write(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xlog_in_core	*iclog;
	struct xlog_ticket	*tic = NULL;
	xfs_lsn_t		lsn;
	uint			flags = XLOG_UNMOUNT_TRANS;
	int			error;

	error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
	if (error)
		goto out_err;

	error = xlog_write_unmount_record(log, tic, &lsn, flags);
	/*
	 * At this point, we're umounting anyway, so there's no point in
	 * transitioning log state to IOERROR. Just continue...
	 */
out_err:
	if (error)
		xfs_alert(mp, "%s: unmount record failed", __func__);

	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	atomic_inc(&iclog->ic_refcnt);
	if (iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, iclog, 0);
	else
		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
		       iclog->ic_state == XLOG_STATE_IOERROR);
	error = xlog_state_release_iclog(log, iclog);
	xlog_wait_on_iclog(iclog);

	if (tic) {
		trace_xfs_log_umount_write(log, tic);
		xfs_log_ticket_ungrant(log, tic);
	}
}

static void
xfs_log_unmount_verify_iclog(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog = log->l_iclog;

	do {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		ASSERT(iclog->ic_offset == 0);
	} while ((iclog = iclog->ic_next) != log->l_iclog);
}

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */
static void
xfs_log_unmount_write(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;

	if (!xfs_log_writable(mp))
		return;

	xfs_log_force(mp, XFS_LOG_SYNC);

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	/*
	 * If we think the summary counters are bad, avoid writing the unmount
	 * record to force log recovery at next mount, after which the summary
	 * counters will be recalculated.  Refer to xlog_check_unmount_rec for
	 * more details.
	 */
	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
		xfs_alert(mp, "%s: will fix summary counters at next mount",
				__func__);
		return;
	}

	xfs_log_unmount_verify_iclog(log);
	xlog_unmount_write(log);
}
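/*
 * Illustrative note on the log vector pattern used above (not from the
 * original file): every payload handed to xlog_write() is described by
 * an xfs_log_vec holding an array of xfs_log_iovec regions. The unmount
 * record is the minimal case - one region, one vector:
 *
 *	struct xfs_log_iovec reg = {
 *		.i_addr	= &payload,		// data to log
 *		.i_len	= sizeof(payload),	// bytes debited from the
 *						// ticket's t_curr_res
 *		.i_type	= XLOG_REG_TYPE_...,	// accounting tag
 *	};
 *	struct xfs_log_vec vec = { .lv_niovecs = 1, .lv_iovecp = &reg };
 *
 * Callers outside transaction context must debit the ticket themselves,
 * which is why xlog_write_unmount_record() subtracts sizeof(ulf) before
 * calling xlog_write().
 */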
/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can cover the log.
 */
int
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_buftarg_wait() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_buftarg_wait(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	return xfs_log_cover(mp);
}

void
xfs_log_clean(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);
	xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_clean(mp);

	xfs_buftarg_drain(mp->m_ddev_targp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{
	item->li_mountp = mp;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
	item->li_lv = NULL;

	INIT_LIST_HEAD(&item->li_ail);
	INIT_LIST_HEAD(&item->li_cil);
	INIT_LIST_HEAD(&item->li_bio_list);
	INIT_LIST_HEAD(&item->li_trans);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}
/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state, the log first needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL. Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static bool
xfs_log_need_covered(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			needed = false;

	if (!xlog_cil_empty(log))
		return false;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = true;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = true;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}

/*
 * Explicitly cover the log. This is similar to background log covering but
 * intended for usage in quiesce codepaths. The caller is responsible to ensure
 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
 * must all be empty.
 */
static int
xfs_log_cover(
	struct xfs_mount	*mp)
{
	int			error = 0;
	bool			need_covered;

	ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
		!xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
	       XFS_FORCED_SHUTDOWN(mp));

	if (!xfs_log_writable(mp))
		return 0;

	/*
	 * xfs_log_need_covered() is not idempotent because it progresses the
	 * state machine if the log requires covering. Therefore, we must call
	 * this function once and use the result until we've issued an sb sync.
	 * Do so first to make that abundantly clear.
	 *
	 * Fall into the covering sequence if the log needs covering or the
	 * mount has lazy superblock accounting to sync to disk. The sb sync
	 * used for covering accumulates the in-core counters, so covering
	 * handles this for us.
	 */
	need_covered = xfs_log_need_covered(mp);
	if (!need_covered && !xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	/*
	 * To cover the log, commit the superblock twice (at most) in
	 * independent checkpoints. The first serves as a reference for the
	 * tail pointer. The sync transaction and AIL push empties the AIL and
	 * updates the in-core tail to the LSN of the first checkpoint. The
	 * second commit updates the on-disk tail with the in-core LSN,
	 * covering the log. Push the AIL one more time to leave it empty, as
	 * we found it.
	 */
	do {
		error = xfs_sync_sb(mp, true);
		if (error)
			break;
		xfs_ail_push_all_sync(mp->m_ail);
	} while (xfs_log_need_covered(mp));

	return error;
}
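/*
 * Illustrative walk through the covering state machine (not from the
 * original file). With an idle CIL, AIL and iclogs, two passes of the
 * xfs_log_cover() loop above drive l_covered_state as follows:
 *
 *   NEED  --(need_covered, 1st sb sync)-->  DONE
 *   DONE  --(checkpoint completes)------->  NEED2
 *   NEED2 --(need_covered, 2nd sb sync)-->  DONE2
 *   DONE2 --(checkpoint completes)------->  IDLE
 *
 * Only the NEED->DONE and NEED2->DONE2 transitions appear in
 * xfs_log_need_covered(); the DONE->NEED2 and DONE2->IDLE steps are
 * taken by the iclog state machine as the covering checkpoints hit the
 * disk (outside this section).
 */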
/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->ail_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL was empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->ail_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->ail_lock);

	return tail_lsn;
}
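/*
 * Illustrative note on LSN encoding (not from the original file): an
 * xfs_lsn_t packs the log cycle number into the high 32 bits and the
 * block offset into the low 32 bits, so cycle 1, block 0 is
 * 0x100000000LL (exactly the initial value assigned to l_tail_lsn in
 * xlog_alloc_log() below). For example, cycle 5 at block 0x64 encodes
 * as 0x0000000500000064.
 */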
/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		free_bytes;
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		return 0;
	else if (tail_cycle < head_cycle) {
		ASSERT(tail_cycle == (head_cycle - 1));
		free_bytes = tail_bytes - head_bytes;
	} else {
		/*
		 * The reservation head is behind the tail.
		 * In this case we just want to return the size of the
		 * log as the amount of space left.
		 */
		xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
		xfs_alert(log->l_mp,
			  "  tail_cycle = %d, tail_bytes = %d",
			  tail_cycle, tail_bytes);
		xfs_alert(log->l_mp,
			  "  GH   cycle = %d, GH   bytes = %d",
			  head_cycle, head_bytes);
		ASSERT(0);
		free_bytes = log->l_logsize;
	}
	return free_bytes;
}
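/*
 * Worked example for xlog_space_left() (illustrative only). For a log
 * with l_logsize = 1000 bytes:
 *
 *   - Same cycle, head ahead of tail: tail at byte 100, head at byte
 *     400 => free = 1000 - (400 - 100) = 700 bytes.
 *   - Head one cycle ahead (wrapped): tail at byte 400 of cycle 2,
 *     head at byte 100 of cycle 3 => free = 400 - 100 = 300 bytes.
 *   - Head two or more cycles ahead: the reserve head has lapped the
 *     tail, so report 0 bytes free.
 */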

static void
xlog_ioend_work(
	struct work_struct	*work)
{
	struct xlog_in_core	*iclog =
		container_of(work, struct xlog_in_core, ic_end_io_work);
	struct xlog		*log = iclog->ic_log;
	int			error;

	error = blk_status_to_errno(iclog->ic_bio.bi_status);
#ifdef DEBUG
	/* treat writes with injected CRC errors as failed */
	if (iclog->ic_fail_crc)
		error = -EIO;
#endif

	/*
	 * Race to shutdown the filesystem if we see an error.
	 */
	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
		xfs_alert(log->l_mp, "log I/O error %d", error);
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
	}

	xlog_state_done_syncing(iclog);
	bio_uninit(&iclog->ic_bio);

	/*
	 * Drop the lock to signal that we are done. Nothing references the
	 * iclog after this, so an unmount waiting on this lock can now tear it
	 * down safely. As such, it is unsafe to reference the iclog after the
	 * unlock as we could race with it being freed.
	 */
	up(&iclog->ic_sema);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	if (mp->m_logbufs <= 0)
		mp->m_logbufs = XLOG_MAX_ICLOGS;
	if (mp->m_logbsize <= 0)
		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;

	log->l_iclog_bufs = mp->m_logbufs;
	log->l_iclog_size = mp->m_logbsize;

	/*
	 * # headers = size / 32k - one header holds cycles from 32k of data.
	 */
	log->l_iclog_heads =
		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
}
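/*
 * Worked example for the sizing above (illustrative only). With the
 * default 32kB iclog buffers, XLOG_HEADER_CYCLE_SIZE of 32kB gives one
 * record header per buffer, so l_iclog_hsize = 1 << BBSHIFT = 512
 * bytes. A tuned logbsize of 256kB needs 8 headers (one per 32kB of
 * data), giving a 4kB header area and leaving 256kB - 4kB for payload
 * (see the ic_size assignment in xlog_alloc_log() below).
 */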
void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog = NULL;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize	   = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	error = -EFSCORRUPTED;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				 log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				 log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
		    !xfs_sb_version_haslogv2(&mp->m_sb)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				 log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined.  It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory.  See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp);
		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
				sizeof(struct bio_vec);

		iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
		if (!iclog)
			goto out_free_iclog;

		*iclogp = iclog;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
						KM_MAYFAIL | KM_ZERO);
		if (!iclog->ic_data)
			goto out_free_iclog;
#ifdef DEBUG
		log->l_iclog_bak[i] = &iclog->ic_header;
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		spin_lock_init(&iclog->ic_callback_lock);
		INIT_LIST_HEAD(&iclog->ic_callbacks);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);
		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
		sema_init(&iclog->ic_sema, 1);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
				    WQ_HIGHPRI),
			0, mp->m_super->s_id);
	if (!log->l_ioend_workqueue)
		goto out_free_iclog;

	error = xlog_cil_init(log);
	if (error)
		goto out_destroy_workqueue;
	return log;

out_destroy_workqueue:
	destroy_workqueue(log->l_ioend_workqueue);
out_free_iclog:
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		kmem_free(iclog->ic_data);
		kmem_free(iclog);
		if (prev_iclog == log->l_iclog)
			break;
	}
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */
/*
 * Write out the commit record of a transaction associated with the given
 * ticket to close off a running log write. Return the lsn of the commit record.
 */
int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*lsn)
{
	struct xfs_log_iovec reg = {
		.i_addr = NULL,
		.i_len = 0,
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int	error;

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS,
			   false);
	if (error)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

/*
 * Compute the LSN that we'd need to push the log tail towards in order to have
 * (a) enough on-disk log space to log the number of bytes specified, (b) at
 * least 25% of the log space free, and (c) at least 256 blocks free. If the
 * log free space already meets all three thresholds, this function returns
 * NULLCOMMITLSN.
 */
xfs_lsn_t
xlog_grant_push_threshold(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * Set the threshold for the minimum number of free blocks in the
	 * log to the maximum of what the caller needs, one quarter of the
	 * log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = max(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return NULLCOMMITLSN;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
						&threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last
	 * log record known to be on disk. Use a snapshot of the last sync lsn
	 * so that it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	return threshold_lsn;
}
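/*
 * Worked example for xlog_grant_push_threshold() (illustrative only).
 * Suppose a 100MB log (l_logBBsize = 204800 basic blocks) with the
 * reserve head showing 10MB (20480 blocks) free, and a caller needing
 * 64kB:
 *
 *   free_threshold = max(BTOBB(64k), 204800 >> 2, 256)
 *                  = max(128, 51200, 256) = 51200 blocks (25% of log)
 *
 * Only 20480 blocks are free, so the threshold is not met and we push
 * the tail towards tail_lsn + 51200 blocks (wrapping the cycle if the
 * sum passes the physical end of the log), capped at l_last_sync_lsn.
 */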
/*
 * Push the tail of the log if we need to do so to maintain the free log space
 * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
 * policy which pushes on an lsn which is further along in the log once we
 * reach the high water mark.  In this manner, we would be creating a low water
 * mark.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn;

	threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
	if (threshold_lsn == NULLCOMMITLSN || XLOG_FORCED_SHUTDOWN(log))
		return;

	/*
	 * Get the transaction layer to kick the dirty buffers out to
	 * disk asynchronously. No point in trying to do this if
	 * the filesystem is shutting down.
	 */
	xfs_ail_push(log->l_ailp, threshold_lsn);
}

/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}
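/*
 * Illustrative note on the cycle stamping above (not from the original
 * file): drives are assumed to write whole 512-byte sectors atomically,
 * so recovery can tell how far a torn log write got by checking each
 * sector's cycle number. For a record written in cycle 7,
 * xlog_pack_data() saves the first __be32 of every 512-byte block into
 * h_cycle_data[] (or into the extended headers beyond 32kB of data) and
 * overwrites it with 7; the recovery side restores the saved words.
 * Any sector still carrying an older cycle number marks the end of the
 * valid data.
 */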
/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum_update((char *)rhead,
			      sizeof(struct xlog_rec_header),
			      offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}

	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}

static void
xlog_bio_end_io(
	struct bio		*bio)
{
	struct xlog_in_core	*iclog = bio->bi_private;

	queue_work(iclog->ic_log->l_ioend_workqueue,
		   &iclog->ic_end_io_work);
}

static int
xlog_map_iclog_data(
	struct bio		*bio,
	void			*data,
	size_t			count)
{
	do {
		struct page	*page = kmem_to_page(data);
		unsigned int	off = offset_in_page(data);
		size_t		len = min_t(size_t, count, PAGE_SIZE - off);

		if (bio_add_page(bio, page, len, off) != len)
			return -EIO;

		data += len;
		count -= len;
	} while (count);

	return 0;
}

STATIC void
xlog_write_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint64_t		bno,
	unsigned int		count,
	bool			need_flush)
{
	ASSERT(bno < log->l_logBBsize);

	/*
	 * We lock the iclogbufs here so that we can serialise against I/O
	 * completion during unmount.  We might be processing a shutdown
	 * triggered during unmount, and that can occur asynchronously to the
	 * unmount thread, and hence we need to ensure that completes before
	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
	 * across the log IO to achieve that.
	 */
	down(&iclog->ic_sema);
	if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) {
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here.  We kick off the state machine and unlock
		 * the buffer manually, the code needs to be kept in sync
		 * with the I/O completion path.
		 */
		xlog_state_done_syncing(iclog);
		up(&iclog->ic_sema);
		return;
	}

	bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
	bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
	iclog->ic_bio.bi_private = iclog;

	/*
	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer that there
	 * are more IOs coming immediately after this one. This prevents the
	 * block layer writeback throttle from throttling log writes behind
	 * background metadata writeback and causing priority inversions.
	 */
	iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC |
				REQ_IDLE | REQ_FUA;
	if (need_flush)
		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;

	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
		return;
	}
	if (is_vmalloc_addr(iclog->ic_data))
		flush_kernel_vmap_range(iclog->ic_data, count);

	/*
	 * If this log buffer would straddle the end of the log we will have
	 * to split it up into two bios, so that we can continue at the start.
	 */
	if (bno + BTOBB(count) > log->l_logBBsize) {
		struct bio *split;

		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
				  GFP_NOIO, &fs_bio_set);
		bio_chain(split, &iclog->ic_bio);
		submit_bio(split);

		/* restart at logical offset zero for the remainder */
		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
	}

	submit_bio(&iclog->ic_bio);
}
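/*
 * Worked example of the split-write arithmetic above (illustrative
 * only). With l_logBBsize = 1000 basic blocks, a 100-block write
 * starting at bno = 950 straddles the physical end of the log:
 * bio_split() carves off the first 1000 - 950 = 50 blocks, which go to
 * sectors 950..999, and the chained remainder restarts at
 * l_logBBstart, covering logical blocks 0..49 of the next cycle.
 */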
/*
 * We need to bump cycle number for the part of the iclog that is
 * written to the start of the log. Watch out for the header magic
 * number case, though.
 */
static void
xlog_split_iclog(
	struct xlog		*log,
	void			*data,
	uint64_t		bno,
	unsigned int		count)
{
	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
	unsigned int		i;

	for (i = split_offset; i < count; i += BBSIZE) {
		uint32_t cycle = get_unaligned_be32(data + i);

		if (++cycle == XLOG_HEADER_MAGIC_NUM)
			cycle++;
		put_unaligned_be32(cycle, data + i);
	}
}

static int
xlog_calc_iclog_size(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint32_t		*roundoff)
{
	uint32_t count_init, count;
	bool use_lsunit;

	use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
			log->l_mp->m_sb.sb_logsunit > 1;

	/* Add for LR header */
	count_init = log->l_iclog_hsize + iclog->ic_offset;

	/* Round out the log write size */
	if (use_lsunit) {
		/* we have a v2 stripe unit to use */
		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
	} else {
		count = BBTOB(BTOBB(count_init));
	}

	ASSERT(count >= count_init);
	*roundoff = count - count_init;

	if (use_lsunit)
		ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit);
	else
		ASSERT(*roundoff < BBTOB(1));
	return count;
}
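/*
 * Worked example for xlog_calc_iclog_size() (illustrative only). With a
 * 512-byte header area and ic_offset = 10000 bytes, count_init = 10512.
 * On a v2 log with a 32kB stripe unit the write rounds up to
 * count = 32768, so roundoff = 22256 bytes; without a stripe unit it
 * only rounds to the next 512-byte block: count = 10752, roundoff = 240.
 * xlog_sync() below adds the roundoff to both grant heads so that
 * reservation accounting matches what is physically written.
 */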
1905 */ 1906 STATIC void 1907 xlog_sync( 1908 struct xlog *log, 1909 struct xlog_in_core *iclog) 1910 { 1911 unsigned int count; /* byte count of bwrite */ 1912 unsigned int roundoff; /* roundoff to BB or stripe */ 1913 uint64_t bno; 1914 unsigned int size; 1915 bool need_flush = true, split = false; 1916 1917 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 1918 1919 count = xlog_calc_iclog_size(log, iclog, &roundoff); 1920 1921 /* move grant heads by roundoff in sync */ 1922 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); 1923 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); 1924 1925 /* put cycle number in every block */ 1926 xlog_pack_data(log, iclog, roundoff); 1927 1928 /* real byte length */ 1929 size = iclog->ic_offset; 1930 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) 1931 size += roundoff; 1932 iclog->ic_header.h_len = cpu_to_be32(size); 1933 1934 XFS_STATS_INC(log->l_mp, xs_log_writes); 1935 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); 1936 1937 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); 1938 1939 /* Do we need to split this write into 2 parts? */ 1940 if (bno + BTOBB(count) > log->l_logBBsize) { 1941 xlog_split_iclog(log, &iclog->ic_header, bno, count); 1942 split = true; 1943 } 1944 1945 /* calculate the checksum */ 1946 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, 1947 iclog->ic_datap, size); 1948 /* 1949 * Intentionally corrupt the log record CRC based on the error injection 1950 * frequency, if defined. This facilitates testing log recovery in the 1951 * event of torn writes. Hence, set the IOABORT state to abort the log 1952 * write on I/O completion and shut down the fs. The subsequent mount 1953 * detects the bad CRC and attempts to recover. 1954 */ 1955 #ifdef DEBUG 1956 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { 1957 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); 1958 iclog->ic_fail_crc = true; 1959 xfs_warn(log->l_mp, 1960 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", 1961 be64_to_cpu(iclog->ic_header.h_lsn)); 1962 } 1963 #endif 1964 1965 /* 1966 * Flush the data device before flushing the log to make sure all 1967 * metadata written back from the AIL actually made it to disk before 1968 * stamping the new log tail LSN into the log buffer. For an external 1969 * log we need to issue the flush explicitly, and unfortunately 1970 * synchronously here; for an internal log we can simply use the block 1971 * layer state machine for preflushes. 1972 */ 1973 if (log->l_targ != log->l_mp->m_ddev_targp || split) { 1974 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); 1975 need_flush = false; 1976 } 1977 1978 xlog_verify_iclog(log, iclog, count); 1979 xlog_write_iclog(log, iclog, bno, count, need_flush); 1980 } 1981 1982 /* 1983 * Deallocate a log structure 1984 */ 1985 STATIC void 1986 xlog_dealloc_log( 1987 struct xlog *log) 1988 { 1989 xlog_in_core_t *iclog, *next_iclog; 1990 int i; 1991 1992 xlog_cil_destroy(log); 1993 1994 /* 1995 * Cycle all the iclogbuf locks to make sure all log IO completion 1996 * is done before we tear down these buffers.
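 *
 * A sketch of why this works (no new mechanism, just restating the
 * code below): xlog_write_iclog() takes ic_sema before submitting the
 * bio and the I/O completion path releases it, so a down()/up() pair
 * here cannot complete until any in-flight write on that iclog has
 * finished.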
1997 */ 1998 iclog = log->l_iclog; 1999 for (i = 0; i < log->l_iclog_bufs; i++) { 2000 down(&iclog->ic_sema); 2001 up(&iclog->ic_sema); 2002 iclog = iclog->ic_next; 2003 } 2004 2005 iclog = log->l_iclog; 2006 for (i = 0; i < log->l_iclog_bufs; i++) { 2007 next_iclog = iclog->ic_next; 2008 kmem_free(iclog->ic_data); 2009 kmem_free(iclog); 2010 iclog = next_iclog; 2011 } 2012 2013 log->l_mp->m_log = NULL; 2014 destroy_workqueue(log->l_ioend_workqueue); 2015 kmem_free(log); 2016 } 2017 2018 /* 2019 * Update counters atomically now that memcpy is done. 2020 */ 2021 static inline void 2022 xlog_state_finish_copy( 2023 struct xlog *log, 2024 struct xlog_in_core *iclog, 2025 int record_cnt, 2026 int copy_bytes) 2027 { 2028 lockdep_assert_held(&log->l_icloglock); 2029 2030 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 2031 iclog->ic_offset += copy_bytes; 2032 } 2033 2034 /* 2035 * print out info relating to regions written which consume 2036 * the reservation 2037 */ 2038 void 2039 xlog_print_tic_res( 2040 struct xfs_mount *mp, 2041 struct xlog_ticket *ticket) 2042 { 2043 uint i; 2044 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 2045 2046 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 2047 #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str 2048 static char *res_type_str[] = { 2049 REG_TYPE_STR(BFORMAT, "bformat"), 2050 REG_TYPE_STR(BCHUNK, "bchunk"), 2051 REG_TYPE_STR(EFI_FORMAT, "efi_format"), 2052 REG_TYPE_STR(EFD_FORMAT, "efd_format"), 2053 REG_TYPE_STR(IFORMAT, "iformat"), 2054 REG_TYPE_STR(ICORE, "icore"), 2055 REG_TYPE_STR(IEXT, "iext"), 2056 REG_TYPE_STR(IBROOT, "ibroot"), 2057 REG_TYPE_STR(ILOCAL, "ilocal"), 2058 REG_TYPE_STR(IATTR_EXT, "iattr_ext"), 2059 REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), 2060 REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), 2061 REG_TYPE_STR(QFORMAT, "qformat"), 2062 REG_TYPE_STR(DQUOT, "dquot"), 2063 REG_TYPE_STR(QUOTAOFF, "quotaoff"), 2064 REG_TYPE_STR(LRHEADER, "LR header"), 2065 REG_TYPE_STR(UNMOUNT, "unmount"), 2066 REG_TYPE_STR(COMMIT, "commit"), 2067 REG_TYPE_STR(TRANSHDR, "trans header"), 2068 REG_TYPE_STR(ICREATE, "inode create"), 2069 REG_TYPE_STR(RUI_FORMAT, "rui_format"), 2070 REG_TYPE_STR(RUD_FORMAT, "rud_format"), 2071 REG_TYPE_STR(CUI_FORMAT, "cui_format"), 2072 REG_TYPE_STR(CUD_FORMAT, "cud_format"), 2073 REG_TYPE_STR(BUI_FORMAT, "bui_format"), 2074 REG_TYPE_STR(BUD_FORMAT, "bud_format"), 2075 }; 2076 BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1); 2077 #undef REG_TYPE_STR 2078 2079 xfs_warn(mp, "ticket reservation summary:"); 2080 xfs_warn(mp, " unit res = %d bytes", 2081 ticket->t_unit_res); 2082 xfs_warn(mp, " current res = %d bytes", 2083 ticket->t_curr_res); 2084 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", 2085 ticket->t_res_arr_sum, ticket->t_res_o_flow); 2086 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", 2087 ticket->t_res_num_ophdrs, ophdr_spc); 2088 xfs_warn(mp, " ophdr + reg = %u bytes", 2089 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); 2090 xfs_warn(mp, " num regions = %u", 2091 ticket->t_res_num); 2092 2093 for (i = 0; i < ticket->t_res_num; i++) { 2094 uint r_type = ticket->t_res_arr[i].r_type; 2095 xfs_warn(mp, "region[%u]: %s - %u bytes", i, 2096 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 2097 "bad-rtype" : res_type_str[r_type]), 2098 ticket->t_res_arr[i].r_len); 2099 } 2100 } 2101 2102 /* 2103 * Print a summary of the transaction. 
2104 */ 2105 void 2106 xlog_print_trans( 2107 struct xfs_trans *tp) 2108 { 2109 struct xfs_mount *mp = tp->t_mountp; 2110 struct xfs_log_item *lip; 2111 2112 /* dump core transaction and ticket info */ 2113 xfs_warn(mp, "transaction summary:"); 2114 xfs_warn(mp, " log res = %d", tp->t_log_res); 2115 xfs_warn(mp, " log count = %d", tp->t_log_count); 2116 xfs_warn(mp, " flags = 0x%x", tp->t_flags); 2117 2118 xlog_print_tic_res(mp, tp->t_ticket); 2119 2120 /* dump each log item */ 2121 list_for_each_entry(lip, &tp->t_items, li_trans) { 2122 struct xfs_log_vec *lv = lip->li_lv; 2123 struct xfs_log_iovec *vec; 2124 int i; 2125 2126 xfs_warn(mp, "log item: "); 2127 xfs_warn(mp, " type = 0x%x", lip->li_type); 2128 xfs_warn(mp, " flags = 0x%lx", lip->li_flags); 2129 if (!lv) 2130 continue; 2131 xfs_warn(mp, " niovecs = %d", lv->lv_niovecs); 2132 xfs_warn(mp, " size = %d", lv->lv_size); 2133 xfs_warn(mp, " bytes = %d", lv->lv_bytes); 2134 xfs_warn(mp, " buf len = %d", lv->lv_buf_len); 2135 2136 /* dump each iovec for the log item */ 2137 vec = lv->lv_iovecp; 2138 for (i = 0; i < lv->lv_niovecs; i++) { 2139 int dumplen = min(vec->i_len, 32); 2140 2141 xfs_warn(mp, " iovec[%d]", i); 2142 xfs_warn(mp, " type = 0x%x", vec->i_type); 2143 xfs_warn(mp, " len = %d", vec->i_len); 2144 xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i); 2145 xfs_hex_dump(vec->i_addr, dumplen); 2146 2147 vec++; 2148 } 2149 } 2150 } 2151 2152 /* 2153 * Calculate the potential space needed by the log vector. We may need a start 2154 * record, and each region gets its own struct xlog_op_header and may need to be 2155 * double word aligned. 2156 */ 2157 static int 2158 xlog_write_calc_vec_length( 2159 struct xlog_ticket *ticket, 2160 struct xfs_log_vec *log_vector, 2161 bool need_start_rec) 2162 { 2163 struct xfs_log_vec *lv; 2164 int headers = need_start_rec ? 1 : 0; 2165 int len = 0; 2166 int i; 2167 2168 for (lv = log_vector; lv; lv = lv->lv_next) { 2169 /* we don't write ordered log vectors */ 2170 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) 2171 continue; 2172 2173 headers += lv->lv_niovecs; 2174 2175 for (i = 0; i < lv->lv_niovecs; i++) { 2176 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; 2177 2178 len += vecp->i_len; 2179 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); 2180 } 2181 } 2182 2183 ticket->t_res_num_ophdrs += headers; 2184 len += headers * sizeof(struct xlog_op_header); 2185 2186 return len; 2187 } 2188 2189 static void 2190 xlog_write_start_rec( 2191 struct xlog_op_header *ophdr, 2192 struct xlog_ticket *ticket) 2193 { 2194 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2195 ophdr->oh_clientid = ticket->t_clientid; 2196 ophdr->oh_len = 0; 2197 ophdr->oh_flags = XLOG_START_TRANS; 2198 ophdr->oh_res2 = 0; 2199 } 2200 2201 static xlog_op_header_t * 2202 xlog_write_setup_ophdr( 2203 struct xlog *log, 2204 struct xlog_op_header *ophdr, 2205 struct xlog_ticket *ticket, 2206 uint flags) 2207 { 2208 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2209 ophdr->oh_clientid = ticket->t_clientid; 2210 ophdr->oh_res2 = 0; 2211 2212 /* are we copying a commit or unmount record? */ 2213 ophdr->oh_flags = flags; 2214 2215 /* 2216 * We've seen logs corrupted with bad transaction client ids. This check 2217 * makes sure that XFS doesn't keep generating them: turn a bad clientid 2218 * into an EIO and shut down the filesystem.
2219 */ 2220 switch (ophdr->oh_clientid) { 2221 case XFS_TRANSACTION: 2222 case XFS_VOLUME: 2223 case XFS_LOG: 2224 break; 2225 default: 2226 xfs_warn(log->l_mp, 2227 "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT, 2228 ophdr->oh_clientid, ticket); 2229 return NULL; 2230 } 2231 2232 return ophdr; 2233 } 2234 2235 /* 2236 * Set up the parameters of the region copy into the log. This has 2237 * to handle the case where the region write is split across multiple 2238 * log buffers - this state is kept external to this function so that 2239 * this code can be written in an obvious, self-documenting manner. 2240 */ 2241 static int 2242 xlog_write_setup_copy( 2243 struct xlog_ticket *ticket, 2244 struct xlog_op_header *ophdr, 2245 int space_available, 2246 int space_required, 2247 int *copy_off, 2248 int *copy_len, 2249 int *last_was_partial_copy, 2250 int *bytes_consumed) 2251 { 2252 int still_to_copy; 2253 2254 still_to_copy = space_required - *bytes_consumed; 2255 *copy_off = *bytes_consumed; 2256 2257 if (still_to_copy <= space_available) { 2258 /* write of region completes here */ 2259 *copy_len = still_to_copy; 2260 ophdr->oh_len = cpu_to_be32(*copy_len); 2261 if (*last_was_partial_copy) 2262 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); 2263 *last_was_partial_copy = 0; 2264 *bytes_consumed = 0; 2265 return 0; 2266 } 2267 2268 /* partial write of region, needs extra log op header reservation */ 2269 *copy_len = space_available; 2270 ophdr->oh_len = cpu_to_be32(*copy_len); 2271 ophdr->oh_flags |= XLOG_CONTINUE_TRANS; 2272 if (*last_was_partial_copy) 2273 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; 2274 *bytes_consumed += *copy_len; 2275 (*last_was_partial_copy)++; 2276 2277 /* account for new log op header */ 2278 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2279 ticket->t_res_num_ophdrs++; 2280 2281 return sizeof(struct xlog_op_header); 2282 } 2283 2284 static int 2285 xlog_write_copy_finish( 2286 struct xlog *log, 2287 struct xlog_in_core *iclog, 2288 uint flags, 2289 int *record_cnt, 2290 int *data_cnt, 2291 int *partial_copy, 2292 int *partial_copy_len, 2293 int log_offset, 2294 struct xlog_in_core **commit_iclog) 2295 { 2296 int error; 2297 2298 if (*partial_copy) { 2299 /* 2300 * This iclog has already been marked WANT_SYNC by 2301 * xlog_state_get_iclog_space. 2302 */ 2303 spin_lock(&log->l_icloglock); 2304 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2305 *record_cnt = 0; 2306 *data_cnt = 0; 2307 goto release_iclog; 2308 } 2309 2310 *partial_copy = 0; 2311 *partial_copy_len = 0; 2312 2313 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { 2314 /* no more space in this iclog - push it. */ 2315 spin_lock(&log->l_icloglock); 2316 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2317 *record_cnt = 0; 2318 *data_cnt = 0; 2319 2320 if (iclog->ic_state == XLOG_STATE_ACTIVE) 2321 xlog_state_switch_iclogs(log, iclog, 0); 2322 else 2323 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || 2324 iclog->ic_state == XLOG_STATE_IOERROR); 2325 if (!commit_iclog) 2326 goto release_iclog; 2327 spin_unlock(&log->l_icloglock); 2328 ASSERT(flags & XLOG_COMMIT_TRANS); 2329 *commit_iclog = iclog; 2330 } 2331 2332 return 0; 2333 2334 release_iclog: 2335 error = xlog_state_release_iclog(log, iclog); 2336 spin_unlock(&log->l_icloglock); 2337 return error; 2338 } 2339 2340 /* 2341 * Write some region out to the in-core log 2342 * 2343 * This will be called when writing externally provided regions or when 2344 * writing out a commit record for a given transaction.
2345 * 2346 * General algorithm: 2347 * 1. Find total length of this write. This may include adding to the 2348 * lengths passed in. 2349 * 2. Check whether we violate the ticket's reservation. 2350 * 3. While writing to this iclog 2351 * A. Reserve as much space in this iclog as we can get 2352 * B. If this is the first write, save away the start lsn 2353 * C. While writing this region: 2354 * 1. If first write of transaction, write start record 2355 * 2. Write log operation header (header per region) 2356 * 3. Find out if we can fit entire region into this iclog 2357 * 4. Potentially, verify destination memcpy ptr 2358 * 5. Memcpy (partial) region 2359 * 6. If partial copy, release iclog; otherwise, continue 2360 * copying more regions into current iclog 2361 * 4. Mark want sync bit (in simulation mode) 2362 * 5. Release iclog for potential flush to on-disk log. 2363 * 2364 * ERRORS: 2365 * 1. Panic if reservation is overrun. This should never happen since 2366 * reservation amounts are generated internally by the filesystem. 2367 * NOTES: 2368 * 1. Tickets are single threaded data structures. 2369 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the 2370 * syncing routine. When a single log_write region needs to span 2371 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set 2372 * on all log operation writes which don't contain the end of the 2373 * region. The XLOG_END_TRANS bit is used for the in-core log 2374 * operation which contains the end of the continued log_write region. 2375 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, 2376 * we don't really know exactly how much space will be used. As a result, 2377 * we don't update ic_offset until the end when we know exactly how many 2378 * bytes have been written out. 2379 */ 2380 int 2381 xlog_write( 2382 struct xlog *log, 2383 struct xfs_log_vec *log_vector, 2384 struct xlog_ticket *ticket, 2385 xfs_lsn_t *start_lsn, 2386 struct xlog_in_core **commit_iclog, 2387 uint flags, 2388 bool need_start_rec) 2389 { 2390 struct xlog_in_core *iclog = NULL; 2391 struct xfs_log_vec *lv = log_vector; 2392 struct xfs_log_iovec *vecp = lv->lv_iovecp; 2393 int index = 0; 2394 int len; 2395 int partial_copy = 0; 2396 int partial_copy_len = 0; 2397 int contwr = 0; 2398 int record_cnt = 0; 2399 int data_cnt = 0; 2400 int error = 0; 2401 2402 /* 2403 * If this is a commit or unmount transaction, we don't need a start 2404 * record to be written. We do, however, have to account for the 2405 * commit or unmount header that gets written. Hence we always have 2406 * to account for an extra xlog_op_header here. 2407 */ 2408 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2409 if (ticket->t_curr_res < 0) { 2410 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 2411 "ctx ticket reservation ran out. Need to up reservation"); 2412 xlog_print_tic_res(log->l_mp, ticket); 2413 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); 2414 } 2415 2416 len = xlog_write_calc_vec_length(ticket, log_vector, need_start_rec); 2417 *start_lsn = 0; 2418 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2419 void *ptr; 2420 int log_offset; 2421 2422 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 2423 &contwr, &log_offset); 2424 if (error) 2425 return error; 2426 2427 ASSERT(log_offset <= iclog->ic_size - 1); 2428 ptr = iclog->ic_datap + log_offset; 2429 2430 /* start_lsn is the first lsn written to. That's all we need.
*/ 2431 if (!*start_lsn) 2432 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2433 2434 /* 2435 * This loop writes out as many regions as can fit in the amount 2436 * of space which was allocated by xlog_state_get_iclog_space(). 2437 */ 2438 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2439 struct xfs_log_iovec *reg; 2440 struct xlog_op_header *ophdr; 2441 int copy_len; 2442 int copy_off; 2443 bool ordered = false; 2444 2445 /* ordered log vectors have no regions to write */ 2446 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { 2447 ASSERT(lv->lv_niovecs == 0); 2448 ordered = true; 2449 goto next_lv; 2450 } 2451 2452 reg = &vecp[index]; 2453 ASSERT(reg->i_len % sizeof(int32_t) == 0); 2454 ASSERT((unsigned long)ptr % sizeof(int32_t) == 0); 2455 2456 /* 2457 * Before we start formatting log vectors, we need to 2458 * write a start record. Only do this for the first 2459 * iclog we write to. 2460 */ 2461 if (need_start_rec) { 2462 xlog_write_start_rec(ptr, ticket); 2463 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2464 sizeof(struct xlog_op_header)); 2465 } 2466 2467 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); 2468 if (!ophdr) 2469 return -EIO; 2470 2471 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2472 sizeof(struct xlog_op_header)); 2473 2474 len += xlog_write_setup_copy(ticket, ophdr, 2475 iclog->ic_size-log_offset, 2476 reg->i_len, 2477 &copy_off, &copy_len, 2478 &partial_copy, 2479 &partial_copy_len); 2480 xlog_verify_dest_ptr(log, ptr); 2481 2482 /* 2483 * Copy region. 2484 * 2485 * Unmount records just log an opheader, so can have 2486 * empty payloads with no data region to copy. Hence we 2487 * only copy the payload if the vector says it has data 2488 * to copy. 2489 */ 2490 ASSERT(copy_len >= 0); 2491 if (copy_len > 0) { 2492 memcpy(ptr, reg->i_addr + copy_off, copy_len); 2493 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2494 copy_len); 2495 } 2496 copy_len += sizeof(struct xlog_op_header); 2497 record_cnt++; 2498 if (need_start_rec) { 2499 copy_len += sizeof(struct xlog_op_header); 2500 record_cnt++; 2501 need_start_rec = false; 2502 } 2503 data_cnt += contwr ? copy_len : 0; 2504 2505 error = xlog_write_copy_finish(log, iclog, flags, 2506 &record_cnt, &data_cnt, 2507 &partial_copy, 2508 &partial_copy_len, 2509 log_offset, 2510 commit_iclog); 2511 if (error) 2512 return error; 2513 2514 /* 2515 * if we had a partial copy, we need to get more iclog 2516 * space but we don't want to increment the region 2517 * index because there is still more in this region to 2518 * write. 2519 * 2520 * If we completed writing this region, and we flushed 2521 * the iclog (indicated by resetting of the record 2522 * count), then we also need to get more log space. If 2523 * this was the last record, though, we are done and 2524 * can just return.
2525 */ 2526 if (partial_copy) 2527 break; 2528 2529 if (++index == lv->lv_niovecs) { 2530 next_lv: 2531 lv = lv->lv_next; 2532 index = 0; 2533 if (lv) 2534 vecp = lv->lv_iovecp; 2535 } 2536 if (record_cnt == 0 && !ordered) { 2537 if (!lv) 2538 return 0; 2539 break; 2540 } 2541 } 2542 } 2543 2544 ASSERT(len == 0); 2545 2546 spin_lock(&log->l_icloglock); 2547 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 2548 if (commit_iclog) { 2549 ASSERT(flags & XLOG_COMMIT_TRANS); 2550 *commit_iclog = iclog; 2551 } else { 2552 error = xlog_state_release_iclog(log, iclog); 2553 } 2554 spin_unlock(&log->l_icloglock); 2555 2556 return error; 2557 } 2558 2559 static void 2560 xlog_state_activate_iclog( 2561 struct xlog_in_core *iclog, 2562 int *iclogs_changed) 2563 { 2564 ASSERT(list_empty_careful(&iclog->ic_callbacks)); 2565 2566 /* 2567 * If the number of ops in this iclog indicates it just contains the 2568 * dummy transaction, we can change state into IDLE (the second time 2569 * around). Otherwise we should change the state to NEED a dummy 2570 * record. We don't need to cover the dummy. 2571 */ 2572 if (*iclogs_changed == 0 && 2573 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { 2574 *iclogs_changed = 1; 2575 } else { 2576 /* 2577 * We have two dirty iclogs so start over. This could also be 2578 * the number of ops indicating this is not the dummy going out. 2579 */ 2580 *iclogs_changed = 2; 2581 } 2582 2583 iclog->ic_state = XLOG_STATE_ACTIVE; 2584 iclog->ic_offset = 0; 2585 iclog->ic_header.h_num_logops = 0; 2586 memset(iclog->ic_header.h_cycle_data, 0, 2587 sizeof(iclog->ic_header.h_cycle_data)); 2588 iclog->ic_header.h_lsn = 0; 2589 } 2590 2591 /* 2592 * Loop through all iclogs and mark all iclogs currently marked DIRTY as 2593 * ACTIVE after iclog I/O has completed. 2594 */ 2595 static void 2596 xlog_state_activate_iclogs( 2597 struct xlog *log, 2598 int *iclogs_changed) 2599 { 2600 struct xlog_in_core *iclog = log->l_iclog; 2601 2602 do { 2603 if (iclog->ic_state == XLOG_STATE_DIRTY) 2604 xlog_state_activate_iclog(iclog, iclogs_changed); 2605 /* 2606 * The ordering of marking iclogs ACTIVE must be maintained, so 2607 * an iclog doesn't become ACTIVE beyond one that is SYNCING. 2608 */ 2609 else if (iclog->ic_state != XLOG_STATE_ACTIVE) 2610 break; 2611 } while ((iclog = iclog->ic_next) != log->l_iclog); 2612 } 2613 2614 static int 2615 xlog_covered_state( 2616 int prev_state, 2617 int iclogs_changed) 2618 { 2619 /* 2620 * We go to NEED for any non-covering writes. We go to NEED2 if we just 2621 * wrote the first covering record (DONE). We go to IDLE if we just 2622 * wrote the second covering record (DONE2) and remain in IDLE until a 2623 * non-covering write occurs.
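 *
 * As a sketch of the transitions implemented below (iclogs_changed == 1
 * means the iclog that went dirty held only the dummy covering record):
 *
 *	IDLE  + dummy record	-> IDLE
 *	DONE  + dummy record	-> NEED2
 *	DONE2 + dummy record	-> IDLE
 *	anything else		-> NEED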
2624 */ 2625 switch (prev_state) { 2626 case XLOG_STATE_COVER_IDLE: 2627 if (iclogs_changed == 1) 2628 return XLOG_STATE_COVER_IDLE; fallthrough; 2629 case XLOG_STATE_COVER_NEED: 2630 case XLOG_STATE_COVER_NEED2: 2631 break; 2632 case XLOG_STATE_COVER_DONE: 2633 if (iclogs_changed == 1) 2634 return XLOG_STATE_COVER_NEED2; 2635 break; 2636 case XLOG_STATE_COVER_DONE2: 2637 if (iclogs_changed == 1) 2638 return XLOG_STATE_COVER_IDLE; 2639 break; 2640 default: 2641 ASSERT(0); 2642 } 2643 2644 return XLOG_STATE_COVER_NEED; 2645 } 2646 2647 STATIC void 2648 xlog_state_clean_iclog( 2649 struct xlog *log, 2650 struct xlog_in_core *dirty_iclog) 2651 { 2652 int iclogs_changed = 0; 2653 2654 dirty_iclog->ic_state = XLOG_STATE_DIRTY; 2655 2656 xlog_state_activate_iclogs(log, &iclogs_changed); 2657 wake_up_all(&dirty_iclog->ic_force_wait); 2658 2659 if (iclogs_changed) { 2660 log->l_covered_state = xlog_covered_state(log->l_covered_state, 2661 iclogs_changed); 2662 } 2663 } 2664 2665 STATIC xfs_lsn_t 2666 xlog_get_lowest_lsn( 2667 struct xlog *log) 2668 { 2669 struct xlog_in_core *iclog = log->l_iclog; 2670 xfs_lsn_t lowest_lsn = 0, lsn; 2671 2672 do { 2673 if (iclog->ic_state == XLOG_STATE_ACTIVE || 2674 iclog->ic_state == XLOG_STATE_DIRTY) 2675 continue; 2676 2677 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2678 if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0) 2679 lowest_lsn = lsn; 2680 } while ((iclog = iclog->ic_next) != log->l_iclog); 2681 2682 return lowest_lsn; 2683 } 2684 2685 /* 2686 * Completion of an iclog IO does not imply that a transaction has completed, as 2687 * transactions can be large enough to span many iclogs. We cannot change the 2688 * tail of the log half way through a transaction as this may be the only 2689 * transaction in the log and moving the tail to point to the middle of it 2690 * will prevent recovery from finding the start of the transaction. Hence we 2691 * should only update the last_sync_lsn if this iclog contains transaction 2692 * completion callbacks on it. 2693 * 2694 * We have to do this before we drop the icloglock to ensure we are the only one 2695 * that can update it. 2696 * 2697 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick 2698 * the reservation grant head pushing. This is due to the fact that the push 2699 * target is bound by the current last_sync_lsn value. Hence if we have a large 2700 * amount of log space bound up in this committing transaction then the 2701 * last_sync_lsn value may be the limiting factor preventing tail pushing from 2702 * freeing space in the log. Hence once we've updated the last_sync_lsn we 2703 * should push the AIL to ensure the push target (and hence the grant head) is 2704 * no longer bound by the old log head location and can move forwards and make 2705 * progress again. 2706 */ 2707 static void 2708 xlog_state_set_callback( 2709 struct xlog *log, 2710 struct xlog_in_core *iclog, 2711 xfs_lsn_t header_lsn) 2712 { 2713 iclog->ic_state = XLOG_STATE_CALLBACK; 2714 2715 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), 2716 header_lsn) <= 0); 2717 2718 if (list_empty_careful(&iclog->ic_callbacks)) 2719 return; 2720 2721 atomic64_set(&log->l_last_sync_lsn, header_lsn); 2722 xlog_grant_push_ail(log, 0); 2723 } 2724 2725 /* 2726 * Return true if we need to stop processing, false to continue to the next 2727 * iclog. The caller will need to run callbacks if the iclog is returned in the 2728 * XLOG_STATE_CALLBACK state.
2729 */ 2730 static bool 2731 xlog_state_iodone_process_iclog( 2732 struct xlog *log, 2733 struct xlog_in_core *iclog, 2734 bool *ioerror) 2735 { 2736 xfs_lsn_t lowest_lsn; 2737 xfs_lsn_t header_lsn; 2738 2739 switch (iclog->ic_state) { 2740 case XLOG_STATE_ACTIVE: 2741 case XLOG_STATE_DIRTY: 2742 /* 2743 * Skip all iclogs in the ACTIVE & DIRTY states: 2744 */ 2745 return false; 2746 case XLOG_STATE_IOERROR: 2747 /* 2748 * Between marking a filesystem SHUTDOWN and stopping the log, 2749 * we do flush all iclogs to disk (if there wasn't a log I/O 2750 * error). So, we do want things to go smoothly in case of just 2751 * a SHUTDOWN w/o a LOG_IO_ERROR. 2752 */ 2753 *ioerror = true; 2754 return false; 2755 case XLOG_STATE_DONE_SYNC: 2756 /* 2757 * Now that we have an iclog that is in the DONE_SYNC state, do 2758 * one more check here to see if we have chased our tail around. 2759 * If this is not the lowest lsn iclog, then we will leave it 2760 * for another completion to process. 2761 */ 2762 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2763 lowest_lsn = xlog_get_lowest_lsn(log); 2764 if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0) 2765 return false; 2766 xlog_state_set_callback(log, iclog, header_lsn); 2767 return false; 2768 default: 2769 /* 2770 * Can only perform callbacks in order. Since this iclog is not 2771 * in the DONE_SYNC state, we skip the rest and just try to 2772 * clean up. 2773 */ 2774 return true; 2775 } 2776 } 2777 2778 /* 2779 * Keep processing entries in the iclog callback list until we come around and 2780 * it is empty. We need to atomically see that the list is empty and change the 2781 * state to DIRTY so that we don't miss any more callbacks being added. 2782 * 2783 * This function is called with the icloglock held and returns with it held. We 2784 * drop it while running callbacks, however, as holding it over thousands of 2785 * callbacks is unnecessary and causes excessive contention if we do. 2786 */ 2787 static void 2788 xlog_state_do_iclog_callbacks( 2789 struct xlog *log, 2790 struct xlog_in_core *iclog) 2791 __releases(&log->l_icloglock) 2792 __acquires(&log->l_icloglock) 2793 { 2794 spin_unlock(&log->l_icloglock); 2795 spin_lock(&iclog->ic_callback_lock); 2796 while (!list_empty(&iclog->ic_callbacks)) { 2797 LIST_HEAD(tmp); 2798 2799 list_splice_init(&iclog->ic_callbacks, &tmp); 2800 2801 spin_unlock(&iclog->ic_callback_lock); 2802 xlog_cil_process_committed(&tmp); 2803 spin_lock(&iclog->ic_callback_lock); 2804 } 2805 2806 /* 2807 * Pick up the icloglock while still holding the callback lock so we 2808 * serialise against anyone trying to add more callbacks to this iclog 2809 * now we've finished processing. 2810 */ 2811 spin_lock(&log->l_icloglock); 2812 spin_unlock(&iclog->ic_callback_lock); 2813 } 2814 2815 STATIC void 2816 xlog_state_do_callback( 2817 struct xlog *log) 2818 { 2819 struct xlog_in_core *iclog; 2820 struct xlog_in_core *first_iclog; 2821 bool cycled_icloglock; 2822 bool ioerror; 2823 int flushcnt = 0; 2824 int repeats = 0; 2825 2826 spin_lock(&log->l_icloglock); 2827 do { 2828 /* 2829 * Scan all iclogs starting with the one pointed to by the 2830 * log. Reset this starting point each time the log is 2831 * unlocked (during callbacks). 2832 * 2833 * Keep looping through iclogs until one full pass is made 2834 * without running any callbacks. 
2835 */ 2836 first_iclog = log->l_iclog; 2837 iclog = log->l_iclog; 2838 cycled_icloglock = false; 2839 ioerror = false; 2840 repeats++; 2841 2842 do { 2843 if (xlog_state_iodone_process_iclog(log, iclog, 2844 &ioerror)) 2845 break; 2846 2847 if (iclog->ic_state != XLOG_STATE_CALLBACK && 2848 iclog->ic_state != XLOG_STATE_IOERROR) { 2849 iclog = iclog->ic_next; 2850 continue; 2851 } 2852 2853 /* 2854 * Running callbacks will drop the icloglock which means 2855 * we'll have to run at least one more complete loop. 2856 */ 2857 cycled_icloglock = true; 2858 xlog_state_do_iclog_callbacks(log, iclog); 2859 if (XLOG_FORCED_SHUTDOWN(log)) 2860 wake_up_all(&iclog->ic_force_wait); 2861 else 2862 xlog_state_clean_iclog(log, iclog); 2863 iclog = iclog->ic_next; 2864 } while (first_iclog != iclog); 2865 2866 if (repeats > 5000) { 2867 flushcnt += repeats; 2868 repeats = 0; 2869 xfs_warn(log->l_mp, 2870 "%s: possible infinite loop (%d iterations)", 2871 __func__, flushcnt); 2872 } 2873 } while (!ioerror && cycled_icloglock); 2874 2875 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE || 2876 log->l_iclog->ic_state == XLOG_STATE_IOERROR) 2877 wake_up_all(&log->l_flush_wait); 2878 2879 spin_unlock(&log->l_icloglock); 2880 } 2881 2882 2883 /* 2884 * Finish transitioning this iclog to the dirty state. 2885 * 2886 * Make sure that we completely execute this routine only when this is 2887 * the last call to the iclog. There is a good chance that iclog flushes, 2888 * when we reach the end of the physical log, get turned into 2 separate 2889 * bios. Hence, one iclog flush could generate two bio completions. By 2890 * chaining the split bio to the parent in xlog_write_iclog(), we guarantee 2891 * that only the final completion goes through this routine. 2892 * 2893 * Callbacks could take time, so they are done outside the scope of the 2894 * global state machine log lock. 2895 */ 2896 STATIC void 2897 xlog_state_done_syncing( 2898 struct xlog_in_core *iclog) 2899 { 2900 struct xlog *log = iclog->ic_log; 2901 2902 spin_lock(&log->l_icloglock); 2903 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2904 2905 /* 2906 * If we got an error, either on the first buffer, or in the case of 2907 * split log writes, on the second, we shut down the file system and 2908 * no further iclog writes will ever be attempted. 2909 */ 2910 if (!XLOG_FORCED_SHUTDOWN(log)) { 2911 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); 2912 iclog->ic_state = XLOG_STATE_DONE_SYNC; 2913 } 2914 2915 /* 2916 * Someone could be sleeping prior to writing out the next 2917 * iclog buffer; we wake them all. One will get to do the 2918 * I/O, the others get to wait for the result. 2919 */ 2920 wake_up_all(&iclog->ic_write_wait); 2921 spin_unlock(&log->l_icloglock); 2922 xlog_state_do_callback(log); 2923 } 2924 2925 /* 2926 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2927 * sleep. We wait on the flush queue on the head iclog as that should be 2928 * the first iclog to complete flushing. Hence if all iclogs are syncing, 2929 * we will wait here and all new writes will sleep until a sync completes. 2930 * 2931 * The in-core logs are used in a circular fashion. They are not used 2932 * out-of-order even when an iclog past the head is free. 2933 * 2934 * return: 2935 * * log_offset where xlog_write() can start writing into the in-core 2936 * log's data space. 2937 * * in-core log pointer to which xlog_write() should write. 2938 * * boolean indicating this is a continued write to an in-core log.
2939 * If this is the last write, then the in-core log's offset field 2940 * needs to be incremented, depending on the amount of data which 2941 * is copied. 2942 */ 2943 STATIC int 2944 xlog_state_get_iclog_space( 2945 struct xlog *log, 2946 int len, 2947 struct xlog_in_core **iclogp, 2948 struct xlog_ticket *ticket, 2949 int *continued_write, 2950 int *logoffsetp) 2951 { 2952 int log_offset; 2953 xlog_rec_header_t *head; 2954 xlog_in_core_t *iclog; 2955 2956 restart: 2957 spin_lock(&log->l_icloglock); 2958 if (XLOG_FORCED_SHUTDOWN(log)) { 2959 spin_unlock(&log->l_icloglock); 2960 return -EIO; 2961 } 2962 2963 iclog = log->l_iclog; 2964 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2965 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); 2966 2967 /* Wait for log writes to have flushed */ 2968 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 2969 goto restart; 2970 } 2971 2972 head = &iclog->ic_header; 2973 2974 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 2975 log_offset = iclog->ic_offset; 2976 2977 /* On the 1st write to an iclog, figure out lsn. This works 2978 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 2979 * committing to. If the offset is set, that's how many blocks 2980 * must be written. 2981 */ 2982 if (log_offset == 0) { 2983 ticket->t_curr_res -= log->l_iclog_hsize; 2984 xlog_tic_add_region(ticket, 2985 log->l_iclog_hsize, 2986 XLOG_REG_TYPE_LRHEADER); 2987 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 2988 head->h_lsn = cpu_to_be64( 2989 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 2990 ASSERT(log->l_curr_block >= 0); 2991 } 2992 2993 /* If there is enough room to write everything, then do it. Otherwise, 2994 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 2995 * bit is on, so this will get flushed out. Don't update ic_offset 2996 * until you know exactly how many bytes get copied. Therefore, wait 2997 * until later to update ic_offset. 2998 * 2999 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 3000 * can fit into remaining data section. 3001 */ 3002 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { 3003 int error = 0; 3004 3005 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3006 3007 /* 3008 * If we are the only one writing to this iclog, sync it to 3009 * disk. We need to do an atomic compare and decrement here to 3010 * avoid racing with concurrent atomic_dec_and_lock() calls in 3011 * xlog_state_release_iclog() when there is more than one 3012 * reference to the iclog. 3013 */ 3014 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) 3015 error = xlog_state_release_iclog(log, iclog); 3016 spin_unlock(&log->l_icloglock); 3017 if (error) 3018 return error; 3019 goto restart; 3020 } 3021 3022 /* Do we have enough room to write the full amount in the remainder 3023 * of this iclog? Or must we continue a write on the next iclog and 3024 * mark this iclog as completely taken? In the case where we switch 3025 * iclogs (to mark it taken), this particular iclog will release/sync 3026 * to disk in xlog_write(). 
3027 */ 3028 if (len <= iclog->ic_size - iclog->ic_offset) { 3029 *continued_write = 0; 3030 iclog->ic_offset += len; 3031 } else { 3032 *continued_write = 1; 3033 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3034 } 3035 *iclogp = iclog; 3036 3037 ASSERT(iclog->ic_offset <= iclog->ic_size); 3038 spin_unlock(&log->l_icloglock); 3039 3040 *logoffsetp = log_offset; 3041 return 0; 3042 } 3043 3044 /* 3045 * The first cnt-1 times a ticket goes through here we don't need to move the 3046 * grant write head because the permanent reservation has reserved cnt times the 3047 * unit amount. Release part of the current permanent unit reservation and 3048 * reset the current reservation to be one unit's worth. Also move the grant 3049 * reservation head forward. 3050 */ 3051 void 3052 xfs_log_ticket_regrant( 3053 struct xlog *log, 3054 struct xlog_ticket *ticket) 3055 { 3056 trace_xfs_log_ticket_regrant(log, ticket); 3057 3058 if (ticket->t_cnt > 0) 3059 ticket->t_cnt--; 3060 3061 xlog_grant_sub_space(log, &log->l_reserve_head.grant, 3062 ticket->t_curr_res); 3063 xlog_grant_sub_space(log, &log->l_write_head.grant, 3064 ticket->t_curr_res); 3065 ticket->t_curr_res = ticket->t_unit_res; 3066 xlog_tic_reset_res(ticket); 3067 3068 trace_xfs_log_ticket_regrant_sub(log, ticket); 3069 3070 /* just return if we still have some of the pre-reserved space */ 3071 if (!ticket->t_cnt) { 3072 xlog_grant_add_space(log, &log->l_reserve_head.grant, 3073 ticket->t_unit_res); 3074 trace_xfs_log_ticket_regrant_exit(log, ticket); 3075 3076 ticket->t_curr_res = ticket->t_unit_res; 3077 xlog_tic_reset_res(ticket); 3078 } 3079 3080 xfs_log_ticket_put(ticket); 3081 } 3082 3083 /* 3084 * Give back the space left from a reservation. 3085 * 3086 * All the information we need to make a correct determination of space left 3087 * is present. For non-permanent reservations, things are quite easy. The 3088 * count should have been decremented to zero. We only need to deal with the 3089 * space remaining in the current reservation part of the ticket. If the 3090 * ticket contains a permanent reservation, there may be left over space which 3091 * needs to be released. A count of N means that N-1 refills of the current 3092 * reservation can be done before we need to ask for more space. The first 3093 * one goes to fill up the first current reservation. Once we run out of 3094 * space, the count will stay at zero and the only space remaining will be 3095 * in the current reservation field. 3096 */ 3097 void 3098 xfs_log_ticket_ungrant( 3099 struct xlog *log, 3100 struct xlog_ticket *ticket) 3101 { 3102 int bytes; 3103 3104 trace_xfs_log_ticket_ungrant(log, ticket); 3105 3106 if (ticket->t_cnt > 0) 3107 ticket->t_cnt--; 3108 3109 trace_xfs_log_ticket_ungrant_sub(log, ticket); 3110 3111 /* 3112 * If this is a permanent reservation ticket, we may be able to free 3113 * up more space based on the remaining count. 3114 */ 3115 bytes = ticket->t_curr_res; 3116 if (ticket->t_cnt > 0) { 3117 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 3118 bytes += ticket->t_unit_res*ticket->t_cnt; 3119 } 3120 3121 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); 3122 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); 3123 3124 trace_xfs_log_ticket_ungrant_exit(log, ticket); 3125 3126 xfs_log_space_wake(log->l_mp); 3127 xfs_log_ticket_put(ticket); 3128 } 3129 3130 /* 3131 * This routine will mark the current iclog in the ring as WANT_SYNC and move 3132 * the current iclog pointer to the next iclog in the ring.
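 *
 * Illustrative example of the stripe unit rounding below (invented
 * numbers): on a v2 log whose stripe unit converts to 32 basic blocks,
 * an l_curr_block that would land on block 130 is rounded up to block
 * 160 so that the next iclog starts on a stripe unit boundary.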
3133 */ 3134 STATIC void 3135 xlog_state_switch_iclogs( 3136 struct xlog *log, 3137 struct xlog_in_core *iclog, 3138 int eventual_size) 3139 { 3140 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 3141 assert_spin_locked(&log->l_icloglock); 3142 3143 if (!eventual_size) 3144 eventual_size = iclog->ic_offset; 3145 iclog->ic_state = XLOG_STATE_WANT_SYNC; 3146 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); 3147 log->l_prev_block = log->l_curr_block; 3148 log->l_prev_cycle = log->l_curr_cycle; 3149 3150 /* roll log?: ic_offset changed later */ 3151 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); 3152 3153 /* Round up to next log-sunit */ 3154 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && 3155 log->l_mp->m_sb.sb_logsunit > 1) { 3156 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); 3157 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); 3158 } 3159 3160 if (log->l_curr_block >= log->l_logBBsize) { 3161 /* 3162 * Rewind the current block before the cycle is bumped to make 3163 * sure that the combined LSN never transiently moves forward 3164 * when the log wraps to the next cycle. This is to support the 3165 * unlocked sample of these fields from xlog_valid_lsn(). Most 3166 * other cases should acquire l_icloglock. 3167 */ 3168 log->l_curr_block -= log->l_logBBsize; 3169 ASSERT(log->l_curr_block >= 0); 3170 smp_wmb(); 3171 log->l_curr_cycle++; 3172 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) 3173 log->l_curr_cycle++; 3174 } 3175 ASSERT(iclog == log->l_iclog); 3176 log->l_iclog = iclog->ic_next; 3177 } 3178 3179 /* 3180 * Write out all data in the in-core log as of this exact moment in time. 3181 * 3182 * Data may be written to the in-core log during this call. However, 3183 * we don't guarantee this data will be written out. A change from past 3184 * implementation means this routine will *not* write out zero length LRs. 3185 * 3186 * Basically, we try and perform an intelligent scan of the in-core logs. 3187 * If we determine there is no flushable data, we just return. There is no 3188 * flushable data if: 3189 * 3190 * 1. the current iclog is active and has no data; the previous iclog 3191 * is in the active or dirty state. 3192 * 2. the current iclog is dirty, and the previous iclog is in the 3193 * active or dirty state. 3194 * 3195 * We may sleep if: 3196 * 3197 * 1. the current iclog is not in the active nor dirty state. 3198 * 2. the current iclog is dirty, and the previous iclog is not in the 3199 * active nor dirty state. 3200 * 3. the current iclog is active, and there is another thread writing 3201 * to this particular iclog. 3202 * 4. a) the current iclog is active and has no other writers 3203 * b) when we return from flushing out this iclog, it is still 3204 * not in the active nor dirty state. 3205 */ 3206 int 3207 xfs_log_force( 3208 struct xfs_mount *mp, 3209 uint flags) 3210 { 3211 struct xlog *log = mp->m_log; 3212 struct xlog_in_core *iclog; 3213 xfs_lsn_t lsn; 3214 3215 XFS_STATS_INC(mp, xs_log_force); 3216 trace_xfs_log_force(mp, 0, _RET_IP_); 3217 3218 xlog_cil_force(log); 3219 3220 spin_lock(&log->l_icloglock); 3221 iclog = log->l_iclog; 3222 if (iclog->ic_state == XLOG_STATE_IOERROR) 3223 goto out_error; 3224 3225 if (iclog->ic_state == XLOG_STATE_DIRTY || 3226 (iclog->ic_state == XLOG_STATE_ACTIVE && 3227 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) { 3228 /* 3229 * If the head is dirty or (active and empty), then we need to 3230 * look at the previous iclog.
3231 * 3232 * If the previous iclog is active or dirty we are done. There 3233 * is nothing to sync out. Otherwise, we attach ourselves to the 3234 * previous iclog and go to sleep. 3235 */ 3236 iclog = iclog->ic_prev; 3237 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3238 if (atomic_read(&iclog->ic_refcnt) == 0) { 3239 /* 3240 * We are the only one with access to this iclog. 3241 * 3242 * Flush it out now. There should be a roundoff of zero 3243 * to show that someone has already taken care of the 3244 * roundoff from the previous sync. 3245 */ 3246 atomic_inc(&iclog->ic_refcnt); 3247 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3248 xlog_state_switch_iclogs(log, iclog, 0); 3249 if (xlog_state_release_iclog(log, iclog)) 3250 goto out_error; 3251 3252 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) 3253 goto out_unlock; 3254 } else { 3255 /* 3256 * Someone else is writing to this iclog. 3257 * 3258 * Use its call to flush out the data. However, the 3259 * other thread may not force out this LR, so we mark 3260 * it WANT_SYNC. 3261 */ 3262 xlog_state_switch_iclogs(log, iclog, 0); 3263 } 3264 } else { 3265 /* 3266 * If the head iclog is not active nor dirty, we just attach 3267 * ourselves to the head and go to sleep if necessary. 3268 */ 3269 ; 3270 } 3271 3272 if (flags & XFS_LOG_SYNC) 3273 return xlog_wait_on_iclog(iclog); 3274 out_unlock: 3275 spin_unlock(&log->l_icloglock); 3276 return 0; 3277 out_error: 3278 spin_unlock(&log->l_icloglock); 3279 return -EIO; 3280 } 3281 3282 static int 3283 __xfs_log_force_lsn( 3284 struct xfs_mount *mp, 3285 xfs_lsn_t lsn, 3286 uint flags, 3287 int *log_flushed, 3288 bool already_slept) 3289 { 3290 struct xlog *log = mp->m_log; 3291 struct xlog_in_core *iclog; 3292 3293 spin_lock(&log->l_icloglock); 3294 iclog = log->l_iclog; 3295 if (iclog->ic_state == XLOG_STATE_IOERROR) 3296 goto out_error; 3297 3298 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3299 iclog = iclog->ic_next; 3300 if (iclog == log->l_iclog) 3301 goto out_unlock; 3302 } 3303 3304 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3305 /* 3306 * We sleep here if we haven't already slept (e.g. this is the 3307 * first time we've looked at the correct iclog buf) and the 3308 * buffer before us is going to be sync'ed. The reason for this 3309 * is that if we are doing sync transactions here, by waiting 3310 * for the previous I/O to complete, we can allow a few more 3311 * transactions into this iclog before we close it down. 3312 * 3313 * Otherwise, we mark the buffer WANT_SYNC, and bump up the 3314 * refcnt so we can release the log (which drops the ref count). 3315 * The state switch keeps new transaction commits from using 3316 * this buffer. When the current commits finish writing into 3317 * the buffer, the refcount will drop to zero and the buffer 3318 * will go out then. 
3319 */ 3320 if (!already_slept && 3321 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || 3322 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { 3323 XFS_STATS_INC(mp, xs_log_force_sleep); 3324 3325 xlog_wait(&iclog->ic_prev->ic_write_wait, 3326 &log->l_icloglock); 3327 return -EAGAIN; 3328 } 3329 atomic_inc(&iclog->ic_refcnt); 3330 xlog_state_switch_iclogs(log, iclog, 0); 3331 if (xlog_state_release_iclog(log, iclog)) 3332 goto out_error; 3333 if (log_flushed) 3334 *log_flushed = 1; 3335 } 3336 3337 if (flags & XFS_LOG_SYNC) 3338 return xlog_wait_on_iclog(iclog); 3339 out_unlock: 3340 spin_unlock(&log->l_icloglock); 3341 return 0; 3342 out_error: 3343 spin_unlock(&log->l_icloglock); 3344 return -EIO; 3345 } 3346 3347 /* 3348 * Force the in-core log to disk for a specific LSN. 3349 * 3350 * Find in-core log with lsn. 3351 * If it is in the DIRTY state, just return. 3352 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3353 * state and go to sleep or return. 3354 * If it is in any other state, go to sleep or return. 3355 * 3356 * Synchronous forces are implemented with a wait queue. All callers trying 3357 * to force a given lsn to disk must wait on the queue attached to the 3358 * specific in-core log. When the given in-core log finally completes its write 3359 * to disk, that thread will wake up all threads waiting on the queue. 3360 */ 3361 int 3362 xfs_log_force_lsn( 3363 struct xfs_mount *mp, 3364 xfs_lsn_t lsn, 3365 uint flags, 3366 int *log_flushed) 3367 { 3368 int ret; 3369 ASSERT(lsn != 0); 3370 3371 XFS_STATS_INC(mp, xs_log_force); 3372 trace_xfs_log_force(mp, lsn, _RET_IP_); 3373 3374 lsn = xlog_cil_force_lsn(mp->m_log, lsn); 3375 if (lsn == NULLCOMMITLSN) 3376 return 0; 3377 3378 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false); 3379 if (ret == -EAGAIN) 3380 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true); 3381 return ret; 3382 } 3383 3384 /* 3385 * Free a used ticket when its refcount falls to zero. 3386 */ 3387 void 3388 xfs_log_ticket_put( 3389 xlog_ticket_t *ticket) 3390 { 3391 ASSERT(atomic_read(&ticket->t_ref) > 0); 3392 if (atomic_dec_and_test(&ticket->t_ref)) 3393 kmem_cache_free(xfs_log_ticket_zone, ticket); 3394 } 3395 3396 xlog_ticket_t * 3397 xfs_log_ticket_get( 3398 xlog_ticket_t *ticket) 3399 { 3400 ASSERT(atomic_read(&ticket->t_ref) > 0); 3401 atomic_inc(&ticket->t_ref); 3402 return ticket; 3403 } 3404 3405 /* 3406 * Figure out the total log space unit (in bytes) that would be 3407 * required for a log ticket. 3408 */ 3409 int 3410 xfs_log_calc_unit_res( 3411 struct xfs_mount *mp, 3412 int unit_bytes) 3413 { 3414 struct xlog *log = mp->m_log; 3415 int iclog_space; 3416 uint num_headers; 3417 3418 /* 3419 * Permanent reservations have up to 'cnt'-1 active log operations 3420 * in the log. A unit in this case is the amount of space for one 3421 * of these log operations. Normal reservations have a cnt of 1 3422 * and their unit amount is the total amount of space required. 3423 * 3424 * The following lines of code account for non-transaction data 3425 * which occupy space in the on-disk log. 3426 * 3427 * Normal form of a transaction is: 3428 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3429 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3430 * 3431 * We need to account for all the leadup data and trailer data 3432 * around the transaction data. 3433 * And then we need to account for the worst case in terms of using 3434 * more space.
3435 * The worst case will happen if: 3436 * - the placement of the transaction happens to be such that the 3437 * roundoff is at its maximum 3438 * - the transaction data is synced before the commit record is synced 3439 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3440 * Therefore the commit record is in its own Log Record. 3441 * This can happen because the commit record is passed to xlog_write() 3442 * with its own region. 3443 * This then means that in the worst case, roundoff can happen for 3444 * the commit-rec as well. 3445 * The commit-rec is smaller than padding in this scenario and so it is 3446 * not added separately. 3447 */ 3448 3449 /* for trans header */ 3450 unit_bytes += sizeof(xlog_op_header_t); 3451 unit_bytes += sizeof(xfs_trans_header_t); 3452 3453 /* for start-rec */ 3454 unit_bytes += sizeof(xlog_op_header_t); 3455 3456 /* 3457 * for LR headers - the space for data in an iclog is the size minus 3458 * the space used for the headers. If we use the iclog size, then we 3459 * undercalculate the number of headers required. 3460 * 3461 * Furthermore - the addition of op headers for split-recs might 3462 * increase the space required enough to require more log and op 3463 * headers, so take that into account too. 3464 * 3465 * IMPORTANT: This reservation makes the assumption that if this 3466 * transaction is the first in an iclog and hence has the LR headers 3467 * accounted to it, then the remaining space in the iclog is 3468 * exclusively for this transaction. i.e. if the transaction is larger 3469 * than the iclog, it will be the only thing in that iclog. 3470 * Fundamentally, this means we must pass the entire log vector to 3471 * xlog_write to guarantee this. 3472 */ 3473 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 3474 num_headers = howmany(unit_bytes, iclog_space); 3475 3476 /* for split-recs - ophdrs added when data split over LRs */ 3477 unit_bytes += sizeof(xlog_op_header_t) * num_headers; 3478 3479 /* add extra header reservations if we overrun */ 3480 while (!num_headers || 3481 howmany(unit_bytes, iclog_space) > num_headers) { 3482 unit_bytes += sizeof(xlog_op_header_t); 3483 num_headers++; 3484 } 3485 unit_bytes += log->l_iclog_hsize * num_headers; 3486 3487 /* for commit-rec LR header - note: padding will subsume the ophdr */ 3488 unit_bytes += log->l_iclog_hsize; 3489 3490 /* roundoff padding: one unit for transaction data, one for the commit record */ 3491 if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) { 3492 /* log su roundoff */ 3493 unit_bytes += 2 * mp->m_sb.sb_logsunit; 3494 } else { 3495 /* BB roundoff */ 3496 unit_bytes += 2 * BBSIZE; 3497 } 3498 3499 return unit_bytes; 3500 } 3501 3502 /* 3503 * Allocate and initialise a new log ticket.
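 *
 * An illustrative call for a normal (non-permanent) reservation, which
 * uses a count of 1:
 *
 *	tic = xlog_ticket_alloc(log, unit_bytes, 1, XFS_TRANSACTION, false);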
3504 */ 3505 struct xlog_ticket * 3506 xlog_ticket_alloc( 3507 struct xlog *log, 3508 int unit_bytes, 3509 int cnt, 3510 char client, 3511 bool permanent) 3512 { 3513 struct xlog_ticket *tic; 3514 int unit_res; 3515 3516 tic = kmem_cache_zalloc(xfs_log_ticket_zone, GFP_NOFS | __GFP_NOFAIL); 3517 3518 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); 3519 3520 atomic_set(&tic->t_ref, 1); 3521 tic->t_task = current; 3522 INIT_LIST_HEAD(&tic->t_queue); 3523 tic->t_unit_res = unit_res; 3524 tic->t_curr_res = unit_res; 3525 tic->t_cnt = cnt; 3526 tic->t_ocnt = cnt; 3527 tic->t_tid = prandom_u32(); 3528 tic->t_clientid = client; 3529 if (permanent) 3530 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3531 3532 xlog_tic_reset_res(tic); 3533 3534 return tic; 3535 } 3536 3537 #if defined(DEBUG) 3538 /* 3539 * Make sure that the destination ptr is within the valid data region of 3540 * one of the iclogs. This uses backup pointers stored in a different 3541 * part of the log in case we trash the log structure. 3542 */ 3543 STATIC void 3544 xlog_verify_dest_ptr( 3545 struct xlog *log, 3546 void *ptr) 3547 { 3548 int i; 3549 int good_ptr = 0; 3550 3551 for (i = 0; i < log->l_iclog_bufs; i++) { 3552 if (ptr >= log->l_iclog_bak[i] && 3553 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) 3554 good_ptr++; 3555 } 3556 3557 if (!good_ptr) 3558 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3559 } 3560 3561 /* 3562 * Check to make sure the grant write head didn't just overlap the tail. If 3563 * the cycles are the same, we can't be overlapping. Otherwise, make sure that 3564 * the cycles differ by exactly one and check the byte count. 3565 * 3566 * This check is run unlocked, so can give false positives. Rather than assert 3567 * on failures, use a warn-once flag and a panic tag to allow the admin to 3568 * determine if they want to panic the machine when such an error occurs. For 3569 * debug kernels this will have the same effect as using an assert but, unlike 3570 * an assert, it can be turned off at runtime.
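 *
 * Worked example (illustrative numbers): with the tail at cycle 8,
 * block 100 and the write head in cycle 9, the cycles differ by exactly
 * one, so the head's byte offset must not exceed BBTOB(100); anything
 * larger would mean the head has physically overwritten the tail.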
3571 */ 3572 STATIC void 3573 xlog_verify_grant_tail( 3574 struct xlog *log) 3575 { 3576 int tail_cycle, tail_blocks; 3577 int cycle, space; 3578 3579 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); 3580 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); 3581 if (tail_cycle != cycle) { 3582 if (cycle - 1 != tail_cycle && 3583 !(log->l_flags & XLOG_TAIL_WARN)) { 3584 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3585 "%s: cycle - 1 != tail_cycle", __func__); 3586 log->l_flags |= XLOG_TAIL_WARN; 3587 } 3588 3589 if (space > BBTOB(tail_blocks) && 3590 !(log->l_flags & XLOG_TAIL_WARN)) { 3591 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3592 "%s: space > BBTOB(tail_blocks)", __func__); 3593 log->l_flags |= XLOG_TAIL_WARN; 3594 } 3595 } 3596 } 3597 3598 /* check if it will fit */ 3599 STATIC void 3600 xlog_verify_tail_lsn( 3601 struct xlog *log, 3602 struct xlog_in_core *iclog, 3603 xfs_lsn_t tail_lsn) 3604 { 3605 int blocks; 3606 3607 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { 3608 blocks = 3609 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3610 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3611 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3612 } else { 3613 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3614 3615 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3616 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); 3617 3618 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3619 if (blocks < BTOBB(iclog->ic_offset) + 1) 3620 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3621 } 3622 } 3623 3624 /* 3625 * Perform a number of checks on the iclog before writing to disk. 3626 * 3627 * 1. Make sure the iclogs are still circular 3628 * 2. Make sure we have a good magic number 3629 * 3. Make sure we don't have magic numbers in the data 3630 * 4. Check fields of each log operation header for: 3631 * A. Valid client identifier 3632 * B. tid ptr value falls in valid ptr space (user space code) 3633 * C. Length in log record header is correct according to the 3634 * individual operation headers within record. 3635 * 5. When a bwrite will occur within 5 blocks of the front of the physical 3636 * log, check the preceding blocks of the physical log to make sure all 3637 * the cycle numbers agree with the current cycle number. 
3638 */ 3639 STATIC void 3640 xlog_verify_iclog( 3641 struct xlog *log, 3642 struct xlog_in_core *iclog, 3643 int count) 3644 { 3645 xlog_op_header_t *ophead; 3646 xlog_in_core_t *icptr; 3647 xlog_in_core_2_t *xhdr; 3648 void *base_ptr, *ptr, *p; 3649 ptrdiff_t field_offset; 3650 uint8_t clientid; 3651 int len, i, j, k, op_len; 3652 int idx; 3653 3654 /* check validity of iclog pointers */ 3655 spin_lock(&log->l_icloglock); 3656 icptr = log->l_iclog; 3657 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) 3658 ASSERT(icptr); 3659 3660 if (icptr != log->l_iclog) 3661 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); 3662 spin_unlock(&log->l_icloglock); 3663 3664 /* check log magic numbers */ 3665 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) 3666 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); 3667 3668 base_ptr = ptr = &iclog->ic_header; 3669 p = &iclog->ic_header; 3670 for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) { 3671 if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) 3672 xfs_emerg(log->l_mp, "%s: unexpected magic num", 3673 __func__); 3674 } 3675 3676 /* check fields */ 3677 len = be32_to_cpu(iclog->ic_header.h_num_logops); 3678 base_ptr = ptr = iclog->ic_datap; 3679 ophead = ptr; 3680 xhdr = iclog->ic_data; 3681 for (i = 0; i < len; i++) { 3682 ophead = ptr; 3683 3684 /* clientid is only 1 byte */ 3685 p = &ophead->oh_clientid; 3686 field_offset = p - base_ptr; 3687 if (field_offset & 0x1ff) { 3688 clientid = ophead->oh_clientid; 3689 } else { 3690 idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap); 3691 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { 3692 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3693 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3694 clientid = xlog_get_client_id( 3695 xhdr[j].hic_xheader.xh_cycle_data[k]); 3696 } else { 3697 clientid = xlog_get_client_id( 3698 iclog->ic_header.h_cycle_data[idx]); 3699 } 3700 } 3701 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) 3702 xfs_warn(log->l_mp, 3703 "%s: invalid clientid %d op "PTR_FMT" offset 0x%lx", 3704 __func__, clientid, ophead, 3705 (unsigned long)field_offset); 3706 3707 /* check length */ 3708 p = &ophead->oh_len; 3709 field_offset = p - base_ptr; 3710 if (field_offset & 0x1ff) { 3711 op_len = be32_to_cpu(ophead->oh_len); 3712 } else { 3713 idx = BTOBBT((uintptr_t)&ophead->oh_len - 3714 (uintptr_t)iclog->ic_datap); 3715 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { 3716 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3717 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3718 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]); 3719 } else { 3720 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); 3721 } 3722 } 3723 ptr += sizeof(xlog_op_header_t) + op_len; 3724 } 3725 } 3726 #endif 3727 3728 /* 3729 * Mark all iclogs IOERROR. l_icloglock is held by the caller. 3730 */ 3731 STATIC int 3732 xlog_state_ioerror( 3733 struct xlog *log) 3734 { 3735 xlog_in_core_t *iclog, *ic; 3736 3737 iclog = log->l_iclog; 3738 if (iclog->ic_state != XLOG_STATE_IOERROR) { 3739 /* 3740 * Mark all the incore logs IOERROR. 3741 * From now on, no log flushes will result. 3742 */ 3743 ic = iclog; 3744 do { 3745 ic->ic_state = XLOG_STATE_IOERROR; 3746 ic = ic->ic_next; 3747 } while (ic != iclog); 3748 return 0; 3749 } 3750 /* 3751 * Return non-zero, if state transition has already happened. 
/*
 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (iclog->ic_state != XLOG_STATE_IOERROR) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no log flushes will result.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return 0;
	}
	/*
	 * Return non-zero if the state transition has already happened.
	 */
	return 1;
}

/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. if !logerror, flush the logs to disk. Anything modified
 *	   after this is ignored.
 *	b. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	c. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up, and are told the bad news.
 *	d. nothing new gets queued up after (b) and (c) are done.
 *
 * Note: for the !logerror case we need to flush the regions held in memory out
 * to disk first. This needs to be done before the log is marked as shutdown,
 * otherwise the iclog writes will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	struct xlog	*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			mp->m_sb_bp->b_flags |= XBF_DONE;
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}

	/*
	 * Flush all the completed transactions to disk before marking the log
	 * as being shut down. We need to do it in this order to ensure that
	 * completed operations are safely on disk before we shut down, and
	 * that we don't have to issue any buffer IO after the shutdown flags
	 * are set to guarantee this.
	 */
	if (!logerror)
		xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Mark the filesystem and the log as in a shutdown state and wake
	 * everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	/*
	 * Mark the log and the iclogs with IO error flags to prevent any
	 * further log IO from being issued or completed.
	 */
	log->l_flags |= XLOG_IO_ERROR;
	retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this. That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
	 * as if the log writes were completed. The abort handling in the log
	 * item committed callback functions will do this again under lock to
	 * avoid races.
	 */
	spin_lock(&log->l_cilp->xc_push_lock);
	wake_up_all(&log->l_cilp->xc_commit_wait);
	spin_unlock(&log->l_cilp->xc_push_lock);
	xlog_state_do_callback(log);

	/* return non-zero if log IOERROR transition had already happened */
	return retval;
}
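/*
 * Illustrative sketch only, compiled out: how a shutdown typically
 * reaches xfs_log_force_umount() above. Callers normally go through
 * xfs_force_shutdown() with a flags word, and the SHUTDOWN_LOG_IO_ERROR
 * bit becomes the logerror argument. The helper name here is a
 * hypothetical stand-in for that path.
 */
#if 0
static void
xlog_example_shutdown(
	struct xfs_mount	*mp,
	bool			log_io_error)
{
	/*
	 * A log IO error must not try to flush the log (the iclog writes
	 * would fail); any other cause flushes completed transactions to
	 * disk before the shutdown flags are set.
	 */
	xfs_log_force_umount(mp, log_io_error ? 1 : 0);

	/* Every later reservation attempt now fails fast with -EIO. */
	ASSERT(XLOG_FORCED_SHUTDOWN(mp->m_log));
}
#endif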
STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/*
		 * Endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}

/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{
	struct xlog	*log = mp->m_log;
	bool		valid;

	/*
	 * norecovery mode skips mount-time log processing and unconditionally
	 * resets the in-core LSN. We can't validate in this mode, but
	 * modifications are not allowed anyway, so just return true.
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		return true;

	/*
	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
	 * handled by recovery and thus safe to ignore here.
	 */
	if (lsn == NULLCOMMITLSN)
		return true;

	valid = xlog_valid_lsn(mp->m_log, lsn);

	/* warn the user about what's gone wrong before verifier failure */
	if (!valid) {
		spin_lock(&log->l_icloglock);
		xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
			 log->l_curr_cycle, log->l_curr_block);
		spin_unlock(&log->l_icloglock);
	}

	return valid;
}

bool
xfs_log_in_recovery(
	struct xfs_mount	*mp)
{
	struct xlog	*log = mp->m_log;

	return log->l_flags & XLOG_ACTIVE_RECOVERY;
}
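/*
 * Illustrative sketch only, compiled out: the usual shape of a v5 read
 * verifier calling xfs_log_check_lsn() above, modelled on verifiers
 * such as xfs_agf_verify(). The on-disk structure and its lsn field
 * here are placeholders, not a real XFS type.
 */
#if 0
struct example_ondisk_hdr {
	__be64			lsn;
};

static xfs_failaddr_t
xlog_example_read_verify(
	struct xfs_mount		*mp,
	struct example_ondisk_hdr	*hdr)
{
	/*
	 * A metadata LSN ahead of the current log head means the log was
	 * lost or corrupted; fail the buffer verification so the caller
	 * reports corruption instead of trusting the stale metadata.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_log_check_lsn(mp, be64_to_cpu(hdr->lsn)))
		return __this_address;
	return NULL;
}
#endif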