1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_errortag.h" 14 #include "xfs_error.h" 15 #include "xfs_trans.h" 16 #include "xfs_trans_priv.h" 17 #include "xfs_log.h" 18 #include "xfs_log_priv.h" 19 #include "xfs_trace.h" 20 #include "xfs_sysfs.h" 21 #include "xfs_sb.h" 22 #include "xfs_health.h" 23 24 kmem_zone_t *xfs_log_ticket_zone; 25 26 /* Local miscellaneous function prototypes */ 27 STATIC struct xlog * 28 xlog_alloc_log( 29 struct xfs_mount *mp, 30 struct xfs_buftarg *log_target, 31 xfs_daddr_t blk_offset, 32 int num_bblks); 33 STATIC int 34 xlog_space_left( 35 struct xlog *log, 36 atomic64_t *head); 37 STATIC void 38 xlog_dealloc_log( 39 struct xlog *log); 40 41 /* local state machine functions */ 42 STATIC void xlog_state_done_syncing( 43 struct xlog_in_core *iclog); 44 STATIC int 45 xlog_state_get_iclog_space( 46 struct xlog *log, 47 int len, 48 struct xlog_in_core **iclog, 49 struct xlog_ticket *ticket, 50 int *continued_write, 51 int *logoffsetp); 52 STATIC void 53 xlog_state_switch_iclogs( 54 struct xlog *log, 55 struct xlog_in_core *iclog, 56 int eventual_size); 57 STATIC void 58 xlog_grant_push_ail( 59 struct xlog *log, 60 int need_bytes); 61 STATIC void 62 xlog_sync( 63 struct xlog *log, 64 struct xlog_in_core *iclog); 65 #if defined(DEBUG) 66 STATIC void 67 xlog_verify_dest_ptr( 68 struct xlog *log, 69 void *ptr); 70 STATIC void 71 xlog_verify_grant_tail( 72 struct xlog *log); 73 STATIC void 74 xlog_verify_iclog( 75 struct xlog *log, 76 struct xlog_in_core *iclog, 77 int count); 78 STATIC void 79 xlog_verify_tail_lsn( 80 struct xlog *log, 81 struct xlog_in_core *iclog); 82 #else 83 #define xlog_verify_dest_ptr(a,b) 84 #define xlog_verify_grant_tail(a) 85 #define xlog_verify_iclog(a,b,c) 86 #define xlog_verify_tail_lsn(a,b) 87 #endif 88 89 STATIC int 90 xlog_iclogs_empty( 91 struct xlog *log); 92 93 static int 94 xfs_log_cover(struct xfs_mount *); 95 96 static void 97 xlog_grant_sub_space( 98 struct xlog *log, 99 atomic64_t *head, 100 int bytes) 101 { 102 int64_t head_val = atomic64_read(head); 103 int64_t new, old; 104 105 do { 106 int cycle, space; 107 108 xlog_crack_grant_head_val(head_val, &cycle, &space); 109 110 space -= bytes; 111 if (space < 0) { 112 space += log->l_logsize; 113 cycle--; 114 } 115 116 old = head_val; 117 new = xlog_assign_grant_head_val(cycle, space); 118 head_val = atomic64_cmpxchg(head, old, new); 119 } while (head_val != old); 120 } 121 122 static void 123 xlog_grant_add_space( 124 struct xlog *log, 125 atomic64_t *head, 126 int bytes) 127 { 128 int64_t head_val = atomic64_read(head); 129 int64_t new, old; 130 131 do { 132 int tmp; 133 int cycle, space; 134 135 xlog_crack_grant_head_val(head_val, &cycle, &space); 136 137 tmp = log->l_logsize - space; 138 if (tmp > bytes) 139 space += bytes; 140 else { 141 space = bytes - tmp; 142 cycle++; 143 } 144 145 old = head_val; 146 new = xlog_assign_grant_head_val(cycle, space); 147 head_val = atomic64_cmpxchg(head, old, new); 148 } while (head_val != old); 149 } 150 151 STATIC void 152 xlog_grant_head_init( 153 struct xlog_grant_head *head) 154 { 155 xlog_assign_grant_head(&head->grant, 1, 0); 156 INIT_LIST_HEAD(&head->waiters); 157 spin_lock_init(&head->lock); 158 } 159 160 STATIC void 161 xlog_grant_head_wake_all( 162 struct 
xlog_grant_head *head) 163 { 164 struct xlog_ticket *tic; 165 166 spin_lock(&head->lock); 167 list_for_each_entry(tic, &head->waiters, t_queue) 168 wake_up_process(tic->t_task); 169 spin_unlock(&head->lock); 170 } 171 172 static inline int 173 xlog_ticket_reservation( 174 struct xlog *log, 175 struct xlog_grant_head *head, 176 struct xlog_ticket *tic) 177 { 178 if (head == &log->l_write_head) { 179 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); 180 return tic->t_unit_res; 181 } else { 182 if (tic->t_flags & XLOG_TIC_PERM_RESERV) 183 return tic->t_unit_res * tic->t_cnt; 184 else 185 return tic->t_unit_res; 186 } 187 } 188 189 STATIC bool 190 xlog_grant_head_wake( 191 struct xlog *log, 192 struct xlog_grant_head *head, 193 int *free_bytes) 194 { 195 struct xlog_ticket *tic; 196 int need_bytes; 197 bool woken_task = false; 198 199 list_for_each_entry(tic, &head->waiters, t_queue) { 200 201 /* 202 * There is a chance that the size of the CIL checkpoints in 203 * progress at the last AIL push target calculation resulted in 204 * limiting the target to the log head (l_last_sync_lsn) at the 205 * time. This may not reflect where the log head is now as the 206 * CIL checkpoints may have completed. 207 * 208 * Hence when we are woken here, it may be that the head of the 209 * log that has moved rather than the tail. As the tail didn't 210 * move, there still won't be space available for the 211 * reservation we require. However, if the AIL has already 212 * pushed to the target defined by the old log head location, we 213 * will hang here waiting for something else to update the AIL 214 * push target. 215 * 216 * Therefore, if there isn't space to wake the first waiter on 217 * the grant head, we need to push the AIL again to ensure the 218 * target reflects both the current log tail and log head 219 * position before we wait for the tail to move again. 220 */ 221 222 need_bytes = xlog_ticket_reservation(log, head, tic); 223 if (*free_bytes < need_bytes) { 224 if (!woken_task) 225 xlog_grant_push_ail(log, need_bytes); 226 return false; 227 } 228 229 *free_bytes -= need_bytes; 230 trace_xfs_log_grant_wake_up(log, tic); 231 wake_up_process(tic->t_task); 232 woken_task = true; 233 } 234 235 return true; 236 } 237 238 STATIC int 239 xlog_grant_head_wait( 240 struct xlog *log, 241 struct xlog_grant_head *head, 242 struct xlog_ticket *tic, 243 int need_bytes) __releases(&head->lock) 244 __acquires(&head->lock) 245 { 246 list_add_tail(&tic->t_queue, &head->waiters); 247 248 do { 249 if (XLOG_FORCED_SHUTDOWN(log)) 250 goto shutdown; 251 xlog_grant_push_ail(log, need_bytes); 252 253 __set_current_state(TASK_UNINTERRUPTIBLE); 254 spin_unlock(&head->lock); 255 256 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); 257 258 trace_xfs_log_grant_sleep(log, tic); 259 schedule(); 260 trace_xfs_log_grant_wake(log, tic); 261 262 spin_lock(&head->lock); 263 if (XLOG_FORCED_SHUTDOWN(log)) 264 goto shutdown; 265 } while (xlog_space_left(log, &head->grant) < need_bytes); 266 267 list_del_init(&tic->t_queue); 268 return 0; 269 shutdown: 270 list_del_init(&tic->t_queue); 271 return -EIO; 272 } 273 274 /* 275 * Atomically get the log space required for a log ticket. 276 * 277 * Once a ticket gets put onto head->waiters, it will only return after the 278 * needed reservation is satisfied. 279 * 280 * This function is structured so that it has a lock free fast path. This is 281 * necessary because every new transaction reservation will come through this 282 * path. 
Hence any lock will be globally hot if we take it unconditionally on 283 * every pass. 284 * 285 * As tickets are only ever moved on and off head->waiters under head->lock, we 286 * only need to take that lock if we are going to add the ticket to the queue 287 * and sleep. We can avoid taking the lock if the ticket was never added to 288 * head->waiters because the t_queue list head will be empty and we hold the 289 * only reference to it so it can safely be checked unlocked. 290 */ 291 STATIC int 292 xlog_grant_head_check( 293 struct xlog *log, 294 struct xlog_grant_head *head, 295 struct xlog_ticket *tic, 296 int *need_bytes) 297 { 298 int free_bytes; 299 int error = 0; 300 301 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); 302 303 /* 304 * If there are other waiters on the queue then give them a chance at 305 * logspace before us. Wake up the first waiters, if we do not wake 306 * up all the waiters then go to sleep waiting for more free space, 307 * otherwise try to get some space for this transaction. 308 */ 309 *need_bytes = xlog_ticket_reservation(log, head, tic); 310 free_bytes = xlog_space_left(log, &head->grant); 311 if (!list_empty_careful(&head->waiters)) { 312 spin_lock(&head->lock); 313 if (!xlog_grant_head_wake(log, head, &free_bytes) || 314 free_bytes < *need_bytes) { 315 error = xlog_grant_head_wait(log, head, tic, 316 *need_bytes); 317 } 318 spin_unlock(&head->lock); 319 } else if (free_bytes < *need_bytes) { 320 spin_lock(&head->lock); 321 error = xlog_grant_head_wait(log, head, tic, *need_bytes); 322 spin_unlock(&head->lock); 323 } 324 325 return error; 326 } 327 328 static void 329 xlog_tic_reset_res(xlog_ticket_t *tic) 330 { 331 tic->t_res_num = 0; 332 tic->t_res_arr_sum = 0; 333 tic->t_res_num_ophdrs = 0; 334 } 335 336 static void 337 xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type) 338 { 339 if (tic->t_res_num == XLOG_TIC_LEN_MAX) { 340 /* add to overflow and start again */ 341 tic->t_res_o_flow += tic->t_res_arr_sum; 342 tic->t_res_num = 0; 343 tic->t_res_arr_sum = 0; 344 } 345 346 tic->t_res_arr[tic->t_res_num].r_len = len; 347 tic->t_res_arr[tic->t_res_num].r_type = type; 348 tic->t_res_arr_sum += len; 349 tic->t_res_num++; 350 } 351 352 bool 353 xfs_log_writable( 354 struct xfs_mount *mp) 355 { 356 /* 357 * Do not write to the log on norecovery mounts, if the data or log 358 * devices are read-only, or if the filesystem is shutdown. Read-only 359 * mounts allow internal writes for log recovery and unmount purposes, 360 * so don't restrict that case. 361 */ 362 if (mp->m_flags & XFS_MOUNT_NORECOVERY) 363 return false; 364 if (xfs_readonly_buftarg(mp->m_ddev_targp)) 365 return false; 366 if (xfs_readonly_buftarg(mp->m_log->l_targ)) 367 return false; 368 if (XFS_FORCED_SHUTDOWN(mp)) 369 return false; 370 return true; 371 } 372 373 /* 374 * Replenish the byte reservation required by moving the grant write head. 375 */ 376 int 377 xfs_log_regrant( 378 struct xfs_mount *mp, 379 struct xlog_ticket *tic) 380 { 381 struct xlog *log = mp->m_log; 382 int need_bytes; 383 int error = 0; 384 385 if (XLOG_FORCED_SHUTDOWN(log)) 386 return -EIO; 387 388 XFS_STATS_INC(mp, xs_try_logspace); 389 390 /* 391 * This is a new transaction on the ticket, so we need to change the 392 * transaction ID so that the next transaction has a different TID in 393 * the log. Just add one to the existing tid so that we can see chains 394 * of rolling transactions in the log easily. 
395 */ 396 tic->t_tid++; 397 398 xlog_grant_push_ail(log, tic->t_unit_res); 399 400 tic->t_curr_res = tic->t_unit_res; 401 xlog_tic_reset_res(tic); 402 403 if (tic->t_cnt > 0) 404 return 0; 405 406 trace_xfs_log_regrant(log, tic); 407 408 error = xlog_grant_head_check(log, &log->l_write_head, tic, 409 &need_bytes); 410 if (error) 411 goto out_error; 412 413 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); 414 trace_xfs_log_regrant_exit(log, tic); 415 xlog_verify_grant_tail(log); 416 return 0; 417 418 out_error: 419 /* 420 * If we are failing, make sure the ticket doesn't have any current 421 * reservations. We don't want to add this back when the ticket/ 422 * transaction gets cancelled. 423 */ 424 tic->t_curr_res = 0; 425 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ 426 return error; 427 } 428 429 /* 430 * Reserve log space and return a ticket corresponding to the reservation. 431 * 432 * Each reservation is going to reserve extra space for a log record header. 433 * When writes happen to the on-disk log, we don't subtract the length of the 434 * log record header from any reservation. By wasting space in each 435 * reservation, we prevent over allocation problems. 436 */ 437 int 438 xfs_log_reserve( 439 struct xfs_mount *mp, 440 int unit_bytes, 441 int cnt, 442 struct xlog_ticket **ticp, 443 uint8_t client, 444 bool permanent) 445 { 446 struct xlog *log = mp->m_log; 447 struct xlog_ticket *tic; 448 int need_bytes; 449 int error = 0; 450 451 ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); 452 453 if (XLOG_FORCED_SHUTDOWN(log)) 454 return -EIO; 455 456 XFS_STATS_INC(mp, xs_try_logspace); 457 458 ASSERT(*ticp == NULL); 459 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent); 460 *ticp = tic; 461 462 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt 463 : tic->t_unit_res); 464 465 trace_xfs_log_reserve(log, tic); 466 467 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, 468 &need_bytes); 469 if (error) 470 goto out_error; 471 472 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); 473 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); 474 trace_xfs_log_reserve_exit(log, tic); 475 xlog_verify_grant_tail(log); 476 return 0; 477 478 out_error: 479 /* 480 * If we are failing, make sure the ticket doesn't have any current 481 * reservations. We don't want to add this back when the ticket/ 482 * transaction gets cancelled. 483 */ 484 tic->t_curr_res = 0; 485 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ 486 return error; 487 } 488 489 /* 490 * Flush iclog to disk if this is the last reference to the given iclog and the 491 * it is in the WANT_SYNC state. 492 * 493 * If the caller passes in a non-zero @old_tail_lsn and the current log tail 494 * does not match, there may be metadata on disk that must be persisted before 495 * this iclog is written. To satisfy that requirement, set the 496 * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new 497 * log tail value. 498 * 499 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the 500 * log tail is updated correctly. NEED_FUA indicates that the iclog will be 501 * written to stable storage, and implies that a commit record is contained 502 * within the iclog. 
 * We need to ensure that the log tail does not move beyond
 * the tail that the first commit record in the iclog ordered against, otherwise
 * correct recovery of that checkpoint becomes dependent on future operations
 * performed on this iclog.
 *
 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
 * current tail into iclog. Once the iclog tail is set, future operations must
 * not modify it, otherwise they potentially violate ordering constraints for
 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
 * the iclog will get zeroed on activation of the iclog after sync, so we
 * always capture the tail lsn on the iclog on the first NEED_FUA release
 * regardless of the number of active reference counts on this iclog.
 */
int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		old_tail_lsn)
{
	xfs_lsn_t		tail_lsn;

	lockdep_assert_held(&log->l_icloglock);

	trace_xlog_iclog_release(iclog, _RET_IP_);
	if (iclog->ic_state == XLOG_STATE_IOERROR)
		return -EIO;

	/*
	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
	 * of the tail LSN into the iclog so we guarantee that the log tail does
	 * not move between deciding if a cache flush is required and writing
	 * the LSN into the iclog below.
	 */
	if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
		tail_lsn = xlog_assign_tail_lsn(log->l_mp);

		if (old_tail_lsn && tail_lsn != old_tail_lsn)
			iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;

		if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
		    !iclog->ic_header.h_tail_lsn)
			iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	}

	if (!atomic_dec_and_test(&iclog->ic_refcnt))
		return 0;

	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		return 0;
	}

	iclog->ic_state = XLOG_STATE_SYNCING;
	if (!iclog->ic_header.h_tail_lsn)
		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	xlog_verify_tail_lsn(log, iclog);
	trace_xlog_iclog_syncing(iclog, _RET_IP_);

	spin_unlock(&log->l_icloglock);
	xlog_sync(log, iclog);
	spin_lock(&log->l_icloglock);
	return 0;
}

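/*
 * Illustrative call pattern (sketch): a caller such as xlog_force_iclog()
 * further down in this file takes l_icloglock, grabs an extra ic_refcnt
 * reference, switches an ACTIVE iclog to WANT_SYNC and then calls
 * xlog_state_release_iclog(); only the caller that drops the final
 * reference moves the iclog to SYNCING and issues xlog_sync().
 */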
/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	bool		fatal = xfs_sb_version_hascrc(&mp->m_sb);
	int		error = 0;
	int		min_logfsbs;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small that would lead to some unexpected
	 * situations in transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails.  This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logsunit > 1 &&
		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
		xfs_warn(mp,
		"log stripe unit %u bytes must be a multiple of block size",
			 mp->m_sb.sb_logsunit);
		error = -EINVAL;
		fatal = true;
	}
	if (error) {
		/*
		 * Log check errors are always fatal on v5; or whenever bad
		 * metadata leads to a crash.
		 */
		if (fatal) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(mp->m_log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
702 */ 703 xlog_cil_init_post_recovery(mp->m_log); 704 705 return 0; 706 707 out_destroy_ail: 708 xfs_trans_ail_destroy(mp); 709 out_free_log: 710 xlog_dealloc_log(mp->m_log); 711 out: 712 return error; 713 } 714 715 /* 716 * Finish the recovery of the file system. This is separate from the 717 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read 718 * in the root and real-time bitmap inodes between calling xfs_log_mount() and 719 * here. 720 * 721 * If we finish recovery successfully, start the background log work. If we are 722 * not doing recovery, then we have a RO filesystem and we don't need to start 723 * it. 724 */ 725 int 726 xfs_log_mount_finish( 727 struct xfs_mount *mp) 728 { 729 int error = 0; 730 bool readonly = (mp->m_flags & XFS_MOUNT_RDONLY); 731 bool recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED; 732 733 if (mp->m_flags & XFS_MOUNT_NORECOVERY) { 734 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 735 return 0; 736 } else if (readonly) { 737 /* Allow unlinked processing to proceed */ 738 mp->m_flags &= ~XFS_MOUNT_RDONLY; 739 } 740 741 /* 742 * During the second phase of log recovery, we need iget and 743 * iput to behave like they do for an active filesystem. 744 * xfs_fs_drop_inode needs to be able to prevent the deletion 745 * of inodes before we're done replaying log items on those 746 * inodes. Turn it off immediately after recovery finishes 747 * so that we don't leak the quota inodes if subsequent mount 748 * activities fail. 749 * 750 * We let all inodes involved in redo item processing end up on 751 * the LRU instead of being evicted immediately so that if we do 752 * something to an unlinked inode, the irele won't cause 753 * premature truncation and freeing of the inode, which results 754 * in log recovery failure. We have to evict the unreferenced 755 * lru inodes after clearing SB_ACTIVE because we don't 756 * otherwise clean up the lru if there's a subsequent failure in 757 * xfs_mountfs, which leads to us leaking the inodes if nothing 758 * else (e.g. quotacheck) references the inodes before the 759 * mount failure occurs. 760 */ 761 mp->m_super->s_flags |= SB_ACTIVE; 762 error = xlog_recover_finish(mp->m_log); 763 if (!error) 764 xfs_log_work_queue(mp); 765 mp->m_super->s_flags &= ~SB_ACTIVE; 766 evict_inodes(mp->m_super); 767 768 /* 769 * Drain the buffer LRU after log recovery. This is required for v4 770 * filesystems to avoid leaving around buffers with NULL verifier ops, 771 * but we do it unconditionally to make sure we're always in a clean 772 * cache state after mount. 773 * 774 * Don't push in the error case because the AIL may have pending intents 775 * that aren't removed until recovery is cancelled. 776 */ 777 if (!error && recovered) { 778 xfs_log_force(mp, XFS_LOG_SYNC); 779 xfs_ail_push_all_sync(mp->m_ail); 780 } 781 xfs_buftarg_drain(mp->m_ddev_targp); 782 783 if (readonly) 784 mp->m_flags |= XFS_MOUNT_RDONLY; 785 786 /* Make sure the log is dead if we're returning failure. */ 787 ASSERT(!error || (mp->m_log->l_flags & XLOG_IO_ERROR)); 788 789 return error; 790 } 791 792 /* 793 * The mount has failed. Cancel the recovery if it hasn't completed and destroy 794 * the log. 795 */ 796 void 797 xfs_log_mount_cancel( 798 struct xfs_mount *mp) 799 { 800 xlog_recover_cancel(mp->m_log); 801 xfs_log_unmount(mp); 802 } 803 804 /* 805 * Flush out the iclog to disk ensuring that device caches are flushed and 806 * the iclog hits stable storage before any completion waiters are woken. 
 */
static inline int
xlog_force_iclog(
	struct xlog_in_core	*iclog)
{
	atomic_inc(&iclog->ic_refcnt);
	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
	if (iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
	return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
}

/*
 * Wait for the iclog and all prior iclogs to be written to disk as required by
 * the log force state machine. Waiting on ic_force_wait ensures iclog
 * completions have been ordered and callbacks run before we are woken here,
 * hence guaranteeing that all the iclogs up to this one are on stable storage.
 */
int
xlog_wait_on_iclog(
	struct xlog_in_core	*iclog)
		__releases(iclog->ic_log->l_icloglock)
{
	struct xlog		*log = iclog->ic_log;

	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
	if (!XLOG_FORCED_SHUTDOWN(log) &&
	    iclog->ic_state != XLOG_STATE_ACTIVE &&
	    iclog->ic_state != XLOG_STATE_DIRTY) {
		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	} else {
		spin_unlock(&log->l_icloglock);
	}

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;
	return 0;
}

/*
 * Write out an unmount record using the ticket provided. We have to account
 * for the data space used in the unmount ticket as this write is not done
 * from a transaction context that has already done the accounting for us.
 */
static int
xlog_write_unmount_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{
	struct xfs_unmount_log_format ulf = {
		.magic = XLOG_UNMOUNT_TYPE,
	};
	struct xfs_log_iovec reg = {
		.i_addr = &ulf,
		.i_len = sizeof(ulf),
		.i_type = XLOG_REG_TYPE_UNMOUNT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	/* account for space used by record data */
	ticket->t_curr_res -= sizeof(ulf);

	return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS);
}

/*
 * Mark the filesystem clean by writing an unmount record to the head of the
 * log.
 */
static void
xlog_unmount_write(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xlog_in_core	*iclog;
	struct xlog_ticket	*tic = NULL;
	int			error;

	error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
	if (error)
		goto out_err;

	error = xlog_write_unmount_record(log, tic);
	/*
	 * At this point, we're umounting anyway, so there's no point in
	 * transitioning log state to IOERROR. Just continue...
	 */
out_err:
	if (error)
		xfs_alert(mp, "%s: unmount record failed", __func__);

	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	error = xlog_force_iclog(iclog);
	xlog_wait_on_iclog(iclog);

	if (tic) {
		trace_xfs_log_umount_write(log, tic);
		xfs_log_ticket_ungrant(log, tic);
	}
}

static void
xfs_log_unmount_verify_iclog(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog = log->l_iclog;

	do {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		ASSERT(iclog->ic_offset == 0);
	} while ((iclog = iclog->ic_next) != log->l_iclog);
}

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
928 * We just write the magic number now since that particular field isn't 929 * currently architecture converted and "Unmount" is a bit foo. 930 * As far as I know, there weren't any dependencies on the old behaviour. 931 */ 932 static void 933 xfs_log_unmount_write( 934 struct xfs_mount *mp) 935 { 936 struct xlog *log = mp->m_log; 937 938 if (!xfs_log_writable(mp)) 939 return; 940 941 xfs_log_force(mp, XFS_LOG_SYNC); 942 943 if (XLOG_FORCED_SHUTDOWN(log)) 944 return; 945 946 /* 947 * If we think the summary counters are bad, avoid writing the unmount 948 * record to force log recovery at next mount, after which the summary 949 * counters will be recalculated. Refer to xlog_check_unmount_rec for 950 * more details. 951 */ 952 if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp, 953 XFS_ERRTAG_FORCE_SUMMARY_RECALC)) { 954 xfs_alert(mp, "%s: will fix summary counters at next mount", 955 __func__); 956 return; 957 } 958 959 xfs_log_unmount_verify_iclog(log); 960 xlog_unmount_write(log); 961 } 962 963 /* 964 * Empty the log for unmount/freeze. 965 * 966 * To do this, we first need to shut down the background log work so it is not 967 * trying to cover the log as we clean up. We then need to unpin all objects in 968 * the log so we can then flush them out. Once they have completed their IO and 969 * run the callbacks removing themselves from the AIL, we can cover the log. 970 */ 971 int 972 xfs_log_quiesce( 973 struct xfs_mount *mp) 974 { 975 cancel_delayed_work_sync(&mp->m_log->l_work); 976 xfs_log_force(mp, XFS_LOG_SYNC); 977 978 /* 979 * The superblock buffer is uncached and while xfs_ail_push_all_sync() 980 * will push it, xfs_buftarg_wait() will not wait for it. Further, 981 * xfs_buf_iowait() cannot be used because it was pushed with the 982 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for 983 * the IO to complete. 984 */ 985 xfs_ail_push_all_sync(mp->m_ail); 986 xfs_buftarg_wait(mp->m_ddev_targp); 987 xfs_buf_lock(mp->m_sb_bp); 988 xfs_buf_unlock(mp->m_sb_bp); 989 990 return xfs_log_cover(mp); 991 } 992 993 void 994 xfs_log_clean( 995 struct xfs_mount *mp) 996 { 997 xfs_log_quiesce(mp); 998 xfs_log_unmount_write(mp); 999 } 1000 1001 /* 1002 * Shut down and release the AIL and Log. 1003 * 1004 * During unmount, we need to ensure we flush all the dirty metadata objects 1005 * from the AIL so that the log is empty before we write the unmount record to 1006 * the log. Once this is done, we can tear down the AIL and the log. 1007 */ 1008 void 1009 xfs_log_unmount( 1010 struct xfs_mount *mp) 1011 { 1012 xfs_log_clean(mp); 1013 1014 xfs_buftarg_drain(mp->m_ddev_targp); 1015 1016 xfs_trans_ail_destroy(mp); 1017 1018 xfs_sysfs_del(&mp->m_log->l_kobj); 1019 1020 xlog_dealloc_log(mp->m_log); 1021 } 1022 1023 void 1024 xfs_log_item_init( 1025 struct xfs_mount *mp, 1026 struct xfs_log_item *item, 1027 int type, 1028 const struct xfs_item_ops *ops) 1029 { 1030 item->li_mountp = mp; 1031 item->li_ailp = mp->m_ail; 1032 item->li_type = type; 1033 item->li_ops = ops; 1034 item->li_lv = NULL; 1035 1036 INIT_LIST_HEAD(&item->li_ail); 1037 INIT_LIST_HEAD(&item->li_cil); 1038 INIT_LIST_HEAD(&item->li_bio_list); 1039 INIT_LIST_HEAD(&item->li_trans); 1040 } 1041 1042 /* 1043 * Wake up processes waiting for log space after we have moved the log tail. 
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL. Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static bool
xfs_log_need_covered(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			needed = false;

	if (!xlog_cil_empty(log))
		return false;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = true;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = true;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}

/*
 * Explicitly cover the log. This is similar to background log covering but
 * intended for usage in quiesce codepaths. The caller is responsible to ensure
 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
 * must all be empty.
 */
static int
xfs_log_cover(
	struct xfs_mount	*mp)
{
	int			error = 0;
	bool			need_covered;

	ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
	        !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
	       XFS_FORCED_SHUTDOWN(mp));

	if (!xfs_log_writable(mp))
		return 0;

	/*
	 * xfs_log_need_covered() is not idempotent because it progresses the
	 * state machine if the log requires covering.
	 * Therefore, we must call
	 * this function once and use the result until we've issued an sb sync.
	 * Do so first to make that abundantly clear.
	 *
	 * Fall into the covering sequence if the log needs covering or the
	 * mount has lazy superblock accounting to sync to disk. The sb sync
	 * used for covering accumulates the in-core counters, so covering
	 * handles this for us.
	 */
	need_covered = xfs_log_need_covered(mp);
	if (!need_covered && !xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	/*
	 * To cover the log, commit the superblock twice (at most) in
	 * independent checkpoints. The first serves as a reference for the
	 * tail pointer. The sync transaction and AIL push empties the AIL and
	 * updates the in-core tail to the LSN of the first checkpoint. The
	 * second commit updates the on-disk tail with the in-core LSN,
	 * covering the log. Push the AIL one more time to leave it empty, as
	 * we found it.
	 */
	do {
		error = xfs_sync_sb(mp, true);
		if (error)
			break;
		xfs_ail_push_all_sync(mp->m_ail);
	} while (xfs_log_need_covered(mp));

	return error;
}

/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->ail_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL was empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->ail_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->ail_lock);

	return tail_lsn;
}

/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
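 *
 * Worked example (illustrative numbers): with the tail at cycle 4, block
 * 9000 and the grant head at cycle 5, byte offset 1000, the head has
 * wrapped exactly once past the tail, so the space left is
 * BBTOB(9000) - 1000 bytes.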
1236 */ 1237 STATIC int 1238 xlog_space_left( 1239 struct xlog *log, 1240 atomic64_t *head) 1241 { 1242 int free_bytes; 1243 int tail_bytes; 1244 int tail_cycle; 1245 int head_cycle; 1246 int head_bytes; 1247 1248 xlog_crack_grant_head(head, &head_cycle, &head_bytes); 1249 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); 1250 tail_bytes = BBTOB(tail_bytes); 1251 if (tail_cycle == head_cycle && head_bytes >= tail_bytes) 1252 free_bytes = log->l_logsize - (head_bytes - tail_bytes); 1253 else if (tail_cycle + 1 < head_cycle) 1254 return 0; 1255 else if (tail_cycle < head_cycle) { 1256 ASSERT(tail_cycle == (head_cycle - 1)); 1257 free_bytes = tail_bytes - head_bytes; 1258 } else { 1259 /* 1260 * The reservation head is behind the tail. 1261 * In this case we just want to return the size of the 1262 * log as the amount of space left. 1263 */ 1264 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); 1265 xfs_alert(log->l_mp, 1266 " tail_cycle = %d, tail_bytes = %d", 1267 tail_cycle, tail_bytes); 1268 xfs_alert(log->l_mp, 1269 " GH cycle = %d, GH bytes = %d", 1270 head_cycle, head_bytes); 1271 ASSERT(0); 1272 free_bytes = log->l_logsize; 1273 } 1274 return free_bytes; 1275 } 1276 1277 1278 static void 1279 xlog_ioend_work( 1280 struct work_struct *work) 1281 { 1282 struct xlog_in_core *iclog = 1283 container_of(work, struct xlog_in_core, ic_end_io_work); 1284 struct xlog *log = iclog->ic_log; 1285 int error; 1286 1287 error = blk_status_to_errno(iclog->ic_bio.bi_status); 1288 #ifdef DEBUG 1289 /* treat writes with injected CRC errors as failed */ 1290 if (iclog->ic_fail_crc) 1291 error = -EIO; 1292 #endif 1293 1294 /* 1295 * Race to shutdown the filesystem if we see an error. 1296 */ 1297 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { 1298 xfs_alert(log->l_mp, "log I/O error %d", error); 1299 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); 1300 } 1301 1302 xlog_state_done_syncing(iclog); 1303 bio_uninit(&iclog->ic_bio); 1304 1305 /* 1306 * Drop the lock to signal that we are done. Nothing references the 1307 * iclog after this, so an unmount waiting on this lock can now tear it 1308 * down safely. As such, it is unsafe to reference the iclog after the 1309 * unlock as we could race with it being freed. 1310 */ 1311 up(&iclog->ic_sema); 1312 } 1313 1314 /* 1315 * Return size of each in-core log record buffer. 1316 * 1317 * All machines get 8 x 32kB buffers by default, unless tuned otherwise. 1318 * 1319 * If the filesystem blocksize is too large, we may need to choose a 1320 * larger size since the directory code currently logs entire blocks. 1321 */ 1322 STATIC void 1323 xlog_get_iclog_buffer_size( 1324 struct xfs_mount *mp, 1325 struct xlog *log) 1326 { 1327 if (mp->m_logbufs <= 0) 1328 mp->m_logbufs = XLOG_MAX_ICLOGS; 1329 if (mp->m_logbsize <= 0) 1330 mp->m_logbsize = XLOG_BIG_RECORD_BSIZE; 1331 1332 log->l_iclog_bufs = mp->m_logbufs; 1333 log->l_iclog_size = mp->m_logbsize; 1334 1335 /* 1336 * # headers = size / 32k - one header holds cycles from 32k of data. 1337 */ 1338 log->l_iclog_heads = 1339 DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE); 1340 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; 1341 } 1342 1343 void 1344 xfs_log_work_queue( 1345 struct xfs_mount *mp) 1346 { 1347 queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work, 1348 msecs_to_jiffies(xfs_syncd_centisecs * 10)); 1349 } 1350 1351 /* 1352 * Every sync period we need to unpin all items in the AIL and push them to 1353 * disk. 
If there is nothing dirty, then we might need to cover the log to 1354 * indicate that the filesystem is idle. 1355 */ 1356 static void 1357 xfs_log_worker( 1358 struct work_struct *work) 1359 { 1360 struct xlog *log = container_of(to_delayed_work(work), 1361 struct xlog, l_work); 1362 struct xfs_mount *mp = log->l_mp; 1363 1364 /* dgc: errors ignored - not fatal and nowhere to report them */ 1365 if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) { 1366 /* 1367 * Dump a transaction into the log that contains no real change. 1368 * This is needed to stamp the current tail LSN into the log 1369 * during the covering operation. 1370 * 1371 * We cannot use an inode here for this - that will push dirty 1372 * state back up into the VFS and then periodic inode flushing 1373 * will prevent log covering from making progress. Hence we 1374 * synchronously log the superblock instead to ensure the 1375 * superblock is immediately unpinned and can be written back. 1376 */ 1377 xfs_sync_sb(mp, true); 1378 } else 1379 xfs_log_force(mp, 0); 1380 1381 /* start pushing all the metadata that is currently dirty */ 1382 xfs_ail_push_all(mp->m_ail); 1383 1384 /* queue us up again */ 1385 xfs_log_work_queue(mp); 1386 } 1387 1388 /* 1389 * This routine initializes some of the log structure for a given mount point. 1390 * Its primary purpose is to fill in enough, so recovery can occur. However, 1391 * some other stuff may be filled in too. 1392 */ 1393 STATIC struct xlog * 1394 xlog_alloc_log( 1395 struct xfs_mount *mp, 1396 struct xfs_buftarg *log_target, 1397 xfs_daddr_t blk_offset, 1398 int num_bblks) 1399 { 1400 struct xlog *log; 1401 xlog_rec_header_t *head; 1402 xlog_in_core_t **iclogp; 1403 xlog_in_core_t *iclog, *prev_iclog=NULL; 1404 int i; 1405 int error = -ENOMEM; 1406 uint log2_size = 0; 1407 1408 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); 1409 if (!log) { 1410 xfs_warn(mp, "Log allocation failed: No memory!"); 1411 goto out; 1412 } 1413 1414 log->l_mp = mp; 1415 log->l_targ = log_target; 1416 log->l_logsize = BBTOB(num_bblks); 1417 log->l_logBBstart = blk_offset; 1418 log->l_logBBsize = num_bblks; 1419 log->l_covered_state = XLOG_STATE_COVER_IDLE; 1420 log->l_flags |= XLOG_ACTIVE_RECOVERY; 1421 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); 1422 1423 log->l_prev_block = -1; 1424 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ 1425 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); 1426 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); 1427 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ 1428 1429 if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) 1430 log->l_iclog_roundoff = mp->m_sb.sb_logsunit; 1431 else 1432 log->l_iclog_roundoff = BBSIZE; 1433 1434 xlog_grant_head_init(&log->l_reserve_head); 1435 xlog_grant_head_init(&log->l_write_head); 1436 1437 error = -EFSCORRUPTED; 1438 if (xfs_sb_version_hassector(&mp->m_sb)) { 1439 log2_size = mp->m_sb.sb_logsectlog; 1440 if (log2_size < BBSHIFT) { 1441 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", 1442 log2_size, BBSHIFT); 1443 goto out_free_log; 1444 } 1445 1446 log2_size -= BBSHIFT; 1447 if (log2_size > mp->m_sectbb_log) { 1448 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", 1449 log2_size, mp->m_sectbb_log); 1450 goto out_free_log; 1451 } 1452 1453 /* for larger sector sizes, must have v2 or external log */ 1454 if (log2_size && log->l_logBBstart > 0 && 1455 !xfs_sb_version_haslogv2(&mp->m_sb)) { 1456 xfs_warn(mp, 1457 "log sector size (0x%x) invalid 
for configuration.", 1458 log2_size); 1459 goto out_free_log; 1460 } 1461 } 1462 log->l_sectBBsize = 1 << log2_size; 1463 1464 xlog_get_iclog_buffer_size(mp, log); 1465 1466 spin_lock_init(&log->l_icloglock); 1467 init_waitqueue_head(&log->l_flush_wait); 1468 1469 iclogp = &log->l_iclog; 1470 /* 1471 * The amount of memory to allocate for the iclog structure is 1472 * rather funky due to the way the structure is defined. It is 1473 * done this way so that we can use different sizes for machines 1474 * with different amounts of memory. See the definition of 1475 * xlog_in_core_t in xfs_log_priv.h for details. 1476 */ 1477 ASSERT(log->l_iclog_size >= 4096); 1478 for (i = 0; i < log->l_iclog_bufs; i++) { 1479 int align_mask = xfs_buftarg_dma_alignment(mp->m_logdev_targp); 1480 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * 1481 sizeof(struct bio_vec); 1482 1483 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL); 1484 if (!iclog) 1485 goto out_free_iclog; 1486 1487 *iclogp = iclog; 1488 iclog->ic_prev = prev_iclog; 1489 prev_iclog = iclog; 1490 1491 iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask, 1492 KM_MAYFAIL | KM_ZERO); 1493 if (!iclog->ic_data) 1494 goto out_free_iclog; 1495 #ifdef DEBUG 1496 log->l_iclog_bak[i] = &iclog->ic_header; 1497 #endif 1498 head = &iclog->ic_header; 1499 memset(head, 0, sizeof(xlog_rec_header_t)); 1500 head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); 1501 head->h_version = cpu_to_be32( 1502 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); 1503 head->h_size = cpu_to_be32(log->l_iclog_size); 1504 /* new fields */ 1505 head->h_fmt = cpu_to_be32(XLOG_FMT); 1506 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); 1507 1508 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; 1509 iclog->ic_state = XLOG_STATE_ACTIVE; 1510 iclog->ic_log = log; 1511 atomic_set(&iclog->ic_refcnt, 0); 1512 INIT_LIST_HEAD(&iclog->ic_callbacks); 1513 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; 1514 1515 init_waitqueue_head(&iclog->ic_force_wait); 1516 init_waitqueue_head(&iclog->ic_write_wait); 1517 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work); 1518 sema_init(&iclog->ic_sema, 1); 1519 1520 iclogp = &iclog->ic_next; 1521 } 1522 *iclogp = log->l_iclog; /* complete ring */ 1523 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ 1524 1525 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", 1526 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | 1527 WQ_HIGHPRI), 1528 0, mp->m_super->s_id); 1529 if (!log->l_ioend_workqueue) 1530 goto out_free_iclog; 1531 1532 error = xlog_cil_init(log); 1533 if (error) 1534 goto out_destroy_workqueue; 1535 return log; 1536 1537 out_destroy_workqueue: 1538 destroy_workqueue(log->l_ioend_workqueue); 1539 out_free_iclog: 1540 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { 1541 prev_iclog = iclog->ic_next; 1542 kmem_free(iclog->ic_data); 1543 kmem_free(iclog); 1544 if (prev_iclog == log->l_iclog) 1545 break; 1546 } 1547 out_free_log: 1548 kmem_free(log); 1549 out: 1550 return ERR_PTR(error); 1551 } /* xlog_alloc_log */ 1552 1553 /* 1554 * Write out the commit record of a transaction associated with the given 1555 * ticket to close off a running log write. Return the lsn of the commit record. 
1556 */ 1557 int 1558 xlog_commit_record( 1559 struct xlog *log, 1560 struct xlog_ticket *ticket, 1561 struct xlog_in_core **iclog, 1562 xfs_lsn_t *lsn) 1563 { 1564 struct xfs_log_iovec reg = { 1565 .i_addr = NULL, 1566 .i_len = 0, 1567 .i_type = XLOG_REG_TYPE_COMMIT, 1568 }; 1569 struct xfs_log_vec vec = { 1570 .lv_niovecs = 1, 1571 .lv_iovecp = ®, 1572 }; 1573 int error; 1574 1575 if (XLOG_FORCED_SHUTDOWN(log)) 1576 return -EIO; 1577 1578 error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS); 1579 if (error) 1580 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); 1581 return error; 1582 } 1583 1584 /* 1585 * Compute the LSN that we'd need to push the log tail towards in order to have 1586 * (a) enough on-disk log space to log the number of bytes specified, (b) at 1587 * least 25% of the log space free, and (c) at least 256 blocks free. If the 1588 * log free space already meets all three thresholds, this function returns 1589 * NULLCOMMITLSN. 1590 */ 1591 xfs_lsn_t 1592 xlog_grant_push_threshold( 1593 struct xlog *log, 1594 int need_bytes) 1595 { 1596 xfs_lsn_t threshold_lsn = 0; 1597 xfs_lsn_t last_sync_lsn; 1598 int free_blocks; 1599 int free_bytes; 1600 int threshold_block; 1601 int threshold_cycle; 1602 int free_threshold; 1603 1604 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); 1605 1606 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); 1607 free_blocks = BTOBBT(free_bytes); 1608 1609 /* 1610 * Set the threshold for the minimum number of free blocks in the 1611 * log to the maximum of what the caller needs, one quarter of the 1612 * log, and 256 blocks. 1613 */ 1614 free_threshold = BTOBB(need_bytes); 1615 free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); 1616 free_threshold = max(free_threshold, 256); 1617 if (free_blocks >= free_threshold) 1618 return NULLCOMMITLSN; 1619 1620 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, 1621 &threshold_block); 1622 threshold_block += free_threshold; 1623 if (threshold_block >= log->l_logBBsize) { 1624 threshold_block -= log->l_logBBsize; 1625 threshold_cycle += 1; 1626 } 1627 threshold_lsn = xlog_assign_lsn(threshold_cycle, 1628 threshold_block); 1629 /* 1630 * Don't pass in an lsn greater than the lsn of the last 1631 * log record known to be on disk. Use a snapshot of the last sync lsn 1632 * so that it doesn't change between the compare and the set. 1633 */ 1634 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); 1635 if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0) 1636 threshold_lsn = last_sync_lsn; 1637 1638 return threshold_lsn; 1639 } 1640 1641 /* 1642 * Push the tail of the log if we need to do so to maintain the free log space 1643 * thresholds set out by xlog_grant_push_threshold. We may need to adopt a 1644 * policy which pushes on an lsn which is further along in the log once we 1645 * reach the high water mark. In this manner, we would be creating a low water 1646 * mark. 1647 */ 1648 STATIC void 1649 xlog_grant_push_ail( 1650 struct xlog *log, 1651 int need_bytes) 1652 { 1653 xfs_lsn_t threshold_lsn; 1654 1655 threshold_lsn = xlog_grant_push_threshold(log, need_bytes); 1656 if (threshold_lsn == NULLCOMMITLSN || XLOG_FORCED_SHUTDOWN(log)) 1657 return; 1658 1659 /* 1660 * Get the transaction layer to kick the dirty buffers out to 1661 * disk asynchronously. No point in trying to do this if 1662 * the filesystem is shutting down. 
	 */
	xfs_ail_push(log->l_ailp, threshold_lsn);
}

/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum_update((char *)rhead,
				     sizeof(struct xlog_rec_header),
				     offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}

	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}

static void
xlog_bio_end_io(
	struct bio		*bio)
{
	struct xlog_in_core	*iclog = bio->bi_private;

	queue_work(iclog->ic_log->l_ioend_workqueue,
		   &iclog->ic_end_io_work);
}

static int
xlog_map_iclog_data(
	struct bio		*bio,
	void			*data,
	size_t			count)
{
	do {
		struct page	*page = kmem_to_page(data);
		unsigned int	off = offset_in_page(data);
		size_t		len = min_t(size_t, count, PAGE_SIZE - off);

		if (bio_add_page(bio, page, len, off) != len)
			return -EIO;

		data += len;
		count -= len;
	} while (count);

	return 0;
}

STATIC void
xlog_write_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint64_t		bno,
	unsigned int		count)
{
	ASSERT(bno < log->l_logBBsize);
	trace_xlog_iclog_write(iclog, _RET_IP_);

	/*
	 * We lock the iclogbufs here so that we can serialise against I/O
	 * completion during unmount.  We might be processing a shutdown
	 * triggered during unmount, and that can occur asynchronously to the
	 * unmount thread, and hence we need to ensure that completes before
	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
	 * across the log IO to achieve that.
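	 *
	 * (The unmount side of this is xlog_dealloc_log(), which cycles each
	 * iclog's ic_sema to make sure all log IO completion is done before
	 * the iclog buffers are torn down.)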
	 */
	down(&iclog->ic_sema);
	if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) {
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here.  We kick off the state machine and unlock
		 * the buffer manually, the code needs to be kept in sync
		 * with the I/O completion path.
		 */
		xlog_state_done_syncing(iclog);
		up(&iclog->ic_sema);
		return;
	}

	bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
	bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
	iclog->ic_bio.bi_private = iclog;

	/*
	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer that there
	 * are more IOs coming immediately after this one. This prevents the
	 * block layer writeback throttle from throttling log writes behind
	 * background metadata writeback and causing priority inversions.
	 */
	iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
		/*
		 * For external log devices, we also need to flush the data
		 * device cache first to ensure all metadata writeback covered
		 * by the LSN in this iclog is on stable storage. This is slow,
		 * but it *must* complete before we issue the external log IO.
		 */
		if (log->l_targ != log->l_mp->m_ddev_targp)
			blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
	}
	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
		iclog->ic_bio.bi_opf |= REQ_FUA;

	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);

	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
		return;
	}
	if (is_vmalloc_addr(iclog->ic_data))
		flush_kernel_vmap_range(iclog->ic_data, count);

	/*
	 * If this log buffer would straddle the end of the log we will have
	 * to split it up into two bios, so that we can continue at the start.
	 */
	if (bno + BTOBB(count) > log->l_logBBsize) {
		struct bio *split;

		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
				  GFP_NOIO, &fs_bio_set);
		bio_chain(split, &iclog->ic_bio);
		submit_bio(split);

		/* restart at logical offset zero for the remainder */
		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
	}

	submit_bio(&iclog->ic_bio);
}

/*
 * We need to bump cycle number for the part of the iclog that is
 * written to the start of the log. Watch out for the header magic
 * number case, though.
1870 */ 1871 static void 1872 xlog_split_iclog( 1873 struct xlog *log, 1874 void *data, 1875 uint64_t bno, 1876 unsigned int count) 1877 { 1878 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); 1879 unsigned int i; 1880 1881 for (i = split_offset; i < count; i += BBSIZE) { 1882 uint32_t cycle = get_unaligned_be32(data + i); 1883 1884 if (++cycle == XLOG_HEADER_MAGIC_NUM) 1885 cycle++; 1886 put_unaligned_be32(cycle, data + i); 1887 } 1888 } 1889 1890 static int 1891 xlog_calc_iclog_size( 1892 struct xlog *log, 1893 struct xlog_in_core *iclog, 1894 uint32_t *roundoff) 1895 { 1896 uint32_t count_init, count; 1897 1898 /* Add for LR header */ 1899 count_init = log->l_iclog_hsize + iclog->ic_offset; 1900 count = roundup(count_init, log->l_iclog_roundoff); 1901 1902 *roundoff = count - count_init; 1903 1904 ASSERT(count >= count_init); 1905 ASSERT(*roundoff < log->l_iclog_roundoff); 1906 return count; 1907 } 1908 1909 /* 1910 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 1911 * fashion. Previously, we should have moved the current iclog 1912 * ptr in the log to point to the next available iclog. This allows further 1913 * write to continue while this code syncs out an iclog ready to go. 1914 * Before an in-core log can be written out, the data section must be scanned 1915 * to save away the 1st word of each BBSIZE block into the header. We replace 1916 * it with the current cycle count. Each BBSIZE block is tagged with the 1917 * cycle count because there in an implicit assumption that drives will 1918 * guarantee that entire 512 byte blocks get written at once. In other words, 1919 * we can't have part of a 512 byte block written and part not written. By 1920 * tagging each block, we will know which blocks are valid when recovering 1921 * after an unclean shutdown. 1922 * 1923 * This routine is single threaded on the iclog. No other thread can be in 1924 * this routine with the same iclog. Changing contents of iclog can there- 1925 * fore be done without grabbing the state machine lock. Updating the global 1926 * log will require grabbing the lock though. 1927 * 1928 * The entire log manager uses a logical block numbering scheme. Only 1929 * xlog_write_iclog knows about the fact that the log may not start with 1930 * block zero on a given device. 1931 */ 1932 STATIC void 1933 xlog_sync( 1934 struct xlog *log, 1935 struct xlog_in_core *iclog) 1936 { 1937 unsigned int count; /* byte count of bwrite */ 1938 unsigned int roundoff; /* roundoff to BB or stripe */ 1939 uint64_t bno; 1940 unsigned int size; 1941 1942 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 1943 trace_xlog_iclog_sync(iclog, _RET_IP_); 1944 1945 count = xlog_calc_iclog_size(log, iclog, &roundoff); 1946 1947 /* move grant heads by roundoff in sync */ 1948 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); 1949 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); 1950 1951 /* put cycle number in every block */ 1952 xlog_pack_data(log, iclog, roundoff); 1953 1954 /* real byte length */ 1955 size = iclog->ic_offset; 1956 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) 1957 size += roundoff; 1958 iclog->ic_header.h_len = cpu_to_be32(size); 1959 1960 XFS_STATS_INC(log->l_mp, xs_log_writes); 1961 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); 1962 1963 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); 1964 1965 /* Do we need to split this write into 2 parts? 
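 * (We split when bno + BTOBB(count) would run past l_logBBsize: the call
 * below restamps the cycle numbers of the wrapped blocks, and
 * xlog_write_iclog() later issues the wrapped portion as a second bio that
 * starts again at the front of the log.)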
*/ 1966 if (bno + BTOBB(count) > log->l_logBBsize) 1967 xlog_split_iclog(log, &iclog->ic_header, bno, count); 1968 1969 /* calculate the checksum */ 1970 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, 1971 iclog->ic_datap, size); 1972 /* 1973 * Intentionally corrupt the log record CRC based on the error injection 1974 * frequency, if defined. This facilitates testing log recovery in the 1975 * event of torn writes. Hence, set the IOABORT state to abort the log 1976 * write on I/O completion and shutdown the fs. The subsequent mount 1977 * detects the bad CRC and attempts to recover. 1978 */ 1979 #ifdef DEBUG 1980 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { 1981 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); 1982 iclog->ic_fail_crc = true; 1983 xfs_warn(log->l_mp, 1984 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", 1985 be64_to_cpu(iclog->ic_header.h_lsn)); 1986 } 1987 #endif 1988 xlog_verify_iclog(log, iclog, count); 1989 xlog_write_iclog(log, iclog, bno, count); 1990 } 1991 1992 /* 1993 * Deallocate a log structure 1994 */ 1995 STATIC void 1996 xlog_dealloc_log( 1997 struct xlog *log) 1998 { 1999 xlog_in_core_t *iclog, *next_iclog; 2000 int i; 2001 2002 xlog_cil_destroy(log); 2003 2004 /* 2005 * Cycle all the iclogbuf locks to make sure all log IO completion 2006 * is done before we tear down these buffers. 2007 */ 2008 iclog = log->l_iclog; 2009 for (i = 0; i < log->l_iclog_bufs; i++) { 2010 down(&iclog->ic_sema); 2011 up(&iclog->ic_sema); 2012 iclog = iclog->ic_next; 2013 } 2014 2015 iclog = log->l_iclog; 2016 for (i = 0; i < log->l_iclog_bufs; i++) { 2017 next_iclog = iclog->ic_next; 2018 kmem_free(iclog->ic_data); 2019 kmem_free(iclog); 2020 iclog = next_iclog; 2021 } 2022 2023 log->l_mp->m_log = NULL; 2024 destroy_workqueue(log->l_ioend_workqueue); 2025 kmem_free(log); 2026 } 2027 2028 /* 2029 * Update counters atomically now that memcpy is done.
2030 */ 2031 static inline void 2032 xlog_state_finish_copy( 2033 struct xlog *log, 2034 struct xlog_in_core *iclog, 2035 int record_cnt, 2036 int copy_bytes) 2037 { 2038 lockdep_assert_held(&log->l_icloglock); 2039 2040 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 2041 iclog->ic_offset += copy_bytes; 2042 } 2043 2044 /* 2045 * print out info relating to regions written which consume 2046 * the reservation 2047 */ 2048 void 2049 xlog_print_tic_res( 2050 struct xfs_mount *mp, 2051 struct xlog_ticket *ticket) 2052 { 2053 uint i; 2054 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 2055 2056 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 2057 #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str 2058 static char *res_type_str[] = { 2059 REG_TYPE_STR(BFORMAT, "bformat"), 2060 REG_TYPE_STR(BCHUNK, "bchunk"), 2061 REG_TYPE_STR(EFI_FORMAT, "efi_format"), 2062 REG_TYPE_STR(EFD_FORMAT, "efd_format"), 2063 REG_TYPE_STR(IFORMAT, "iformat"), 2064 REG_TYPE_STR(ICORE, "icore"), 2065 REG_TYPE_STR(IEXT, "iext"), 2066 REG_TYPE_STR(IBROOT, "ibroot"), 2067 REG_TYPE_STR(ILOCAL, "ilocal"), 2068 REG_TYPE_STR(IATTR_EXT, "iattr_ext"), 2069 REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), 2070 REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), 2071 REG_TYPE_STR(QFORMAT, "qformat"), 2072 REG_TYPE_STR(DQUOT, "dquot"), 2073 REG_TYPE_STR(QUOTAOFF, "quotaoff"), 2074 REG_TYPE_STR(LRHEADER, "LR header"), 2075 REG_TYPE_STR(UNMOUNT, "unmount"), 2076 REG_TYPE_STR(COMMIT, "commit"), 2077 REG_TYPE_STR(TRANSHDR, "trans header"), 2078 REG_TYPE_STR(ICREATE, "inode create"), 2079 REG_TYPE_STR(RUI_FORMAT, "rui_format"), 2080 REG_TYPE_STR(RUD_FORMAT, "rud_format"), 2081 REG_TYPE_STR(CUI_FORMAT, "cui_format"), 2082 REG_TYPE_STR(CUD_FORMAT, "cud_format"), 2083 REG_TYPE_STR(BUI_FORMAT, "bui_format"), 2084 REG_TYPE_STR(BUD_FORMAT, "bud_format"), 2085 }; 2086 BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1); 2087 #undef REG_TYPE_STR 2088 2089 xfs_warn(mp, "ticket reservation summary:"); 2090 xfs_warn(mp, " unit res = %d bytes", 2091 ticket->t_unit_res); 2092 xfs_warn(mp, " current res = %d bytes", 2093 ticket->t_curr_res); 2094 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", 2095 ticket->t_res_arr_sum, ticket->t_res_o_flow); 2096 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", 2097 ticket->t_res_num_ophdrs, ophdr_spc); 2098 xfs_warn(mp, " ophdr + reg = %u bytes", 2099 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); 2100 xfs_warn(mp, " num regions = %u", 2101 ticket->t_res_num); 2102 2103 for (i = 0; i < ticket->t_res_num; i++) { 2104 uint r_type = ticket->t_res_arr[i].r_type; 2105 xfs_warn(mp, "region[%u]: %s - %u bytes", i, 2106 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 2107 "bad-rtype" : res_type_str[r_type]), 2108 ticket->t_res_arr[i].r_len); 2109 } 2110 } 2111 2112 /* 2113 * Print a summary of the transaction. 
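 * (Dumps the core transaction and ticket fields via xlog_print_tic_res(),
 * then each attached log item and its formatted vectors, hex dumping at
 * most the first 32 bytes of every iovec.)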
2114 */ 2115 void 2116 xlog_print_trans( 2117 struct xfs_trans *tp) 2118 { 2119 struct xfs_mount *mp = tp->t_mountp; 2120 struct xfs_log_item *lip; 2121 2122 /* dump core transaction and ticket info */ 2123 xfs_warn(mp, "transaction summary:"); 2124 xfs_warn(mp, " log res = %d", tp->t_log_res); 2125 xfs_warn(mp, " log count = %d", tp->t_log_count); 2126 xfs_warn(mp, " flags = 0x%x", tp->t_flags); 2127 2128 xlog_print_tic_res(mp, tp->t_ticket); 2129 2130 /* dump each log item */ 2131 list_for_each_entry(lip, &tp->t_items, li_trans) { 2132 struct xfs_log_vec *lv = lip->li_lv; 2133 struct xfs_log_iovec *vec; 2134 int i; 2135 2136 xfs_warn(mp, "log item: "); 2137 xfs_warn(mp, " type = 0x%x", lip->li_type); 2138 xfs_warn(mp, " flags = 0x%lx", lip->li_flags); 2139 if (!lv) 2140 continue; 2141 xfs_warn(mp, " niovecs = %d", lv->lv_niovecs); 2142 xfs_warn(mp, " size = %d", lv->lv_size); 2143 xfs_warn(mp, " bytes = %d", lv->lv_bytes); 2144 xfs_warn(mp, " buf len = %d", lv->lv_buf_len); 2145 2146 /* dump each iovec for the log item */ 2147 vec = lv->lv_iovecp; 2148 for (i = 0; i < lv->lv_niovecs; i++) { 2149 int dumplen = min(vec->i_len, 32); 2150 2151 xfs_warn(mp, " iovec[%d]", i); 2152 xfs_warn(mp, " type = 0x%x", vec->i_type); 2153 xfs_warn(mp, " len = %d", vec->i_len); 2154 xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i); 2155 xfs_hex_dump(vec->i_addr, dumplen); 2156 2157 vec++; 2158 } 2159 } 2160 } 2161 2162 /* 2163 * Calculate the potential space needed by the log vector. We may need a start 2164 * record, and each region gets its own struct xlog_op_header and may need to be 2165 * double word aligned. 2166 */ 2167 static int 2168 xlog_write_calc_vec_length( 2169 struct xlog_ticket *ticket, 2170 struct xfs_log_vec *log_vector, 2171 uint optype) 2172 { 2173 struct xfs_log_vec *lv; 2174 int headers = 0; 2175 int len = 0; 2176 int i; 2177 2178 if (optype & XLOG_START_TRANS) 2179 headers++; 2180 2181 for (lv = log_vector; lv; lv = lv->lv_next) { 2182 /* we don't write ordered log vectors */ 2183 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) 2184 continue; 2185 2186 headers += lv->lv_niovecs; 2187 2188 for (i = 0; i < lv->lv_niovecs; i++) { 2189 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; 2190 2191 len += vecp->i_len; 2192 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); 2193 } 2194 } 2195 2196 ticket->t_res_num_ophdrs += headers; 2197 len += headers * sizeof(struct xlog_op_header); 2198 2199 return len; 2200 } 2201 2202 static void 2203 xlog_write_start_rec( 2204 struct xlog_op_header *ophdr, 2205 struct xlog_ticket *ticket) 2206 { 2207 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2208 ophdr->oh_clientid = ticket->t_clientid; 2209 ophdr->oh_len = 0; 2210 ophdr->oh_flags = XLOG_START_TRANS; 2211 ophdr->oh_res2 = 0; 2212 } 2213 2214 static xlog_op_header_t * 2215 xlog_write_setup_ophdr( 2216 struct xlog *log, 2217 struct xlog_op_header *ophdr, 2218 struct xlog_ticket *ticket, 2219 uint flags) 2220 { 2221 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2222 ophdr->oh_clientid = ticket->t_clientid; 2223 ophdr->oh_res2 = 0; 2224 2225 /* are we copying a commit or unmount record? */ 2226 ophdr->oh_flags = flags; 2227 2228 /* 2229 * We've seen logs corrupted with bad transaction client ids. This 2230 * makes sure that XFS doesn't generate them on. Turn this into an EIO 2231 * and shut down the filesystem. 
2232 */ 2233 switch (ophdr->oh_clientid) { 2234 case XFS_TRANSACTION: 2235 case XFS_VOLUME: 2236 case XFS_LOG: 2237 break; 2238 default: 2239 xfs_warn(log->l_mp, 2240 "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT, 2241 ophdr->oh_clientid, ticket); 2242 return NULL; 2243 } 2244 2245 return ophdr; 2246 } 2247 2248 /* 2249 * Set up the parameters of the region copy into the log. This has 2250 * to handle region write split across multiple log buffers - this 2251 * state is kept external to this function so that this code can 2252 * be written in an obvious, self documenting manner. 2253 */ 2254 static int 2255 xlog_write_setup_copy( 2256 struct xlog_ticket *ticket, 2257 struct xlog_op_header *ophdr, 2258 int space_available, 2259 int space_required, 2260 int *copy_off, 2261 int *copy_len, 2262 int *last_was_partial_copy, 2263 int *bytes_consumed) 2264 { 2265 int still_to_copy; 2266 2267 still_to_copy = space_required - *bytes_consumed; 2268 *copy_off = *bytes_consumed; 2269 2270 if (still_to_copy <= space_available) { 2271 /* write of region completes here */ 2272 *copy_len = still_to_copy; 2273 ophdr->oh_len = cpu_to_be32(*copy_len); 2274 if (*last_was_partial_copy) 2275 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); 2276 *last_was_partial_copy = 0; 2277 *bytes_consumed = 0; 2278 return 0; 2279 } 2280 2281 /* partial write of region, needs extra log op header reservation */ 2282 *copy_len = space_available; 2283 ophdr->oh_len = cpu_to_be32(*copy_len); 2284 ophdr->oh_flags |= XLOG_CONTINUE_TRANS; 2285 if (*last_was_partial_copy) 2286 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; 2287 *bytes_consumed += *copy_len; 2288 (*last_was_partial_copy)++; 2289 2290 /* account for new log op header */ 2291 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2292 ticket->t_res_num_ophdrs++; 2293 2294 return sizeof(struct xlog_op_header); 2295 } 2296 2297 static int 2298 xlog_write_copy_finish( 2299 struct xlog *log, 2300 struct xlog_in_core *iclog, 2301 uint flags, 2302 int *record_cnt, 2303 int *data_cnt, 2304 int *partial_copy, 2305 int *partial_copy_len, 2306 int log_offset, 2307 struct xlog_in_core **commit_iclog) 2308 { 2309 int error; 2310 2311 if (*partial_copy) { 2312 /* 2313 * This iclog has already been marked WANT_SYNC by 2314 * xlog_state_get_iclog_space. 2315 */ 2316 spin_lock(&log->l_icloglock); 2317 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2318 *record_cnt = 0; 2319 *data_cnt = 0; 2320 goto release_iclog; 2321 } 2322 2323 *partial_copy = 0; 2324 *partial_copy_len = 0; 2325 2326 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { 2327 /* no more space in this iclog - push it. */ 2328 spin_lock(&log->l_icloglock); 2329 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2330 *record_cnt = 0; 2331 *data_cnt = 0; 2332 2333 if (iclog->ic_state == XLOG_STATE_ACTIVE) 2334 xlog_state_switch_iclogs(log, iclog, 0); 2335 else 2336 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || 2337 iclog->ic_state == XLOG_STATE_IOERROR); 2338 if (!commit_iclog) 2339 goto release_iclog; 2340 spin_unlock(&log->l_icloglock); 2341 ASSERT(flags & XLOG_COMMIT_TRANS); 2342 *commit_iclog = iclog; 2343 } 2344 2345 return 0; 2346 2347 release_iclog: 2348 error = xlog_state_release_iclog(log, iclog, 0); 2349 spin_unlock(&log->l_icloglock); 2350 return error; 2351 } 2352 2353 /* 2354 * Write some region out to in-core log 2355 * 2356 * This will be called when writing externally provided regions or when 2357 * writing out a commit record for a given transaction. 
2358 * 2359 * General algorithm: 2360 * 1. Find total length of this write. This may include adding to the 2361 * lengths passed in. 2362 * 2. Check whether we violate the tickets reservation. 2363 * 3. While writing to this iclog 2364 * A. Reserve as much space in this iclog as can get 2365 * B. If this is first write, save away start lsn 2366 * C. While writing this region: 2367 * 1. If first write of transaction, write start record 2368 * 2. Write log operation header (header per region) 2369 * 3. Find out if we can fit entire region into this iclog 2370 * 4. Potentially, verify destination memcpy ptr 2371 * 5. Memcpy (partial) region 2372 * 6. If partial copy, release iclog; otherwise, continue 2373 * copying more regions into current iclog 2374 * 4. Mark want sync bit (in simulation mode) 2375 * 5. Release iclog for potential flush to on-disk log. 2376 * 2377 * ERRORS: 2378 * 1. Panic if reservation is overrun. This should never happen since 2379 * reservation amounts are generated internal to the filesystem. 2380 * NOTES: 2381 * 1. Tickets are single threaded data structures. 2382 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the 2383 * syncing routine. When a single log_write region needs to span 2384 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set 2385 * on all log operation writes which don't contain the end of the 2386 * region. The XLOG_END_TRANS bit is used for the in-core log 2387 * operation which contains the end of the continued log_write region. 2388 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, 2389 * we don't really know exactly how much space will be used. As a result, 2390 * we don't update ic_offset until the end when we know exactly how many 2391 * bytes have been written out. 2392 */ 2393 int 2394 xlog_write( 2395 struct xlog *log, 2396 struct xfs_log_vec *log_vector, 2397 struct xlog_ticket *ticket, 2398 xfs_lsn_t *start_lsn, 2399 struct xlog_in_core **commit_iclog, 2400 uint optype) 2401 { 2402 struct xlog_in_core *iclog = NULL; 2403 struct xfs_log_vec *lv = log_vector; 2404 struct xfs_log_iovec *vecp = lv->lv_iovecp; 2405 int index = 0; 2406 int len; 2407 int partial_copy = 0; 2408 int partial_copy_len = 0; 2409 int contwr = 0; 2410 int record_cnt = 0; 2411 int data_cnt = 0; 2412 int error = 0; 2413 2414 /* 2415 * If this is a commit or unmount transaction, we don't need a start 2416 * record to be written. We do, however, have to account for the 2417 * commit or unmount header that gets written. Hence we always have 2418 * to account for an extra xlog_op_header here. 2419 */ 2420 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2421 if (ticket->t_curr_res < 0) { 2422 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 2423 "ctx ticket reservation ran out. Need to up reservation"); 2424 xlog_print_tic_res(log->l_mp, ticket); 2425 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); 2426 } 2427 2428 len = xlog_write_calc_vec_length(ticket, log_vector, optype); 2429 if (start_lsn) 2430 *start_lsn = 0; 2431 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2432 void *ptr; 2433 int log_offset; 2434 2435 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 2436 &contwr, &log_offset); 2437 if (error) 2438 return error; 2439 2440 ASSERT(log_offset <= iclog->ic_size - 1); 2441 ptr = iclog->ic_datap + log_offset; 2442 2443 /* Start_lsn is the first lsn written to. 
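 * (i.e. the header LSN of the first iclog this write lands in; it is only
 * captured once, on the first pass through this loop.)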
*/ 2444 if (start_lsn && !*start_lsn) 2445 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2446 2447 /* 2448 * This loop writes out as many regions as can fit in the amount 2449 * of space which was allocated by xlog_state_get_iclog_space(). 2450 */ 2451 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2452 struct xfs_log_iovec *reg; 2453 struct xlog_op_header *ophdr; 2454 int copy_len; 2455 int copy_off; 2456 bool ordered = false; 2457 bool wrote_start_rec = false; 2458 2459 /* ordered log vectors have no regions to write */ 2460 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { 2461 ASSERT(lv->lv_niovecs == 0); 2462 ordered = true; 2463 goto next_lv; 2464 } 2465 2466 reg = &vecp[index]; 2467 ASSERT(reg->i_len % sizeof(int32_t) == 0); 2468 ASSERT((unsigned long)ptr % sizeof(int32_t) == 0); 2469 2470 /* 2471 * Before we start formatting log vectors, we need to 2472 * write a start record. Only do this for the first 2473 * iclog we write to. 2474 */ 2475 if (optype & XLOG_START_TRANS) { 2476 xlog_write_start_rec(ptr, ticket); 2477 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2478 sizeof(struct xlog_op_header)); 2479 optype &= ~XLOG_START_TRANS; 2480 wrote_start_rec = true; 2481 } 2482 2483 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, optype); 2484 if (!ophdr) 2485 return -EIO; 2486 2487 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2488 sizeof(struct xlog_op_header)); 2489 2490 len += xlog_write_setup_copy(ticket, ophdr, 2491 iclog->ic_size-log_offset, 2492 reg->i_len, 2493 ©_off, ©_len, 2494 &partial_copy, 2495 &partial_copy_len); 2496 xlog_verify_dest_ptr(log, ptr); 2497 2498 /* 2499 * Copy region. 2500 * 2501 * Unmount records just log an opheader, so can have 2502 * empty payloads with no data region to copy. Hence we 2503 * only copy the payload if the vector says it has data 2504 * to copy. 2505 */ 2506 ASSERT(copy_len >= 0); 2507 if (copy_len > 0) { 2508 memcpy(ptr, reg->i_addr + copy_off, copy_len); 2509 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2510 copy_len); 2511 } 2512 copy_len += sizeof(struct xlog_op_header); 2513 record_cnt++; 2514 if (wrote_start_rec) { 2515 copy_len += sizeof(struct xlog_op_header); 2516 record_cnt++; 2517 } 2518 data_cnt += contwr ? copy_len : 0; 2519 2520 error = xlog_write_copy_finish(log, iclog, optype, 2521 &record_cnt, &data_cnt, 2522 &partial_copy, 2523 &partial_copy_len, 2524 log_offset, 2525 commit_iclog); 2526 if (error) 2527 return error; 2528 2529 /* 2530 * if we had a partial copy, we need to get more iclog 2531 * space but we don't want to increment the region 2532 * index because there is still more is this region to 2533 * write. 2534 * 2535 * If we completed writing this region, and we flushed 2536 * the iclog (indicated by resetting of the record 2537 * count), then we also need to get more log space. If 2538 * this was the last record, though, we are done and 2539 * can just return. 
2540 */ 2541 if (partial_copy) 2542 break; 2543 2544 if (++index == lv->lv_niovecs) { 2545 next_lv: 2546 lv = lv->lv_next; 2547 index = 0; 2548 if (lv) 2549 vecp = lv->lv_iovecp; 2550 } 2551 if (record_cnt == 0 && !ordered) { 2552 if (!lv) 2553 return 0; 2554 break; 2555 } 2556 } 2557 } 2558 2559 ASSERT(len == 0); 2560 2561 spin_lock(&log->l_icloglock); 2562 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 2563 if (commit_iclog) { 2564 ASSERT(optype & XLOG_COMMIT_TRANS); 2565 *commit_iclog = iclog; 2566 } else { 2567 error = xlog_state_release_iclog(log, iclog, 0); 2568 } 2569 spin_unlock(&log->l_icloglock); 2570 2571 return error; 2572 } 2573 2574 static void 2575 xlog_state_activate_iclog( 2576 struct xlog_in_core *iclog, 2577 int *iclogs_changed) 2578 { 2579 ASSERT(list_empty_careful(&iclog->ic_callbacks)); 2580 trace_xlog_iclog_activate(iclog, _RET_IP_); 2581 2582 /* 2583 * If the number of ops in this iclog indicate it just contains the 2584 * dummy transaction, we can change state into IDLE (the second time 2585 * around). Otherwise we should change the state into NEED a dummy. 2586 * We don't need to cover the dummy. 2587 */ 2588 if (*iclogs_changed == 0 && 2589 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { 2590 *iclogs_changed = 1; 2591 } else { 2592 /* 2593 * We have two dirty iclogs so start over. This could also be 2594 * num of ops indicating this is not the dummy going out. 2595 */ 2596 *iclogs_changed = 2; 2597 } 2598 2599 iclog->ic_state = XLOG_STATE_ACTIVE; 2600 iclog->ic_offset = 0; 2601 iclog->ic_header.h_num_logops = 0; 2602 memset(iclog->ic_header.h_cycle_data, 0, 2603 sizeof(iclog->ic_header.h_cycle_data)); 2604 iclog->ic_header.h_lsn = 0; 2605 iclog->ic_header.h_tail_lsn = 0; 2606 } 2607 2608 /* 2609 * Loop through all iclogs and mark all iclogs currently marked DIRTY as 2610 * ACTIVE after iclog I/O has completed. 2611 */ 2612 static void 2613 xlog_state_activate_iclogs( 2614 struct xlog *log, 2615 int *iclogs_changed) 2616 { 2617 struct xlog_in_core *iclog = log->l_iclog; 2618 2619 do { 2620 if (iclog->ic_state == XLOG_STATE_DIRTY) 2621 xlog_state_activate_iclog(iclog, iclogs_changed); 2622 /* 2623 * The ordering of marking iclogs ACTIVE must be maintained, so 2624 * an iclog doesn't become ACTIVE beyond one that is SYNCING. 2625 */ 2626 else if (iclog->ic_state != XLOG_STATE_ACTIVE) 2627 break; 2628 } while ((iclog = iclog->ic_next) != log->l_iclog); 2629 } 2630 2631 static int 2632 xlog_covered_state( 2633 int prev_state, 2634 int iclogs_changed) 2635 { 2636 /* 2637 * We go to NEED for any non-covering writes. We go to NEED2 if we just 2638 * wrote the first covering record (DONE). We go to IDLE if we just 2639 * wrote the second covering record (DONE2) and remain in IDLE until a 2640 * non-covering write occurs. 
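 * A rough summary of the switch below (iclogs_changed is 1 when the only
 * dirty iclog held just the covering dummy record, 2 otherwise):
 *
 *   IDLE  + 1 -> IDLE      IDLE  + 2 -> NEED
 *   NEED  + any -> NEED    NEED2 + any -> NEED
 *   DONE  + 1 -> NEED2     DONE  + 2 -> NEED
 *   DONE2 + 1 -> IDLE      DONE2 + 2 -> NEED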
2641 */ 2642 switch (prev_state) { 2643 case XLOG_STATE_COVER_IDLE: 2644 if (iclogs_changed == 1) 2645 return XLOG_STATE_COVER_IDLE; 2646 fallthrough; 2647 case XLOG_STATE_COVER_NEED: 2648 case XLOG_STATE_COVER_NEED2: 2649 break; 2650 case XLOG_STATE_COVER_DONE: 2651 if (iclogs_changed == 1) 2652 return XLOG_STATE_COVER_NEED2; 2653 break; 2654 case XLOG_STATE_COVER_DONE2: 2655 if (iclogs_changed == 1) 2656 return XLOG_STATE_COVER_IDLE; 2657 break; 2658 default: 2659 ASSERT(0); 2660 } 2661 2662 return XLOG_STATE_COVER_NEED; 2663 } 2664 2665 STATIC void 2666 xlog_state_clean_iclog( 2667 struct xlog *log, 2668 struct xlog_in_core *dirty_iclog) 2669 { 2670 int iclogs_changed = 0; 2671 2672 trace_xlog_iclog_clean(dirty_iclog, _RET_IP_); 2673 2674 dirty_iclog->ic_state = XLOG_STATE_DIRTY; 2675 2676 xlog_state_activate_iclogs(log, &iclogs_changed); 2677 wake_up_all(&dirty_iclog->ic_force_wait); 2678 2679 if (iclogs_changed) { 2680 log->l_covered_state = xlog_covered_state(log->l_covered_state, 2681 iclogs_changed); 2682 } 2683 } 2684 2685 STATIC xfs_lsn_t 2686 xlog_get_lowest_lsn( 2687 struct xlog *log) 2688 { 2689 struct xlog_in_core *iclog = log->l_iclog; 2690 xfs_lsn_t lowest_lsn = 0, lsn; 2691 2692 do { 2693 if (iclog->ic_state == XLOG_STATE_ACTIVE || 2694 iclog->ic_state == XLOG_STATE_DIRTY) 2695 continue; 2696 2697 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2698 if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0) 2699 lowest_lsn = lsn; 2700 } while ((iclog = iclog->ic_next) != log->l_iclog); 2701 2702 return lowest_lsn; 2703 } 2704 2705 /* 2706 * Completion of a iclog IO does not imply that a transaction has completed, as 2707 * transactions can be large enough to span many iclogs. We cannot change the 2708 * tail of the log half way through a transaction as this may be the only 2709 * transaction in the log and moving the tail to point to the middle of it 2710 * will prevent recovery from finding the start of the transaction. Hence we 2711 * should only update the last_sync_lsn if this iclog contains transaction 2712 * completion callbacks on it. 2713 * 2714 * We have to do this before we drop the icloglock to ensure we are the only one 2715 * that can update it. 2716 * 2717 * If we are moving the last_sync_lsn forwards, we also need to ensure we kick 2718 * the reservation grant head pushing. This is due to the fact that the push 2719 * target is bound by the current last_sync_lsn value. Hence if we have a large 2720 * amount of log space bound up in this committing transaction then the 2721 * last_sync_lsn value may be the limiting factor preventing tail pushing from 2722 * freeing space in the log. Hence once we've updated the last_sync_lsn we 2723 * should push the AIL to ensure the push target (and hence the grant head) is 2724 * no longer bound by the old log head location and can move forwards and make 2725 * progress again. 2726 */ 2727 static void 2728 xlog_state_set_callback( 2729 struct xlog *log, 2730 struct xlog_in_core *iclog, 2731 xfs_lsn_t header_lsn) 2732 { 2733 trace_xlog_iclog_callback(iclog, _RET_IP_); 2734 iclog->ic_state = XLOG_STATE_CALLBACK; 2735 2736 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), 2737 header_lsn) <= 0); 2738 2739 if (list_empty_careful(&iclog->ic_callbacks)) 2740 return; 2741 2742 atomic64_set(&log->l_last_sync_lsn, header_lsn); 2743 xlog_grant_push_ail(log, 0); 2744 } 2745 2746 /* 2747 * Return true if we need to stop processing, false to continue to the next 2748 * iclog. 
The caller will need to run callbacks if the iclog is returned in the 2749 * XLOG_STATE_CALLBACK state. 2750 */ 2751 static bool 2752 xlog_state_iodone_process_iclog( 2753 struct xlog *log, 2754 struct xlog_in_core *iclog, 2755 bool *ioerror) 2756 { 2757 xfs_lsn_t lowest_lsn; 2758 xfs_lsn_t header_lsn; 2759 2760 switch (iclog->ic_state) { 2761 case XLOG_STATE_ACTIVE: 2762 case XLOG_STATE_DIRTY: 2763 /* 2764 * Skip all iclogs in the ACTIVE & DIRTY states: 2765 */ 2766 return false; 2767 case XLOG_STATE_IOERROR: 2768 /* 2769 * Between marking a filesystem SHUTDOWN and stopping the log, 2770 * we do flush all iclogs to disk (if there wasn't a log I/O 2771 * error). So, we do want things to go smoothly in case of just 2772 * a SHUTDOWN w/o a LOG_IO_ERROR. 2773 */ 2774 *ioerror = true; 2775 return false; 2776 case XLOG_STATE_DONE_SYNC: 2777 /* 2778 * Now that we have an iclog that is in the DONE_SYNC state, do 2779 * one more check here to see if we have chased our tail around. 2780 * If this is not the lowest lsn iclog, then we will leave it 2781 * for another completion to process. 2782 */ 2783 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2784 lowest_lsn = xlog_get_lowest_lsn(log); 2785 if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0) 2786 return false; 2787 xlog_state_set_callback(log, iclog, header_lsn); 2788 return false; 2789 default: 2790 /* 2791 * Can only perform callbacks in order. Since this iclog is not 2792 * in the DONE_SYNC state, we skip the rest and just try to 2793 * clean up. 2794 */ 2795 return true; 2796 } 2797 } 2798 2799 STATIC void 2800 xlog_state_do_callback( 2801 struct xlog *log) 2802 { 2803 struct xlog_in_core *iclog; 2804 struct xlog_in_core *first_iclog; 2805 bool cycled_icloglock; 2806 bool ioerror; 2807 int flushcnt = 0; 2808 int repeats = 0; 2809 2810 spin_lock(&log->l_icloglock); 2811 do { 2812 /* 2813 * Scan all iclogs starting with the one pointed to by the 2814 * log. Reset this starting point each time the log is 2815 * unlocked (during callbacks). 2816 * 2817 * Keep looping through iclogs until one full pass is made 2818 * without running any callbacks. 
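 * (cycled_icloglock below records whether we dropped l_icloglock to run
 * callbacks during the pass; if we did, iclog state may have changed under
 * us, so another full scan is made from the current head.)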
2819 */ 2820 first_iclog = log->l_iclog; 2821 iclog = log->l_iclog; 2822 cycled_icloglock = false; 2823 ioerror = false; 2824 repeats++; 2825 2826 do { 2827 LIST_HEAD(cb_list); 2828 2829 if (xlog_state_iodone_process_iclog(log, iclog, 2830 &ioerror)) 2831 break; 2832 2833 if (iclog->ic_state != XLOG_STATE_CALLBACK && 2834 iclog->ic_state != XLOG_STATE_IOERROR) { 2835 iclog = iclog->ic_next; 2836 continue; 2837 } 2838 list_splice_init(&iclog->ic_callbacks, &cb_list); 2839 spin_unlock(&log->l_icloglock); 2840 2841 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_); 2842 xlog_cil_process_committed(&cb_list); 2843 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_); 2844 cycled_icloglock = true; 2845 2846 spin_lock(&log->l_icloglock); 2847 if (XLOG_FORCED_SHUTDOWN(log)) 2848 wake_up_all(&iclog->ic_force_wait); 2849 else 2850 xlog_state_clean_iclog(log, iclog); 2851 iclog = iclog->ic_next; 2852 } while (first_iclog != iclog); 2853 2854 if (repeats > 5000) { 2855 flushcnt += repeats; 2856 repeats = 0; 2857 xfs_warn(log->l_mp, 2858 "%s: possible infinite loop (%d iterations)", 2859 __func__, flushcnt); 2860 } 2861 } while (!ioerror && cycled_icloglock); 2862 2863 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE || 2864 log->l_iclog->ic_state == XLOG_STATE_IOERROR) 2865 wake_up_all(&log->l_flush_wait); 2866 2867 spin_unlock(&log->l_icloglock); 2868 } 2869 2870 2871 /* 2872 * Finish transitioning this iclog to the dirty state. 2873 * 2874 * Make sure that we completely execute this routine only when this is 2875 * the last call to the iclog. There is a good chance that iclog flushes, 2876 * when we reach the end of the physical log, get turned into 2 separate 2877 * calls to bwrite. Hence, one iclog flush could generate two calls to this 2878 * routine. By using the reference count bwritecnt, we guarantee that only 2879 * the second completion goes through. 2880 * 2881 * Callbacks could take time, so they are done outside the scope of the 2882 * global state machine log lock. 2883 */ 2884 STATIC void 2885 xlog_state_done_syncing( 2886 struct xlog_in_core *iclog) 2887 { 2888 struct xlog *log = iclog->ic_log; 2889 2890 spin_lock(&log->l_icloglock); 2891 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2892 trace_xlog_iclog_sync_done(iclog, _RET_IP_); 2893 2894 /* 2895 * If we got an error, either on the first buffer, or in the case of 2896 * split log writes, on the second, we shut down the file system and 2897 * no iclogs should ever be attempted to be written to disk again. 2898 */ 2899 if (!XLOG_FORCED_SHUTDOWN(log)) { 2900 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); 2901 iclog->ic_state = XLOG_STATE_DONE_SYNC; 2902 } 2903 2904 /* 2905 * Someone could be sleeping prior to writing out the next 2906 * iclog buffer, we wake them all, one will get to do the 2907 * I/O, the others get to wait for the result. 2908 */ 2909 wake_up_all(&iclog->ic_write_wait); 2910 spin_unlock(&log->l_icloglock); 2911 xlog_state_do_callback(log); 2912 } 2913 2914 /* 2915 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2916 * sleep. We wait on the flush queue on the head iclog as that should be 2917 * the first iclog to complete flushing. Hence if all iclogs are syncing, 2918 * we will wait here and all new writes will sleep until a sync completes. 2919 * 2920 * The in-core logs are used in a circular fashion. They are not used 2921 * out-of-order even when an iclog past the head is free. 
2922 * 2923 * return: 2924 * * log_offset where xlog_write() can start writing into the in-core 2925 * log's data space. 2926 * * in-core log pointer to which xlog_write() should write. 2927 * * boolean indicating this is a continued write to an in-core log. 2928 * If this is the last write, then the in-core log's offset field 2929 * needs to be incremented, depending on the amount of data which 2930 * is copied. 2931 */ 2932 STATIC int 2933 xlog_state_get_iclog_space( 2934 struct xlog *log, 2935 int len, 2936 struct xlog_in_core **iclogp, 2937 struct xlog_ticket *ticket, 2938 int *continued_write, 2939 int *logoffsetp) 2940 { 2941 int log_offset; 2942 xlog_rec_header_t *head; 2943 xlog_in_core_t *iclog; 2944 2945 restart: 2946 spin_lock(&log->l_icloglock); 2947 if (XLOG_FORCED_SHUTDOWN(log)) { 2948 spin_unlock(&log->l_icloglock); 2949 return -EIO; 2950 } 2951 2952 iclog = log->l_iclog; 2953 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2954 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); 2955 2956 /* Wait for log writes to have flushed */ 2957 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 2958 goto restart; 2959 } 2960 2961 head = &iclog->ic_header; 2962 2963 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 2964 log_offset = iclog->ic_offset; 2965 2966 trace_xlog_iclog_get_space(iclog, _RET_IP_); 2967 2968 /* On the 1st write to an iclog, figure out lsn. This works 2969 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 2970 * committing to. If the offset is set, that's how many blocks 2971 * must be written. 2972 */ 2973 if (log_offset == 0) { 2974 ticket->t_curr_res -= log->l_iclog_hsize; 2975 xlog_tic_add_region(ticket, 2976 log->l_iclog_hsize, 2977 XLOG_REG_TYPE_LRHEADER); 2978 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 2979 head->h_lsn = cpu_to_be64( 2980 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 2981 ASSERT(log->l_curr_block >= 0); 2982 } 2983 2984 /* If there is enough room to write everything, then do it. Otherwise, 2985 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 2986 * bit is on, so this will get flushed out. Don't update ic_offset 2987 * until you know exactly how many bytes get copied. Therefore, wait 2988 * until later to update ic_offset. 2989 * 2990 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 2991 * can fit into remaining data section. 2992 */ 2993 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { 2994 int error = 0; 2995 2996 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 2997 2998 /* 2999 * If we are the only one writing to this iclog, sync it to 3000 * disk. We need to do an atomic compare and decrement here to 3001 * avoid racing with concurrent atomic_dec_and_lock() calls in 3002 * xlog_state_release_iclog() when there is more than one 3003 * reference to the iclog. 3004 */ 3005 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) 3006 error = xlog_state_release_iclog(log, iclog, 0); 3007 spin_unlock(&log->l_icloglock); 3008 if (error) 3009 return error; 3010 goto restart; 3011 } 3012 3013 /* Do we have enough room to write the full amount in the remainder 3014 * of this iclog? Or must we continue a write on the next iclog and 3015 * mark this iclog as completely taken? In the case where we switch 3016 * iclogs (to mark it taken), this particular iclog will release/sync 3017 * to disk in xlog_write(). 
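 * For example (illustrative numbers): with a 32k iclog, ic_offset at 28k and
 * len of 6k, we take the continued-write path below, mark this iclog
 * WANT_SYNC, and the caller writes what fits before coming back here for the
 * next iclog.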
3018 */ 3019 if (len <= iclog->ic_size - iclog->ic_offset) { 3020 *continued_write = 0; 3021 iclog->ic_offset += len; 3022 } else { 3023 *continued_write = 1; 3024 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3025 } 3026 *iclogp = iclog; 3027 3028 ASSERT(iclog->ic_offset <= iclog->ic_size); 3029 spin_unlock(&log->l_icloglock); 3030 3031 *logoffsetp = log_offset; 3032 return 0; 3033 } 3034 3035 /* 3036 * The first cnt-1 times a ticket goes through here we don't need to move the 3037 * grant write head because the permanent reservation has reserved cnt times the 3038 * unit amount. Release part of current permanent unit reservation and reset 3039 * current reservation to be one units worth. Also move grant reservation head 3040 * forward. 3041 */ 3042 void 3043 xfs_log_ticket_regrant( 3044 struct xlog *log, 3045 struct xlog_ticket *ticket) 3046 { 3047 trace_xfs_log_ticket_regrant(log, ticket); 3048 3049 if (ticket->t_cnt > 0) 3050 ticket->t_cnt--; 3051 3052 xlog_grant_sub_space(log, &log->l_reserve_head.grant, 3053 ticket->t_curr_res); 3054 xlog_grant_sub_space(log, &log->l_write_head.grant, 3055 ticket->t_curr_res); 3056 ticket->t_curr_res = ticket->t_unit_res; 3057 xlog_tic_reset_res(ticket); 3058 3059 trace_xfs_log_ticket_regrant_sub(log, ticket); 3060 3061 /* just return if we still have some of the pre-reserved space */ 3062 if (!ticket->t_cnt) { 3063 xlog_grant_add_space(log, &log->l_reserve_head.grant, 3064 ticket->t_unit_res); 3065 trace_xfs_log_ticket_regrant_exit(log, ticket); 3066 3067 ticket->t_curr_res = ticket->t_unit_res; 3068 xlog_tic_reset_res(ticket); 3069 } 3070 3071 xfs_log_ticket_put(ticket); 3072 } 3073 3074 /* 3075 * Give back the space left from a reservation. 3076 * 3077 * All the information we need to make a correct determination of space left 3078 * is present. For non-permanent reservations, things are quite easy. The 3079 * count should have been decremented to zero. We only need to deal with the 3080 * space remaining in the current reservation part of the ticket. If the 3081 * ticket contains a permanent reservation, there may be left over space which 3082 * needs to be released. A count of N means that N-1 refills of the current 3083 * reservation can be done before we need to ask for more space. The first 3084 * one goes to fill up the first current reservation. Once we run out of 3085 * space, the count will stay at zero and the only space remaining will be 3086 * in the current reservation field. 3087 */ 3088 void 3089 xfs_log_ticket_ungrant( 3090 struct xlog *log, 3091 struct xlog_ticket *ticket) 3092 { 3093 int bytes; 3094 3095 trace_xfs_log_ticket_ungrant(log, ticket); 3096 3097 if (ticket->t_cnt > 0) 3098 ticket->t_cnt--; 3099 3100 trace_xfs_log_ticket_ungrant_sub(log, ticket); 3101 3102 /* 3103 * If this is a permanent reservation ticket, we may be able to free 3104 * up more space based on the remaining count. 3105 */ 3106 bytes = ticket->t_curr_res; 3107 if (ticket->t_cnt > 0) { 3108 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 3109 bytes += ticket->t_unit_res*ticket->t_cnt; 3110 } 3111 3112 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); 3113 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); 3114 3115 trace_xfs_log_ticket_ungrant_exit(log, ticket); 3116 3117 xfs_log_space_wake(log->l_mp); 3118 xfs_log_ticket_put(ticket); 3119 } 3120 3121 /* 3122 * This routine will mark the current iclog in the ring as WANT_SYNC and move 3123 * the current iclog pointer to the next iclog in the ring. 
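 * It also advances l_curr_block by the header plus (eventual) data size,
 * rounded up to the log stripe unit when one is in use, and bumps
 * l_curr_cycle when the log wraps, skipping the value that matches
 * XLOG_HEADER_MAGIC_NUM.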
3124 */ 3125 STATIC void 3126 xlog_state_switch_iclogs( 3127 struct xlog *log, 3128 struct xlog_in_core *iclog, 3129 int eventual_size) 3130 { 3131 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 3132 assert_spin_locked(&log->l_icloglock); 3133 trace_xlog_iclog_switch(iclog, _RET_IP_); 3134 3135 if (!eventual_size) 3136 eventual_size = iclog->ic_offset; 3137 iclog->ic_state = XLOG_STATE_WANT_SYNC; 3138 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); 3139 log->l_prev_block = log->l_curr_block; 3140 log->l_prev_cycle = log->l_curr_cycle; 3141 3142 /* roll log?: ic_offset changed later */ 3143 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); 3144 3145 /* Round up to next log-sunit */ 3146 if (log->l_iclog_roundoff > BBSIZE) { 3147 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff); 3148 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); 3149 } 3150 3151 if (log->l_curr_block >= log->l_logBBsize) { 3152 /* 3153 * Rewind the current block before the cycle is bumped to make 3154 * sure that the combined LSN never transiently moves forward 3155 * when the log wraps to the next cycle. This is to support the 3156 * unlocked sample of these fields from xlog_valid_lsn(). Most 3157 * other cases should acquire l_icloglock. 3158 */ 3159 log->l_curr_block -= log->l_logBBsize; 3160 ASSERT(log->l_curr_block >= 0); 3161 smp_wmb(); 3162 log->l_curr_cycle++; 3163 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) 3164 log->l_curr_cycle++; 3165 } 3166 ASSERT(iclog == log->l_iclog); 3167 log->l_iclog = iclog->ic_next; 3168 } 3169 3170 /* 3171 * Force the iclog to disk and check if the iclog has been completed before 3172 * xlog_force_iclog() returns. This can happen on synchronous (e.g. 3173 * pmem) or fast async storage because we drop the icloglock to issue the IO. 3174 * If completion has already occurred, tell the caller so that it can avoid an 3175 * unnecessary wait on the iclog. 3176 */ 3177 static int 3178 xlog_force_and_check_iclog( 3179 struct xlog_in_core *iclog, 3180 bool *completed) 3181 { 3182 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3183 int error; 3184 3185 *completed = false; 3186 error = xlog_force_iclog(iclog); 3187 if (error) 3188 return error; 3189 3190 /* 3191 * If the iclog has already been completed and reused the header LSN 3192 * will have been rewritten by completion 3193 */ 3194 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) 3195 *completed = true; 3196 return 0; 3197 } 3198 3199 /* 3200 * Write out all data in the in-core log as of this exact moment in time. 3201 * 3202 * Data may be written to the in-core log during this call. However, 3203 * we don't guarantee this data will be written out. A change from past 3204 * implementation means this routine will *not* write out zero length LRs. 3205 * 3206 * Basically, we try and perform an intelligent scan of the in-core logs. 3207 * If we determine there is no flushable data, we just return. There is no 3208 * flushable data if: 3209 * 3210 * 1. the current iclog is active and has no data; the previous iclog 3211 * is in the active or dirty state. 3212 * 2. the current iclog is drity, and the previous iclog is in the 3213 * active or dirty state. 3214 * 3215 * We may sleep if: 3216 * 3217 * 1. the current iclog is not in the active nor dirty state. 3218 * 2. the current iclog dirty, and the previous iclog is not in the 3219 * active nor dirty state. 3220 * 3. the current iclog is active, and there is another thread writing 3221 * to this particular iclog. 3222 * 4. 
a) the current iclog is active and has no other writers 3223 * b) when we return from flushing out this iclog, it is still 3224 * not in the active nor dirty state. 3225 */ 3226 int 3227 xfs_log_force( 3228 struct xfs_mount *mp, 3229 uint flags) 3230 { 3231 struct xlog *log = mp->m_log; 3232 struct xlog_in_core *iclog; 3233 3234 XFS_STATS_INC(mp, xs_log_force); 3235 trace_xfs_log_force(mp, 0, _RET_IP_); 3236 3237 xlog_cil_force(log); 3238 3239 spin_lock(&log->l_icloglock); 3240 iclog = log->l_iclog; 3241 if (iclog->ic_state == XLOG_STATE_IOERROR) 3242 goto out_error; 3243 3244 trace_xlog_iclog_force(iclog, _RET_IP_); 3245 3246 if (iclog->ic_state == XLOG_STATE_DIRTY || 3247 (iclog->ic_state == XLOG_STATE_ACTIVE && 3248 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) { 3249 /* 3250 * If the head is dirty or (active and empty), then we need to 3251 * look at the previous iclog. 3252 * 3253 * If the previous iclog is active or dirty we are done. There 3254 * is nothing to sync out. Otherwise, we attach ourselves to the 3255 * previous iclog and go to sleep. 3256 */ 3257 iclog = iclog->ic_prev; 3258 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3259 if (atomic_read(&iclog->ic_refcnt) == 0) { 3260 /* We have exclusive access to this iclog. */ 3261 bool completed; 3262 3263 if (xlog_force_and_check_iclog(iclog, &completed)) 3264 goto out_error; 3265 3266 if (completed) 3267 goto out_unlock; 3268 } else { 3269 /* 3270 * Someone else is still writing to this iclog, so we 3271 * need to ensure that when they release the iclog it 3272 * gets synced immediately as we may be waiting on it. 3273 */ 3274 xlog_state_switch_iclogs(log, iclog, 0); 3275 } 3276 } 3277 3278 /* 3279 * The iclog we are about to wait on may contain the checkpoint pushed 3280 * by the above xlog_cil_force() call, but it may not have been pushed 3281 * to disk yet. Like the ACTIVE case above, we need to make sure caches 3282 * are flushed when this iclog is written. 3283 */ 3284 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) 3285 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; 3286 3287 if (flags & XFS_LOG_SYNC) 3288 return xlog_wait_on_iclog(iclog); 3289 out_unlock: 3290 spin_unlock(&log->l_icloglock); 3291 return 0; 3292 out_error: 3293 spin_unlock(&log->l_icloglock); 3294 return -EIO; 3295 } 3296 3297 static int 3298 xlog_force_lsn( 3299 struct xlog *log, 3300 xfs_lsn_t lsn, 3301 uint flags, 3302 int *log_flushed, 3303 bool already_slept) 3304 { 3305 struct xlog_in_core *iclog; 3306 bool completed; 3307 3308 spin_lock(&log->l_icloglock); 3309 iclog = log->l_iclog; 3310 if (iclog->ic_state == XLOG_STATE_IOERROR) 3311 goto out_error; 3312 3313 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3314 trace_xlog_iclog_force_lsn(iclog, _RET_IP_); 3315 iclog = iclog->ic_next; 3316 if (iclog == log->l_iclog) 3317 goto out_unlock; 3318 } 3319 3320 switch (iclog->ic_state) { 3321 case XLOG_STATE_ACTIVE: 3322 /* 3323 * We sleep here if we haven't already slept (e.g. this is the 3324 * first time we've looked at the correct iclog buf) and the 3325 * buffer before us is going to be sync'ed. The reason for this 3326 * is that if we are doing sync transactions here, by waiting 3327 * for the previous I/O to complete, we can allow a few more 3328 * transactions into this iclog before we close it down. 3329 * 3330 * Otherwise, we mark the buffer WANT_SYNC, and bump up the 3331 * refcnt so we can release the log (which drops the ref count). 
3332 * The state switch keeps new transaction commits from using 3333 * this buffer. When the current commits finish writing into 3334 * the buffer, the refcount will drop to zero and the buffer 3335 * will go out then. 3336 */ 3337 if (!already_slept && 3338 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || 3339 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { 3340 xlog_wait(&iclog->ic_prev->ic_write_wait, 3341 &log->l_icloglock); 3342 return -EAGAIN; 3343 } 3344 if (xlog_force_and_check_iclog(iclog, &completed)) 3345 goto out_error; 3346 if (log_flushed) 3347 *log_flushed = 1; 3348 if (completed) 3349 goto out_unlock; 3350 break; 3351 case XLOG_STATE_WANT_SYNC: 3352 /* 3353 * This iclog may contain the checkpoint pushed by the 3354 * xlog_cil_force_seq() call, but there are other writers still 3355 * accessing it so it hasn't been pushed to disk yet. Like the 3356 * ACTIVE case above, we need to make sure caches are flushed 3357 * when this iclog is written. 3358 */ 3359 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; 3360 break; 3361 default: 3362 /* 3363 * The entire checkpoint was written by the CIL force and is on 3364 * its way to disk already. It will be stable when it 3365 * completes, so we don't need to manipulate caches here at all. 3366 * We just need to wait for completion if necessary. 3367 */ 3368 break; 3369 } 3370 3371 if (flags & XFS_LOG_SYNC) 3372 return xlog_wait_on_iclog(iclog); 3373 out_unlock: 3374 spin_unlock(&log->l_icloglock); 3375 return 0; 3376 out_error: 3377 spin_unlock(&log->l_icloglock); 3378 return -EIO; 3379 } 3380 3381 /* 3382 * Force the in-core log to disk for a specific LSN. 3383 * 3384 * Find in-core log with lsn. 3385 * If it is in the DIRTY state, just return. 3386 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3387 * state and go to sleep or return. 3388 * If it is in any other state, go to sleep or return. 3389 * 3390 * Synchronous forces are implemented with a wait queue. All callers trying 3391 * to force a given lsn to disk must wait on the queue attached to the 3392 * specific in-core log. When given in-core log finally completes its write 3393 * to disk, that thread will wake up all threads waiting on the queue. 3394 */ 3395 int 3396 xfs_log_force_seq( 3397 struct xfs_mount *mp, 3398 xfs_csn_t seq, 3399 uint flags, 3400 int *log_flushed) 3401 { 3402 struct xlog *log = mp->m_log; 3403 xfs_lsn_t lsn; 3404 int ret; 3405 ASSERT(seq != 0); 3406 3407 XFS_STATS_INC(mp, xs_log_force); 3408 trace_xfs_log_force(mp, seq, _RET_IP_); 3409 3410 lsn = xlog_cil_force_seq(log, seq); 3411 if (lsn == NULLCOMMITLSN) 3412 return 0; 3413 3414 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); 3415 if (ret == -EAGAIN) { 3416 XFS_STATS_INC(mp, xs_log_force_sleep); 3417 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); 3418 } 3419 return ret; 3420 } 3421 3422 /* 3423 * Free a used ticket when its refcount falls to zero. 3424 */ 3425 void 3426 xfs_log_ticket_put( 3427 xlog_ticket_t *ticket) 3428 { 3429 ASSERT(atomic_read(&ticket->t_ref) > 0); 3430 if (atomic_dec_and_test(&ticket->t_ref)) 3431 kmem_cache_free(xfs_log_ticket_zone, ticket); 3432 } 3433 3434 xlog_ticket_t * 3435 xfs_log_ticket_get( 3436 xlog_ticket_t *ticket) 3437 { 3438 ASSERT(atomic_read(&ticket->t_ref) > 0); 3439 atomic_inc(&ticket->t_ref); 3440 return ticket; 3441 } 3442 3443 /* 3444 * Figure out the total log space unit (in bytes) that would be 3445 * required for a log ticket. 
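 * Roughly: the caller's unit_bytes payload estimate is padded with op
 * headers, an LR header for each iclog the unit may span (plus one for the
 * commit record), and two roundoff allowances; see the breakdown in the
 * function body below.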
3446 */ 3447 static int 3448 xlog_calc_unit_res( 3449 struct xlog *log, 3450 int unit_bytes) 3451 { 3452 int iclog_space; 3453 uint num_headers; 3454 3455 /* 3456 * Permanent reservations have up to 'cnt'-1 active log operations 3457 * in the log. A unit in this case is the amount of space for one 3458 * of these log operations. Normal reservations have a cnt of 1 3459 * and their unit amount is the total amount of space required. 3460 * 3461 * The following lines of code account for non-transaction data 3462 * which occupy space in the on-disk log. 3463 * 3464 * Normal form of a transaction is: 3465 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3466 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3467 * 3468 * We need to account for all the leadup data and trailer data 3469 * around the transaction data. 3470 * And then we need to account for the worst case in terms of using 3471 * more space. 3472 * The worst case will happen if: 3473 * - the placement of the transaction happens to be such that the 3474 * roundoff is at its maximum 3475 * - the transaction data is synced before the commit record is synced 3476 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3477 * Therefore the commit record is in its own Log Record. 3478 * This can happen as the commit record is called with its 3479 * own region to xlog_write(). 3480 * This then means that in the worst case, roundoff can happen for 3481 * the commit-rec as well. 3482 * The commit-rec is smaller than padding in this scenario and so it is 3483 * not added separately. 3484 */ 3485 3486 /* for trans header */ 3487 unit_bytes += sizeof(xlog_op_header_t); 3488 unit_bytes += sizeof(xfs_trans_header_t); 3489 3490 /* for start-rec */ 3491 unit_bytes += sizeof(xlog_op_header_t); 3492 3493 /* 3494 * for LR headers - the space for data in an iclog is the size minus 3495 * the space used for the headers. If we use the iclog size, then we 3496 * undercalculate the number of headers required. 3497 * 3498 * Furthermore - the addition of op headers for split-recs might 3499 * increase the space required enough to require more log and op 3500 * headers, so take that into account too. 3501 * 3502 * IMPORTANT: This reservation makes the assumption that if this 3503 * transaction is the first in an iclog and hence has the LR headers 3504 * accounted to it, then the remaining space in the iclog is 3505 * exclusively for this transaction. i.e. if the transaction is larger 3506 * than the iclog, it will be the only thing in that iclog. 3507 * Fundamentally, this means we must pass the entire log vector to 3508 * xlog_write to guarantee this. 
3509 */ 3510 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 3511 num_headers = howmany(unit_bytes, iclog_space); 3512 3513 /* for split-recs - ophdrs added when data split over LRs */ 3514 unit_bytes += sizeof(xlog_op_header_t) * num_headers; 3515 3516 /* add extra header reservations if we overrun */ 3517 while (!num_headers || 3518 howmany(unit_bytes, iclog_space) > num_headers) { 3519 unit_bytes += sizeof(xlog_op_header_t); 3520 num_headers++; 3521 } 3522 unit_bytes += log->l_iclog_hsize * num_headers; 3523 3524 /* for commit-rec LR header - note: padding will subsume the ophdr */ 3525 unit_bytes += log->l_iclog_hsize; 3526 3527 /* roundoff padding for transaction data and one for commit record */ 3528 unit_bytes += 2 * log->l_iclog_roundoff; 3529 3530 return unit_bytes; 3531 } 3532 3533 int 3534 xfs_log_calc_unit_res( 3535 struct xfs_mount *mp, 3536 int unit_bytes) 3537 { 3538 return xlog_calc_unit_res(mp->m_log, unit_bytes); 3539 } 3540 3541 /* 3542 * Allocate and initialise a new log ticket. 3543 */ 3544 struct xlog_ticket * 3545 xlog_ticket_alloc( 3546 struct xlog *log, 3547 int unit_bytes, 3548 int cnt, 3549 char client, 3550 bool permanent) 3551 { 3552 struct xlog_ticket *tic; 3553 int unit_res; 3554 3555 tic = kmem_cache_zalloc(xfs_log_ticket_zone, GFP_NOFS | __GFP_NOFAIL); 3556 3557 unit_res = xlog_calc_unit_res(log, unit_bytes); 3558 3559 atomic_set(&tic->t_ref, 1); 3560 tic->t_task = current; 3561 INIT_LIST_HEAD(&tic->t_queue); 3562 tic->t_unit_res = unit_res; 3563 tic->t_curr_res = unit_res; 3564 tic->t_cnt = cnt; 3565 tic->t_ocnt = cnt; 3566 tic->t_tid = prandom_u32(); 3567 tic->t_clientid = client; 3568 if (permanent) 3569 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3570 3571 xlog_tic_reset_res(tic); 3572 3573 return tic; 3574 } 3575 3576 #if defined(DEBUG) 3577 /* 3578 * Make sure that the destination ptr is within the valid data region of 3579 * one of the iclogs. This uses backup pointers stored in a different 3580 * part of the log in case we trash the log structure. 3581 */ 3582 STATIC void 3583 xlog_verify_dest_ptr( 3584 struct xlog *log, 3585 void *ptr) 3586 { 3587 int i; 3588 int good_ptr = 0; 3589 3590 for (i = 0; i < log->l_iclog_bufs; i++) { 3591 if (ptr >= log->l_iclog_bak[i] && 3592 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) 3593 good_ptr++; 3594 } 3595 3596 if (!good_ptr) 3597 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3598 } 3599 3600 /* 3601 * Check to make sure the grant write head didn't just over lap the tail. If 3602 * the cycles are the same, we can't be overlapping. Otherwise, make sure that 3603 * the cycles differ by exactly one and check the byte count. 3604 * 3605 * This check is run unlocked, so can give false positives. Rather than assert 3606 * on failures, use a warn-once flag and a panic tag to allow the admin to 3607 * determine if they want to panic the machine when such an error occurs. For 3608 * debug kernels this will have the same effect as using an assert but, unlinke 3609 * an assert, it can be turned off at runtime. 
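 * (In other words, if the write head has moved into the next cycle, its byte
 * offset into that cycle must still be at or before the tail block,
 * otherwise the head has overwritten the tail.)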
3610 */ 3611 STATIC void 3612 xlog_verify_grant_tail( 3613 struct xlog *log) 3614 { 3615 int tail_cycle, tail_blocks; 3616 int cycle, space; 3617 3618 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); 3619 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); 3620 if (tail_cycle != cycle) { 3621 if (cycle - 1 != tail_cycle && 3622 !(log->l_flags & XLOG_TAIL_WARN)) { 3623 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3624 "%s: cycle - 1 != tail_cycle", __func__); 3625 log->l_flags |= XLOG_TAIL_WARN; 3626 } 3627 3628 if (space > BBTOB(tail_blocks) && 3629 !(log->l_flags & XLOG_TAIL_WARN)) { 3630 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 3631 "%s: space > BBTOB(tail_blocks)", __func__); 3632 log->l_flags |= XLOG_TAIL_WARN; 3633 } 3634 } 3635 } 3636 3637 /* check if it will fit */ 3638 STATIC void 3639 xlog_verify_tail_lsn( 3640 struct xlog *log, 3641 struct xlog_in_core *iclog) 3642 { 3643 xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); 3644 int blocks; 3645 3646 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { 3647 blocks = 3648 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3649 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3650 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3651 } else { 3652 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3653 3654 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3655 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); 3656 3657 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3658 if (blocks < BTOBB(iclog->ic_offset) + 1) 3659 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3660 } 3661 } 3662 3663 /* 3664 * Perform a number of checks on the iclog before writing to disk. 3665 * 3666 * 1. Make sure the iclogs are still circular 3667 * 2. Make sure we have a good magic number 3668 * 3. Make sure we don't have magic numbers in the data 3669 * 4. Check fields of each log operation header for: 3670 * A. Valid client identifier 3671 * B. tid ptr value falls in valid ptr space (user space code) 3672 * C. Length in log record header is correct according to the 3673 * individual operation headers within record. 3674 * 5. When a bwrite will occur within 5 blocks of the front of the physical 3675 * log, check the preceding blocks of the physical log to make sure all 3676 * the cycle numbers agree with the current cycle number. 
/* check if it will fit */
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_lsn_t	tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
	int		blocks;

	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
		blocks =
		    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
		if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	} else {
		ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);

		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);

		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
		if (blocks < BTOBB(iclog->ic_offset) + 1)
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	}
}

/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *		individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *	log, check the preceding blocks of the physical log to make sure all
 *	the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	void			*base_ptr, *ptr, *p;
	ptrdiff_t		field_offset;
	uint8_t			clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
		ASSERT(icptr);

	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	base_ptr = ptr = &iclog->ic_header;
	p = &iclog->ic_header;
	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	base_ptr = ptr = iclog->ic_datap;
	ophead = ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = ptr;

		/* clientid is only 1 byte */
		p = &ophead->oh_clientid;
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length */
		p = &ophead->oh_len;
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((uintptr_t)&ophead->oh_len -
				    (uintptr_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
}
#endif

/*
 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (iclog->ic_state != XLOG_STATE_IOERROR) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no log flushes will result.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return 0;
	}
	/*
	 * Return non-zero if the state transition has already happened.
	 */
	return 1;
}
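
/*
 * Illustrative note (an assumption, not part of this file): callers are not
 * expected to reach xlog_state_ioerror() directly. The usual path is a forced
 * shutdown request such as
 *
 *	xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 *
 * which lands in xfs_log_force_umount() below with logerror set, and that in
 * turn marks the iclogs via xlog_state_ioerror() under l_icloglock.
 */
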
/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. if !logerror, flush the logs to disk. Anything modified
 *	   after this is ignored.
 *	b. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	c. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up and told the bad news.
 *	d. nothing new gets queued up after (b) and (c) are done.
 *
 * Note: for the !logerror case we need to flush the regions held in memory out
 * to disk first. This needs to be done before the log is marked as shutdown,
 * otherwise the iclog writes will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	struct xlog	*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			mp->m_sb_bp->b_flags |= XBF_DONE;
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}

	/*
	 * Flush all the completed transactions to disk before marking the log
	 * as being shut down. We need to do it in this order to ensure that
	 * completed operations are safely on disk before we shut down, and
	 * that we don't have to issue any buffer IO after the shutdown flags
	 * are set to guarantee this.
	 */
	if (!logerror)
		xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Mark the filesystem and the log as being in a shutdown state and
	 * wake everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	/*
	 * Mark the log and the iclogs with IO error flags to prevent any
	 * further log IO from being issued or completed.
	 */
	log->l_flags |= XLOG_IO_ERROR;
	retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this. That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
	 * as if the log writes were completed. The abort handling in the log
	 * item committed callback functions will do this again under lock to
	 * avoid races.
	 */
	spin_lock(&log->l_cilp->xc_push_lock);
	wake_up_all(&log->l_cilp->xc_commit_wait);
	spin_unlock(&log->l_cilp->xc_push_lock);
	xlog_state_do_callback(log);

	/* return non-zero if log IOERROR transition had already happened */
	return retval;
}

STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/* endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}

/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{
	struct xlog		*log = mp->m_log;
	bool			valid;

	/*
	 * norecovery mode skips mount-time log processing and unconditionally
	 * resets the in-core LSN. We can't validate in this mode, but
	 * modifications are not allowed anyway so just return true.
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		return true;

	/*
	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
	 * handled by recovery and thus safe to ignore here.
	 */
	if (lsn == NULLCOMMITLSN)
		return true;

	valid = xlog_valid_lsn(mp->m_log, lsn);

	/* warn the user about what's gone wrong before verifier failure */
	if (!valid) {
		spin_lock(&log->l_icloglock);
		xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
			 log->l_curr_cycle, log->l_curr_block);
		spin_unlock(&log->l_icloglock);
	}

	return valid;
}

bool
xfs_log_in_recovery(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;

	return log->l_flags & XLOG_ACTIVE_RECOVERY;
}
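
/*
 * Illustrative sketch, an assumption rather than code from this file: a v5
 * metadata read verifier is expected to feed the on-disk LSN through
 * xfs_log_check_lsn() before accepting a buffer, along the lines of the AGF
 * verifier:
 *
 *	if (xfs_sb_version_hascrc(&mp->m_sb) &&
 *	    !xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
 *		return __this_address;
 *
 * The structure and field names above are illustrative; the point is that a
 * verifier rejects metadata stamped with an LSN ahead of the current head.
 */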