// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"
#include "xfs_health.h"

struct kmem_cache	*xfs_log_ticket_cache;

/* Local miscellaneous function prototypes */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(
	struct xlog_in_core	*iclog);
STATIC void xlog_state_do_callback(
	struct xlog		*log);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c)
#define xlog_verify_tail_lsn(a,b)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

static int
xfs_log_cover(struct xfs_mount *);

static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}
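
/*
 * Illustrative sketch (not from this file): the two helpers above update a
 * grant head locklessly by packing a {cycle, space} pair into one 64-bit
 * value and retrying with compare-and-exchange until no other CPU raced in.
 * A minimal userspace model of the same pattern, assuming a simple 32/32
 * bit split rather than the kernel's xlog_crack/assign_grant_head_val
 * encoding, might look like:
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	static void grant_sub_space(_Atomic int64_t *head, int bytes,
 *			int logsize)
 *	{
 *		int64_t old = atomic_load(head);
 *		int64_t new;
 *
 *		do {
 *			int cycle = old >> 32;
 *			int space = (uint32_t)old;
 *
 *			space -= bytes;
 *			if (space < 0) {	// wrapped: borrow a cycle
 *				space += logsize;
 *				cycle--;
 *			}
 *			new = ((int64_t)cycle << 32) | (uint32_t)space;
 *		} while (!atomic_compare_exchange_weak(head, &old, new));
 *	}
 *
 * On CAS failure atomic_compare_exchange_weak() refreshes 'old' with the
 * current value, just as atomic64_cmpxchg() returns it above.
 */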
STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;
	bool			woken_task = false;

	list_for_each_entry(tic, &head->waiters, t_queue) {

		/*
		 * There is a chance that the size of the CIL checkpoints in
		 * progress at the last AIL push target calculation resulted in
		 * limiting the target to the log head (l_last_sync_lsn) at the
		 * time. This may not reflect where the log head is now as the
		 * CIL checkpoints may have completed.
		 *
		 * Hence when we are woken here, it may be that the head of the
		 * log has moved rather than the tail. As the tail didn't move,
		 * there still won't be space available for the reservation we
		 * require. However, if the AIL has already pushed to the
		 * target defined by the old log head location, we will hang
		 * here waiting for something else to update the AIL push
		 * target.
		 *
		 * Therefore, if there isn't space to wake the first waiter on
		 * the grant head, we need to push the AIL again to ensure the
		 * target reflects both the current log tail and log head
		 * position before we wait for the tail to move again.
		 */

		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes) {
			if (!woken_task)
				xlog_grant_push_ail(log, need_bytes);
			return false;
		}

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
		woken_task = true;
	}

	return true;
}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (xlog_is_shutdown(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (xlog_is_shutdown(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}
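
/*
 * Illustrative sketch (not from this file): xlog_grant_head_wait() above
 * uses the classic "set task state, drop lock, schedule, retake lock,
 * recheck" sleep pattern. Reduced to its skeleton, with wait_for_thing()
 * and cond() standing in for the log-specific details, the shape is:
 *
 *	static void wait_for_thing(spinlock_t *lock, bool (*cond)(void))
 *	{
 *		// caller holds *lock
 *		do {
 *			__set_current_state(TASK_UNINTERRUPTIBLE);
 *			spin_unlock(lock);
 *			schedule();	// woken by wake_up_process()
 *			spin_lock(lock);
 *		} while (!cond());
 *	}
 *
 * Setting the task state before dropping the lock closes the race where a
 * wake_up_process() issued between the unlock and schedule() would
 * otherwise be lost: the waker resets the task to runnable, so schedule()
 * returns immediately instead of sleeping forever.
 */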
/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!xlog_in_recovery(log));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters, if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}

bool
xfs_log_writable(
	struct xfs_mount	*mp)
{
	/*
	 * Do not write to the log on norecovery mounts, if the data or log
	 * devices are read-only, or if the filesystem is shutdown. Read-only
	 * mounts allow internal writes for log recovery and unmount purposes,
	 * so don't restrict that case.
	 */
	if (xfs_has_norecovery(mp))
		return false;
	if (xfs_readonly_buftarg(mp->m_ddev_targp))
		return false;
	if (xfs_readonly_buftarg(mp->m_log->l_targ))
		return false;
	if (xlog_is_shutdown(mp->m_log))
		return false;
	return true;
}
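
/*
 * Illustrative sketch (not from this file): the fast path in
 * xlog_grant_head_check() relies on list_empty_careful() being safe to call
 * without head->lock. The general pattern, with do_slow_path() standing in
 * for queueing and sleeping, is:
 *
 *	if (!list_empty_careful(&head->waiters)) {
 *		spin_lock(&head->lock);
 *		// recheck state now that we hold the lock, then act
 *		do_slow_path(head);
 *		spin_unlock(&head->lock);
 *	}
 *
 * The unlocked check may race and see stale emptiness, which is fine here:
 * a ticket that was never queued is only reachable by its owner, and
 * everything that matters is rechecked under the lock on the slow path.
 */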
/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (xlog_is_shutdown(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation. By wasting space in each
 * reservation, we prevent over allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	uint8_t			client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (xlog_is_shutdown(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent);
	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
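
/*
 * Illustrative sketch (not from this file): a permanent ticket reserves
 * unit_res * cnt bytes up front and is then re-armed with xfs_log_regrant()
 * each time a transaction rolls, so only the write head has to move for
 * follow-up transactions. Roughly, with error handling elided and 'mp',
 * 'log' and 'unit_bytes' assumed in scope:
 *
 *	struct xlog_ticket *tic = NULL;
 *
 *	error = xfs_log_reserve(mp, unit_bytes, 3, &tic, XFS_TRANSACTION,
 *				true);		// permanent, log count 3
 *	... commit the first transaction ...
 *	error = xfs_log_regrant(mp, tic);	// re-arm for the roll
 *	... commit again, then release the reservation ...
 *	xfs_log_ticket_ungrant(log, tic);
 *
 * xfs_log_ticket_ungrant() is the release path referenced by the
 * "ungrant will give back unit_res * t_cnt" comments above.
 */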
/*
 * Run all the pending iclog callbacks and wake log force waiters and iclog
 * space waiters so they can process the newly set shutdown state. We really
 * don't care what order we process callbacks here because the log is shut down
 * and so state cannot change on disk anymore. However, we cannot wake waiters
 * until the callbacks have been processed because we may be in unmount and
 * we must ensure that all AIL operations the callbacks perform have completed
 * before we tear down the AIL.
 *
 * We avoid processing actively referenced iclogs so that we don't run callbacks
 * while the iclog owner might still be preparing the iclog for IO submission.
 * These will be caught by xlog_state_iclog_release() and call this function
 * again to process any callbacks that may have been added to that iclog.
 */
static void
xlog_state_shutdown_callbacks(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog;
	LIST_HEAD(cb_list);

	iclog = log->l_iclog;
	do {
		if (atomic_read(&iclog->ic_refcnt)) {
			/* Reference holder will re-run iclog callbacks. */
			continue;
		}
		list_splice_init(&iclog->ic_callbacks, &cb_list);
		spin_unlock(&log->l_icloglock);

		xlog_cil_process_committed(&cb_list);

		spin_lock(&log->l_icloglock);
		wake_up_all(&iclog->ic_write_wait);
		wake_up_all(&iclog->ic_force_wait);
	} while ((iclog = iclog->ic_next) != log->l_iclog);

	wake_up_all(&log->l_flush_wait);
}
/*
 * Flush iclog to disk if this is the last reference to the given iclog and it
 * is in the WANT_SYNC state.
 *
 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
 * written to stable storage, and implies that a commit record is contained
 * within the iclog. We need to ensure that the log tail does not move beyond
 * the tail that the first commit record in the iclog ordered against, otherwise
 * correct recovery of that checkpoint becomes dependent on future operations
 * performed on this iclog.
 *
 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
 * current tail into iclog. Once the iclog tail is set, future operations must
 * not modify it, otherwise they potentially violate ordering constraints for
 * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
 * the iclog will get zeroed on activation of the iclog after sync, so we
 * always capture the tail lsn on the iclog on the first NEED_FUA release
 * regardless of the number of active reference counts on this iclog.
 */
int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_lsn_t		tail_lsn;
	bool			last_ref;

	lockdep_assert_held(&log->l_icloglock);

	trace_xlog_iclog_release(iclog, _RET_IP_);
	/*
	 * Grabbing the current log tail needs to be atomic w.r.t. the writing
	 * of the tail LSN into the iclog so we guarantee that the log tail does
	 * not move between the first time we know that the iclog needs to be
	 * made stable and when we eventually submit it.
	 */
	if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
	     (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
	    !iclog->ic_header.h_tail_lsn) {
		tail_lsn = xlog_assign_tail_lsn(log->l_mp);
		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	}

	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);

	if (xlog_is_shutdown(log)) {
		/*
		 * If there are no more references to this iclog, process the
		 * pending iclog callbacks that were waiting on the release of
		 * this iclog.
		 */
		if (last_ref)
			xlog_state_shutdown_callbacks(log);
		return -EIO;
	}

	if (!last_ref)
		return 0;

	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		return 0;
	}

	iclog->ic_state = XLOG_STATE_SYNCING;
	xlog_verify_tail_lsn(log, iclog);
	trace_xlog_iclog_syncing(iclog, _RET_IP_);

	spin_unlock(&log->l_icloglock);
	xlog_sync(log, iclog);
	spin_lock(&log->l_icloglock);
	return 0;
}
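
/*
 * Illustrative sketch (not from this file): atomic_dec_and_test() above is
 * the standard last-reference pattern - it returns true for exactly one
 * caller, the one that dropped the count to zero, so only that caller may
 * transition the iclog to SYNCING and issue the IO. In miniature:
 *
 *	static void put_object(struct my_obj *obj)	// hypothetical type
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt)) {
 *			// we held the last reference; nobody else can
 *			// reach the object, so finalise it here
 *			finalise(obj);			// hypothetical
 *		}
 *	}
 *
 * Here the "finalise" step is submitting the iclog via xlog_sync() rather
 * than freeing it, since iclogs live for the lifetime of the log.
 */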
Abort!"); 687 ASSERT(0); 688 goto out_free_log; 689 } 690 xfs_crit(mp, "Log size out of supported range."); 691 xfs_crit(mp, 692 "Continuing onwards, but if log hangs are experienced then please report this message in the bug report."); 693 } 694 695 /* 696 * Initialize the AIL now we have a log. 697 */ 698 error = xfs_trans_ail_init(mp); 699 if (error) { 700 xfs_warn(mp, "AIL initialisation failed: error %d", error); 701 goto out_free_log; 702 } 703 log->l_ailp = mp->m_ail; 704 705 /* 706 * skip log recovery on a norecovery mount. pretend it all 707 * just worked. 708 */ 709 if (!xfs_has_norecovery(mp)) { 710 /* 711 * log recovery ignores readonly state and so we need to clear 712 * mount-based read only state so it can write to disk. 713 */ 714 bool readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, 715 &mp->m_opstate); 716 error = xlog_recover(log); 717 if (readonly) 718 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); 719 if (error) { 720 xfs_warn(mp, "log mount/recovery failed: error %d", 721 error); 722 xlog_recover_cancel(log); 723 goto out_destroy_ail; 724 } 725 } 726 727 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj, 728 "log"); 729 if (error) 730 goto out_destroy_ail; 731 732 /* Normal transactions can now occur */ 733 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); 734 735 /* 736 * Now the log has been fully initialised and we know were our 737 * space grant counters are, we can initialise the permanent ticket 738 * needed for delayed logging to work. 739 */ 740 xlog_cil_init_post_recovery(log); 741 742 return 0; 743 744 out_destroy_ail: 745 xfs_trans_ail_destroy(mp); 746 out_free_log: 747 xlog_dealloc_log(log); 748 out: 749 return error; 750 } 751 752 /* 753 * Finish the recovery of the file system. This is separate from the 754 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read 755 * in the root and real-time bitmap inodes between calling xfs_log_mount() and 756 * here. 757 * 758 * If we finish recovery successfully, start the background log work. If we are 759 * not doing recovery, then we have a RO filesystem and we don't need to start 760 * it. 761 */ 762 int 763 xfs_log_mount_finish( 764 struct xfs_mount *mp) 765 { 766 struct xlog *log = mp->m_log; 767 bool readonly; 768 int error = 0; 769 770 if (xfs_has_norecovery(mp)) { 771 ASSERT(xfs_is_readonly(mp)); 772 return 0; 773 } 774 775 /* 776 * log recovery ignores readonly state and so we need to clear 777 * mount-based read only state so it can write to disk. 778 */ 779 readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); 780 781 /* 782 * During the second phase of log recovery, we need iget and 783 * iput to behave like they do for an active filesystem. 784 * xfs_fs_drop_inode needs to be able to prevent the deletion 785 * of inodes before we're done replaying log items on those 786 * inodes. Turn it off immediately after recovery finishes 787 * so that we don't leak the quota inodes if subsequent mount 788 * activities fail. 789 * 790 * We let all inodes involved in redo item processing end up on 791 * the LRU instead of being evicted immediately so that if we do 792 * something to an unlinked inode, the irele won't cause 793 * premature truncation and freeing of the inode, which results 794 * in log recovery failure. We have to evict the unreferenced 795 * lru inodes after clearing SB_ACTIVE because we don't 796 * otherwise clean up the lru if there's a subsequent failure in 797 * xfs_mountfs, which leads to us leaking the inodes if nothing 798 * else (e.g. 
/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			readonly;
	int			error = 0;

	if (xfs_has_norecovery(mp)) {
		ASSERT(xfs_is_readonly(mp));
		return 0;
	}

	/*
	 * log recovery ignores readonly state and so we need to clear
	 * mount-based read only state so it can write to disk.
	 */
	readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes.  Turn it off immediately after recovery finishes
	 * so that we don't leak the quota inodes if subsequent mount
	 * activities fail.
	 *
	 * We let all inodes involved in redo item processing end up on
	 * the LRU instead of being evicted immediately so that if we do
	 * something to an unlinked inode, the irele won't cause
	 * premature truncation and freeing of the inode, which results
	 * in log recovery failure.  We have to evict the unreferenced
	 * lru inodes after clearing SB_ACTIVE because we don't
	 * otherwise clean up the lru if there's a subsequent failure in
	 * xfs_mountfs, which leads to us leaking the inodes if nothing
	 * else (e.g. quotacheck) references the inodes before the
	 * mount failure occurs.
	 */
	mp->m_super->s_flags |= SB_ACTIVE;
	xfs_log_work_queue(mp);
	if (xlog_recovery_needed(log))
		error = xlog_recover_finish(log);
	mp->m_super->s_flags &= ~SB_ACTIVE;
	evict_inodes(mp->m_super);

	/*
	 * Drain the buffer LRU after log recovery. This is required for v4
	 * filesystems to avoid leaving around buffers with NULL verifier ops,
	 * but we do it unconditionally to make sure we're always in a clean
	 * cache state after mount.
	 *
	 * Don't push in the error case because the AIL may have pending intents
	 * that aren't removed until recovery is cancelled.
	 */
	if (xlog_recovery_needed(log)) {
		if (!error) {
			xfs_log_force(mp, XFS_LOG_SYNC);
			xfs_ail_push_all_sync(mp->m_ail);
		}
		xfs_notice(mp, "Ending recovery (logdev: %s)",
				mp->m_logname ? mp->m_logname : "internal");
	} else {
		xfs_info(mp, "Ending clean mount");
	}
	xfs_buftarg_drain(mp->m_ddev_targp);

	clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
	if (readonly)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/* Make sure the log is dead if we're returning failure. */
	ASSERT(!error || xlog_is_shutdown(log));

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
void
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);
}

/*
 * Flush out the iclog to disk ensuring that device caches are flushed and
 * the iclog hits stable storage before any completion waiters are woken.
 */
static inline int
xlog_force_iclog(
	struct xlog_in_core	*iclog)
{
	atomic_inc(&iclog->ic_refcnt);
	iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
	if (iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
	return xlog_state_release_iclog(iclog->ic_log, iclog);
}

/*
 * Wait for the iclog and all prior iclogs to be written to disk as required by
 * the log force state machine. Waiting on ic_force_wait ensures iclog
 * completions have been ordered and callbacks run before we are woken here,
 * hence guaranteeing that all the iclogs up to this one are on stable storage.
 */
int
xlog_wait_on_iclog(
	struct xlog_in_core	*iclog)
		__releases(iclog->ic_log->l_icloglock)
{
	struct xlog		*log = iclog->ic_log;

	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
	if (!xlog_is_shutdown(log) &&
	    iclog->ic_state != XLOG_STATE_ACTIVE &&
	    iclog->ic_state != XLOG_STATE_DIRTY) {
		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	} else {
		spin_unlock(&log->l_icloglock);
	}

	if (xlog_is_shutdown(log))
		return -EIO;
	return 0;
}
/*
 * Write out an unmount record using the ticket provided. We have to account for
 * the data space used in the unmount ticket as this write is not done from a
 * transaction context that has already done the accounting for us.
 */
static int
xlog_write_unmount_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket)
{
	struct xfs_unmount_log_format ulf = {
		.magic = XLOG_UNMOUNT_TYPE,
	};
	struct xfs_log_iovec reg = {
		.i_addr = &ulf,
		.i_len = sizeof(ulf),
		.i_type = XLOG_REG_TYPE_UNMOUNT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	/* account for space used by record data */
	ticket->t_curr_res -= sizeof(ulf);

	return xlog_write(log, NULL, &vec, ticket, XLOG_UNMOUNT_TRANS);
}

/*
 * Mark the filesystem clean by writing an unmount record to the head of the
 * log.
 */
static void
xlog_unmount_write(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xlog_in_core	*iclog;
	struct xlog_ticket	*tic = NULL;
	int			error;

	error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
	if (error)
		goto out_err;

	error = xlog_write_unmount_record(log, tic);
	/*
	 * At this point, we're umounting anyway, so there's no point in
	 * transitioning log state to shutdown. Just continue...
	 */
out_err:
	if (error)
		xfs_alert(mp, "%s: unmount record failed", __func__);

	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
	error = xlog_force_iclog(iclog);
	xlog_wait_on_iclog(iclog);

	if (tic) {
		trace_xfs_log_umount_write(log, tic);
		xfs_log_ticket_ungrant(log, tic);
	}
}

static void
xfs_log_unmount_verify_iclog(
	struct xlog		*log)
{
	struct xlog_in_core	*iclog = log->l_iclog;

	do {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		ASSERT(iclog->ic_offset == 0);
	} while ((iclog = iclog->ic_next) != log->l_iclog);
}

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */
static void
xfs_log_unmount_write(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;

	if (!xfs_log_writable(mp))
		return;

	xfs_log_force(mp, XFS_LOG_SYNC);

	if (xlog_is_shutdown(log))
		return;

	/*
	 * If we think the summary counters are bad, avoid writing the unmount
	 * record to force log recovery at next mount, after which the summary
	 * counters will be recalculated. Refer to xlog_check_unmount_rec for
	 * more details.
	 */
	if (XFS_TEST_ERROR(xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS), mp,
			XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
		xfs_alert(mp, "%s: will fix summary counters at next mount",
				__func__);
		return;
	}

	xfs_log_unmount_verify_iclog(log);
	xlog_unmount_write(log);
}
/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can cover the log.
 */
int
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	/*
	 * Clear log incompat features since we're quiescing the log. Report
	 * failures, though it's not fatal to have a higher log feature
	 * protection level than the log contents actually require.
	 */
	if (xfs_clear_incompat_log_features(mp)) {
		int error;

		error = xfs_sync_sb(mp, false);
		if (error)
			xfs_warn(mp,
	"Failed to clear log incompat features on quiesce");
	}

	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_buftarg_wait() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_buftarg_wait(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	return xfs_log_cover(mp);
}

void
xfs_log_clean(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);
	xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_clean(mp);

	xfs_buftarg_drain(mp->m_ddev_targp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{
	item->li_log = mp->m_log;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
	item->li_lv = NULL;

	INIT_LIST_HEAD(&item->li_ail);
	INIT_LIST_HEAD(&item->li_cil);
	INIT_LIST_HEAD(&item->li_bio_list);
	INIT_LIST_HEAD(&item->li_trans);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (xlog_is_shutdown(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!xlog_in_recovery(log));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!xlog_in_recovery(log));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}
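
/*
 * Illustrative sketch (not from this file): a log item type embeds a
 * struct xfs_log_item and initialises it with xfs_log_item_init(), passing
 * its item type constant and ops vector. For a hypothetical item:
 *
 *	struct my_log_item {			// hypothetical
 *		struct xfs_log_item	mli_item;
 *		// type-specific state follows
 *	};
 *
 *	static const struct xfs_item_ops my_item_ops = {
 *		// .iop_size/.iop_format/... callbacks would go here
 *	};
 *
 *	static void my_item_init(struct xfs_mount *mp,
 *			struct my_log_item *mli)
 *	{
 *		xfs_log_item_init(mp, &mli->mli_item, XFS_LI_BUF,
 *				  &my_item_ops);
 *	}
 *
 * XFS_LI_BUF stands in for the item's real type constant; the ops vector
 * is what the CIL and AIL use to size, format and push the item.
 */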
/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL. Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static bool
xfs_log_need_covered(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	bool			needed = false;

	if (!xlog_cil_empty(log))
		return false;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = true;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = true;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}
/*
 * Explicitly cover the log. This is similar to background log covering but
 * intended for usage in quiesce codepaths. The caller is responsible to ensure
 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
 * must all be empty.
 */
static int
xfs_log_cover(
	struct xfs_mount	*mp)
{
	int			error = 0;
	bool			need_covered;

	ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
	        !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
	       xlog_is_shutdown(mp->m_log));

	if (!xfs_log_writable(mp))
		return 0;

	/*
	 * xfs_log_need_covered() is not idempotent because it progresses the
	 * state machine if the log requires covering. Therefore, we must call
	 * this function once and use the result until we've issued an sb sync.
	 * Do so first to make that abundantly clear.
	 *
	 * Fall into the covering sequence if the log needs covering or the
	 * mount has lazy superblock accounting to sync to disk. The sb sync
	 * used for covering accumulates the in-core counters, so covering
	 * handles this for us.
	 */
	need_covered = xfs_log_need_covered(mp);
	if (!need_covered && !xfs_has_lazysbcount(mp))
		return 0;

	/*
	 * To cover the log, commit the superblock twice (at most) in
	 * independent checkpoints. The first serves as a reference for the
	 * tail pointer. The sync transaction and AIL push empties the AIL and
	 * updates the in-core tail to the LSN of the first checkpoint. The
	 * second commit updates the on-disk tail with the in-core LSN,
	 * covering the log. Push the AIL one more time to leave it empty, as
	 * we found it.
	 */
	do {
		error = xfs_sync_sb(mp, true);
		if (error)
			break;
		xfs_ail_push_all_sync(mp->m_ail);
	} while (xfs_log_need_covered(mp));

	return error;
}

/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->ail_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL was empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->ail_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->ail_lock);

	return tail_lsn;
}
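
/*
 * Illustrative sketch (not from this file): xlog_space_left() below is
 * plain circular-buffer arithmetic once cycle numbers are taken into
 * account. A standalone model, assuming tail and head are given as
 * (cycle, bytes) pairs into a log of 'logsize' bytes:
 *
 *	static int space_left(int logsize, int tail_cycle, int tail_bytes,
 *			int head_cycle, int head_bytes)
 *	{
 *		if (tail_cycle == head_cycle)
 *			// same pass over the log: free = size - used
 *			return logsize - (head_bytes - tail_bytes);
 *		if (tail_cycle + 1 == head_cycle)
 *			// head has wrapped once past the tail
 *			return tail_bytes - head_bytes;
 *		return 0;	// reserve head wrapped past the tail
 *	}
 *
 * For a 64 MiB log with the tail at (5, 10 MiB) and the head at
 * (6, 2 MiB), that gives 10 - 2 = 8 MiB of free space.
 */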
/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * If reservation head is behind the tail, we have a problem. Warn about it,
 * but then treat it as if the log is empty.
 *
 * If the log is shut down, the head and tail may be invalid or out of whack, so
 * shortcut invalidity asserts in this case so that we don't trigger them
 * falsely.
 */
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		return log->l_logsize - (head_bytes - tail_bytes);
	if (tail_cycle + 1 < head_cycle)
		return 0;

	/* Ignore potential inconsistency when shutdown. */
	if (xlog_is_shutdown(log))
		return log->l_logsize;

	if (tail_cycle < head_cycle) {
		ASSERT(tail_cycle == (head_cycle - 1));
		return tail_bytes - head_bytes;
	}

	/*
	 * The reservation head is behind the tail. In this case we just want
	 * to return the size of the log as the amount of space left.
	 */
	xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
	xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d",
		  tail_cycle, tail_bytes);
	xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d",
		  head_cycle, head_bytes);
	ASSERT(0);
	return log->l_logsize;
}


static void
xlog_ioend_work(
	struct work_struct	*work)
{
	struct xlog_in_core	*iclog =
		container_of(work, struct xlog_in_core, ic_end_io_work);
	struct xlog		*log = iclog->ic_log;
	int			error;

	error = blk_status_to_errno(iclog->ic_bio.bi_status);
#ifdef DEBUG
	/* treat writes with injected CRC errors as failed */
	if (iclog->ic_fail_crc)
		error = -EIO;
#endif

	/*
	 * Race to shutdown the filesystem if we see an error.
	 */
	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
		xfs_alert(log->l_mp, "log I/O error %d", error);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}

	xlog_state_done_syncing(iclog);
	bio_uninit(&iclog->ic_bio);

	/*
	 * Drop the lock to signal that we are done. Nothing references the
	 * iclog after this, so an unmount waiting on this lock can now tear it
	 * down safely. As such, it is unsafe to reference the iclog after the
	 * unlock as we could race with it being freed.
	 */
	up(&iclog->ic_sema);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	if (mp->m_logbufs <= 0)
		mp->m_logbufs = XLOG_MAX_ICLOGS;
	if (mp->m_logbsize <= 0)
		mp->m_logbsize = XLOG_BIG_RECORD_BSIZE;

	log->l_iclog_bufs = mp->m_logbufs;
	log->l_iclog_size = mp->m_logbsize;

	/*
	 * # headers = size / 32k - one header holds cycles from 32k of data.
	 */
	log->l_iclog_heads =
		DIV_ROUND_UP(mp->m_logbsize, XLOG_HEADER_CYCLE_SIZE);
	log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
}

void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Clear the log incompat flags if we have the opportunity.
 *
 * This only happens if we're about to log the second dummy transaction as part
 * of covering the log and we can get the log incompat feature usage lock.
 */
static inline void
xlog_clear_incompat(
	struct xlog		*log)
{
	struct xfs_mount	*mp = log->l_mp;

	if (!xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL))
		return;

	if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
		return;

	if (!down_write_trylock(&log->l_incompat_users))
		return;

	xfs_clear_incompat_log_features(mp);
	up_write(&log->l_incompat_users);
}
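
/*
 * Illustrative sketch (not from this file): worked numbers for
 * xlog_get_iclog_buffer_size() above. Each log record header carries the
 * saved first words for XLOG_HEADER_CYCLE_SIZE (32k) of data, and each
 * header occupies one 512-byte basic block, so for a 256k iclog:
 *
 *	l_iclog_heads = DIV_ROUND_UP(256k, 32k) = 8 headers
 *	l_iclog_hsize = 8 << BBSHIFT = 8 * 512  = 4096 bytes
 *
 * leaving 256k - 4k of each iclog available for log data (ic_size, set up
 * below in xlog_alloc_log()).
 */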
/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xlog_clear_incompat(log);
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog=NULL;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize     = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */

	if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
		log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
	else
		log->l_iclog_roundoff = BBSIZE;

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	error = -EFSCORRUPTED;
	if (xfs_has_sector(mp)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
			    !xfs_has_logv2(mp)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	init_rwsem(&log->l_incompat_users);

	xlog_get_iclog_buffer_size(mp, log);

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined.  It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory.  See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
				sizeof(struct bio_vec);

		iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
		if (!iclog)
			goto out_free_iclog;

		*iclogp = iclog;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		iclog->ic_data = kvzalloc(log->l_iclog_size,
				GFP_KERNEL | __GFP_RETRY_MAYFAIL);
		if (!iclog->ic_data)
			goto out_free_iclog;
#ifdef DEBUG
		log->l_iclog_bak[i] = &iclog->ic_header;
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_has_logv2(log->l_mp) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		INIT_LIST_HEAD(&iclog->ic_callbacks);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);
		INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
		sema_init(&iclog->ic_sema, 1);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
				    WQ_HIGHPRI),
			0, mp->m_super->s_id);
	if (!log->l_ioend_workqueue)
		goto out_free_iclog;

	error = xlog_cil_init(log);
	if (error)
		goto out_destroy_workqueue;
	return log;

out_destroy_workqueue:
	destroy_workqueue(log->l_ioend_workqueue);
out_free_iclog:
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		kmem_free(iclog->ic_data);
		kmem_free(iclog);
		if (prev_iclog == log->l_iclog)
			break;
	}
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */
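
/*
 * Illustrative sketch (not from this file): the iclog allocation loop above
 * builds a circular doubly linked ring with a pointer-to-pointer cursor, so
 * no head/tail special cases are needed while linking. The bare pattern:
 *
 *	struct node { struct node *next, *prev; };	// hypothetical
 *
 *	static struct node *build_ring(struct node *bufs, int n)
 *	{
 *		struct node *head = NULL, *prev = NULL;
 *		struct node **link = &head;
 *		int i;
 *
 *		for (i = 0; i < n; i++) {
 *			*link = &bufs[i];	// append via the cursor
 *			bufs[i].prev = prev;
 *			prev = &bufs[i];
 *			link = &bufs[i].next;
 *		}
 *		*link = head;			// complete the ring
 *		head->prev = prev;		// re-write 1st prev ptr
 *		return head;
 *	}
 */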
/*
 * Compute the LSN that we'd need to push the log tail towards in order to have
 * (a) enough on-disk log space to log the number of bytes specified, (b) at
 * least 25% of the log space free, and (c) at least 256 blocks free. If the
 * log free space already meets all three thresholds, this function returns
 * NULLCOMMITLSN.
 */
xfs_lsn_t
xlog_grant_push_threshold(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * Set the threshold for the minimum number of free blocks in the
	 * log to the maximum of what the caller needs, one quarter of the
	 * log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = max(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return NULLCOMMITLSN;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
						&threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last
	 * log record known to be on disk. Use a snapshot of the last sync lsn
	 * so that it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	return threshold_lsn;
}
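
/*
 * Illustrative sketch (not from this file): worked numbers for the
 * threshold above. For a 64 MiB log (131072 basic blocks) and a caller
 * needing 1 MiB (2048 blocks):
 *
 *	free_threshold = max(2048, 131072 >> 2, 256) = 32768 blocks (16 MiB)
 *
 * so the AIL gets pushed whenever less than a quarter of the log is free,
 * and the push target is the current tail LSN advanced by that many
 * blocks, capped at l_last_sync_lsn.
 */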
/*
 * Push the tail of the log if we need to do so to maintain the free log space
 * thresholds set out by xlog_grant_push_threshold. We may need to adopt a
 * policy which pushes on an lsn which is further along in the log once we
 * reach the high water mark.  In this manner, we would be creating a low water
 * mark.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn;

	threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
	if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
		return;

	/*
	 * Get the transaction layer to kick the dirty buffers out to
	 * disk asynchronously. No point in trying to do this if
	 * the filesystem is shutting down.
	 */
	xfs_ail_push(log->l_ailp, threshold_lsn);
}

/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_has_logv2(log->l_mp)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum_update((char *)rhead,
			      sizeof(struct xlog_rec_header),
			      offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_has_logv2(log->l_mp)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = DIV_ROUND_UP(size, XLOG_HEADER_CYCLE_SIZE);

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}

	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}

static void
xlog_bio_end_io(
	struct bio		*bio)
{
	struct xlog_in_core	*iclog = bio->bi_private;

	queue_work(iclog->ic_log->l_ioend_workqueue,
		   &iclog->ic_end_io_work);
}

static int
xlog_map_iclog_data(
	struct bio		*bio,
	void			*data,
	size_t			count)
{
	do {
		struct page	*page = kmem_to_page(data);
		unsigned int	off = offset_in_page(data);
		size_t		len = min_t(size_t, count, PAGE_SIZE - off);

		if (bio_add_page(bio, page, len, off) != len)
			return -EIO;

		data += len;
		count -= len;
	} while (count);

	return 0;
}
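
/*
 * Illustrative sketch (not from this file): xlog_pack_data() above saves
 * the first __be32 of every 512-byte block into the record header(s) and
 * stamps the cycle number in its place, so torn writes are detectable at
 * recovery time. Recovery performs the inverse; reduced to the v1 (single
 * header) case it is essentially:
 *
 *	static void unpack_data(struct xlog_rec_header *rhead, char *dp,
 *			int nbblks)
 *	{
 *		int i;
 *
 *		// a block whose first word is not the expected cycle was
 *		// never written; here we just restore the saved words
 *		for (i = 0; i < nbblks; i++) {
 *			*(__be32 *)dp = rhead->h_cycle_data[i];
 *			dp += BBSIZE;
 *		}
 *	}
 *
 * The real recovery code (xlog_unpack_data() in xfs_log_recover.c) also
 * handles the v2 extended headers for records larger than 32k.
 */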
STATIC void
xlog_write_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	uint64_t		bno,
	unsigned int		count)
{
	ASSERT(bno < log->l_logBBsize);
	trace_xlog_iclog_write(iclog, _RET_IP_);

	/*
	 * We lock the iclogbufs here so that we can serialise against I/O
	 * completion during unmount.  We might be processing a shutdown
	 * triggered during unmount, and that can occur asynchronously to the
	 * unmount thread, and hence we need to ensure that completes before
	 * tearing down the iclogbufs.  Hence we need to hold the buffer lock
	 * across the log IO to achieve that.
	 */
	down(&iclog->ic_sema);
	if (xlog_is_shutdown(log)) {
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here.  We kick off the state machine and unlock
		 * the buffer manually, the code needs to be kept in sync
		 * with the I/O completion path.
		 */
		xlog_state_done_syncing(iclog);
		up(&iclog->ic_sema);
		return;
	}

	/*
	 * We use REQ_SYNC | REQ_IDLE here to tell the block layer that there
	 * are more IOs coming immediately after this one. This prevents the
	 * block layer writeback throttle from throttling log writes behind
	 * background metadata writeback and causing priority inversions.
	 */
	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
		 howmany(count, PAGE_SIZE),
		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
	iclog->ic_bio.bi_private = iclog;

	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
		/*
		 * For external log devices, we also need to flush the data
		 * device cache first to ensure all metadata writeback covered
		 * by the LSN in this iclog is on stable storage. This is slow,
		 * but it *must* complete before we issue the external log IO.
		 */
		if (log->l_targ != log->l_mp->m_ddev_targp)
			blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
	}
	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
		iclog->ic_bio.bi_opf |= REQ_FUA;

	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);

	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
		return;
	}
	if (is_vmalloc_addr(iclog->ic_data))
		flush_kernel_vmap_range(iclog->ic_data, count);

	/*
	 * If this log buffer would straddle the end of the log we will have
	 * to split it up into two bios, so that we can continue at the start.
	 */
	if (bno + BTOBB(count) > log->l_logBBsize) {
		struct bio *split;

		split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
				  GFP_NOIO, &fs_bio_set);
		bio_chain(split, &iclog->ic_bio);
		submit_bio(split);

		/* restart at logical offset zero for the remainder */
		iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
	}

	submit_bio(&iclog->ic_bio);
}
1926 
1927 /*
1928  * We need to bump cycle number for the part of the iclog that is
1929  * written to the start of the log. Watch out for the header magic
1930  * number case, though.
1931  */
1932 static void
1933 xlog_split_iclog(
1934 	struct xlog		*log,
1935 	void			*data,
1936 	uint64_t		bno,
1937 	unsigned int		count)
1938 {
1939 	unsigned int		split_offset = BBTOB(log->l_logBBsize - bno);
1940 	unsigned int		i;
1941 
1942 	for (i = split_offset; i < count; i += BBSIZE) {
1943 		uint32_t	cycle = get_unaligned_be32(data + i);
1944 
1945 		if (++cycle == XLOG_HEADER_MAGIC_NUM)
1946 			cycle++;
1947 		put_unaligned_be32(cycle, data + i);
1948 	}
1949 }
1950 
1951 static int
1952 xlog_calc_iclog_size(
1953 	struct xlog		*log,
1954 	struct xlog_in_core	*iclog,
1955 	uint32_t		*roundoff)
1956 {
1957 	uint32_t		count_init, count;
1958 
1959 	/* Add for LR header */
1960 	count_init = log->l_iclog_hsize + iclog->ic_offset;
1961 	count = roundup(count_init, log->l_iclog_roundoff);
1962 
1963 	*roundoff = count - count_init;
1964 
1965 	ASSERT(count >= count_init);
1966 	ASSERT(*roundoff < log->l_iclog_roundoff);
1967 	return count;
1968 }
1969 
1970 /*
1971  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1972  * fashion.  Before this routine is called, the current iclog pointer should
1973  * already have been moved to point to the next available iclog.  This allows
1974  * further writes to continue while this code syncs out an iclog ready to go.
1975  * Before an in-core log can be written out, the data section must be scanned
1976  * to save away the 1st word of each BBSIZE block into the header.  We replace
1977  * it with the current cycle count.  Each BBSIZE block is tagged with the
1978  * cycle count because there is an implicit assumption that drives will
1979  * guarantee that entire 512 byte blocks get written at once.  In other words,
1980  * we can't have part of a 512 byte block written and part not written.  By
1981  * tagging each block, we will know which blocks are valid when recovering
1982  * after an unclean shutdown.
1983  *
1984  * This routine is single threaded on the iclog.  No other thread can be in
1985  * this routine with the same iclog.  Changing contents of iclog can there-
1986  * fore be done without grabbing the state machine lock.  Updating the global
1987  * log will require grabbing the lock though.
1988  *
1989  * The entire log manager uses a logical block numbering scheme.  Only
1990  * xlog_write_iclog knows about the fact that the log may not start with
1991  * block zero on a given device.
1992  */
1993 STATIC void
1994 xlog_sync(
1995 	struct xlog		*log,
1996 	struct xlog_in_core	*iclog)
1997 {
1998 	unsigned int		count;		/* byte count of bwrite */
1999 	unsigned int		roundoff;       /* roundoff to BB or stripe */
2000 	uint64_t		bno;
2001 	unsigned int		size;
2002 
2003 	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2004 	trace_xlog_iclog_sync(iclog, _RET_IP_);
2005 
2006 	count = xlog_calc_iclog_size(log, iclog, &roundoff);
2007 
2008 	/* move grant heads by roundoff in sync */
2009 	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
2010 	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
2011 
2012 	/* put cycle number in every block */
2013 	xlog_pack_data(log, iclog, roundoff);
2014 
2015 	/* real byte length */
2016 	size = iclog->ic_offset;
2017 	if (xfs_has_logv2(log->l_mp))
2018 		size += roundoff;
2019 	iclog->ic_header.h_len = cpu_to_be32(size);
2020 
2021 	XFS_STATS_INC(log->l_mp, xs_log_writes);
2022 	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
2023 
2024 	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2025 
2026 	/* Do we need to split this write into 2 parts? */
2027 	if (bno + BTOBB(count) > log->l_logBBsize)
2028 		xlog_split_iclog(log, &iclog->ic_header, bno, count);
2029 
2030 	/* calculate the checksum */
2031 	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2032 					    iclog->ic_datap, size);
2033 	/*
2034 	 * Intentionally corrupt the log record CRC based on the error injection
2035 	 * frequency, if defined. This facilitates testing log recovery in the
2036 	 * event of torn writes. Hence, set the IOABORT state to abort the log
2037 	 * write on I/O completion and shutdown the fs. The subsequent mount
2038 	 * detects the bad CRC and attempts to recover.
2039 	 */
2040 #ifdef DEBUG
2041 	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
2042 		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2043 		iclog->ic_fail_crc = true;
2044 		xfs_warn(log->l_mp,
2045 	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
2046 			 be64_to_cpu(iclog->ic_header.h_lsn));
2047 	}
2048 #endif
2049 	xlog_verify_iclog(log, iclog, count);
2050 	xlog_write_iclog(log, iclog, bno, count);
2051 }
2052 
2053 /*
2054  * Deallocate a log structure
2055  */
2056 STATIC void
2057 xlog_dealloc_log(
2058 	struct xlog	*log)
2059 {
2060 	xlog_in_core_t	*iclog, *next_iclog;
2061 	int		i;
2062 
2063 	xlog_cil_destroy(log);
2064 
2065 	/*
2066 	 * Cycle all the iclogbuf locks to make sure all log IO completion
2067 	 * is done before we tear down these buffers.
2068 	 */
2069 	iclog = log->l_iclog;
2070 	for (i = 0; i < log->l_iclog_bufs; i++) {
2071 		down(&iclog->ic_sema);
2072 		up(&iclog->ic_sema);
2073 		iclog = iclog->ic_next;
2074 	}
2075 
2076 	iclog = log->l_iclog;
2077 	for (i = 0; i < log->l_iclog_bufs; i++) {
2078 		next_iclog = iclog->ic_next;
2079 		kmem_free(iclog->ic_data);
2080 		kmem_free(iclog);
2081 		iclog = next_iclog;
2082 	}
2083 
2084 	log->l_mp->m_log = NULL;
2085 	destroy_workqueue(log->l_ioend_workqueue);
2086 	kmem_free(log);
2087 }
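/*
 * Illustrative sketch (guarded by #if 0, never compiled here) of the
 * lock-cycling idiom used by xlog_dealloc_log() above: taking and
 * immediately dropping a lock that I/O completion holds guarantees any
 * in-flight completion has finished before the structure is torn down.
 * Modelled with POSIX semaphores in standalone userspace C; all demo_*
 * names are hypothetical.
 */
#if 0
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t	demo_sema;

static void *
demo_io_completion(void *arg)
{
	/* completion runs with the buffer "locked" (semaphore held) */
	puts("completion: finishing I/O");
	sem_post(&demo_sema);		/* the up() in the completion path */
	return NULL;
}

int
main(void)
{
	pthread_t	t;

	sem_init(&demo_sema, 0, 0);	/* 0 == held by the in-flight I/O */
	pthread_create(&t, NULL, demo_io_completion, NULL);

	/* cycle the lock: we cannot pass here until completion has run */
	sem_wait(&demo_sema);
	sem_post(&demo_sema);
	puts("teardown: safe to free the buffer now");

	pthread_join(t, NULL);
	sem_destroy(&demo_sema);
	return 0;
}
#endif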
2088 
2089 /*
2090  * Update counters atomically now that memcpy is done.
2091  */
2092 static inline void
2093 xlog_state_finish_copy(
2094 	struct xlog		*log,
2095 	struct xlog_in_core	*iclog,
2096 	int			record_cnt,
2097 	int			copy_bytes)
2098 {
2099 	lockdep_assert_held(&log->l_icloglock);
2100 
2101 	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2102 	iclog->ic_offset += copy_bytes;
2103 }
2104 
2105 /*
2106  * Print out information about the regions written that consume
2107  * the log reservation.
2108  */
2109 void
2110 xlog_print_tic_res(
2111 	struct xfs_mount	*mp,
2112 	struct xlog_ticket	*ticket)
2113 {
2114 	uint i;
2115 	uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
2116 
2117 	/* match with XLOG_REG_TYPE_* in xfs_log.h */
2118 #define REG_TYPE_STR(type, str)	[XLOG_REG_TYPE_##type] = str
2119 	static char *res_type_str[] = {
2120 	    REG_TYPE_STR(BFORMAT, "bformat"),
2121 	    REG_TYPE_STR(BCHUNK, "bchunk"),
2122 	    REG_TYPE_STR(EFI_FORMAT, "efi_format"),
2123 	    REG_TYPE_STR(EFD_FORMAT, "efd_format"),
2124 	    REG_TYPE_STR(IFORMAT, "iformat"),
2125 	    REG_TYPE_STR(ICORE, "icore"),
2126 	    REG_TYPE_STR(IEXT, "iext"),
2127 	    REG_TYPE_STR(IBROOT, "ibroot"),
2128 	    REG_TYPE_STR(ILOCAL, "ilocal"),
2129 	    REG_TYPE_STR(IATTR_EXT, "iattr_ext"),
2130 	    REG_TYPE_STR(IATTR_BROOT, "iattr_broot"),
2131 	    REG_TYPE_STR(IATTR_LOCAL, "iattr_local"),
2132 	    REG_TYPE_STR(QFORMAT, "qformat"),
2133 	    REG_TYPE_STR(DQUOT, "dquot"),
2134 	    REG_TYPE_STR(QUOTAOFF, "quotaoff"),
2135 	    REG_TYPE_STR(LRHEADER, "LR header"),
2136 	    REG_TYPE_STR(UNMOUNT, "unmount"),
2137 	    REG_TYPE_STR(COMMIT, "commit"),
2138 	    REG_TYPE_STR(TRANSHDR, "trans header"),
2139 	    REG_TYPE_STR(ICREATE, "inode create"),
2140 	    REG_TYPE_STR(RUI_FORMAT, "rui_format"),
2141 	    REG_TYPE_STR(RUD_FORMAT, "rud_format"),
2142 	    REG_TYPE_STR(CUI_FORMAT, "cui_format"),
2143 	    REG_TYPE_STR(CUD_FORMAT, "cud_format"),
2144 	    REG_TYPE_STR(BUI_FORMAT, "bui_format"),
2145 	    REG_TYPE_STR(BUD_FORMAT, "bud_format"),
2146 	};
2147 	BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1);
2148 #undef REG_TYPE_STR
2149 
2150 	xfs_warn(mp, "ticket reservation summary:");
2151 	xfs_warn(mp, "  unit res    = %d bytes",
2152 		 ticket->t_unit_res);
2153 	xfs_warn(mp, "  current res = %d bytes",
2154 		 ticket->t_curr_res);
2155 	xfs_warn(mp, "  total reg   = %u bytes (o/flow = %u bytes)",
2156 		 ticket->t_res_arr_sum, ticket->t_res_o_flow);
2157 	xfs_warn(mp, "  ophdrs      = %u (ophdr space = %u bytes)",
2158 		 ticket->t_res_num_ophdrs, ophdr_spc);
2159 	xfs_warn(mp, "  ophdr + reg = %u bytes",
2160 		 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc);
2161 	xfs_warn(mp, "  num regions = %u",
2162 		 ticket->t_res_num);
2163 
2164 	for (i = 0; i < ticket->t_res_num; i++) {
2165 		uint r_type = ticket->t_res_arr[i].r_type;
2166 		xfs_warn(mp, "region[%u]: %s - %u bytes", i,
2167 			 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
2168 			   "bad-rtype" : res_type_str[r_type]),
2169 			 ticket->t_res_arr[i].r_len);
2170 	}
2171 }
2172 
2173 /*
2174  * Print a summary of the transaction.
2175  */
2176 void
2177 xlog_print_trans(
2178 	struct xfs_trans	*tp)
2179 {
2180 	struct xfs_mount	*mp = tp->t_mountp;
2181 	struct xfs_log_item	*lip;
2182 
2183 	/* dump core transaction and ticket info */
2184 	xfs_warn(mp, "transaction summary:");
2185 	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
2186 	xfs_warn(mp, "  log count = %d", tp->t_log_count);
2187 	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
2188 
2189 	xlog_print_tic_res(mp, tp->t_ticket);
2190 
2191 	/* dump each log item */
2192 	list_for_each_entry(lip, &tp->t_items, li_trans) {
2193 		struct xfs_log_vec	*lv = lip->li_lv;
2194 		struct xfs_log_iovec	*vec;
2195 		int			i;
2196 
2197 		xfs_warn(mp, "log item: ");
2198 		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
2199 		xfs_warn(mp, "  flags	= 0x%lx", lip->li_flags);
2200 		if (!lv)
2201 			continue;
2202 		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
2203 		xfs_warn(mp, "  size	= %d", lv->lv_size);
2204 		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
2205 		xfs_warn(mp, "  buf len	= %d", lv->lv_buf_len);
2206 
2207 		/* dump each iovec for the log item */
2208 		vec = lv->lv_iovecp;
2209 		for (i = 0; i < lv->lv_niovecs; i++) {
2210 			int dumplen = min(vec->i_len, 32);
2211 
2212 			xfs_warn(mp, "  iovec[%d]", i);
2213 			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
2214 			xfs_warn(mp, "    len	= %d", vec->i_len);
2215 			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
2216 			xfs_hex_dump(vec->i_addr, dumplen);
2217 
2218 			vec++;
2219 		}
2220 	}
2221 }
2222 
2223 /*
2224  * Calculate the potential space needed by the log vector.  We may need a start
2225  * record, and each region gets its own struct xlog_op_header and may need to be
2226  * double word aligned.
2227  */
2228 static int
2229 xlog_write_calc_vec_length(
2230 	struct xlog_ticket	*ticket,
2231 	struct xfs_log_vec	*log_vector,
2232 	uint			optype)
2233 {
2234 	struct xfs_log_vec	*lv;
2235 	int			headers = 0;
2236 	int			len = 0;
2237 	int			i;
2238 
2239 	if (optype & XLOG_START_TRANS)
2240 		headers++;
2241 
2242 	for (lv = log_vector; lv; lv = lv->lv_next) {
2243 		/* we don't write ordered log vectors */
2244 		if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
2245 			continue;
2246 
2247 		headers += lv->lv_niovecs;
2248 
2249 		for (i = 0; i < lv->lv_niovecs; i++) {
2250 			struct xfs_log_iovec	*vecp = &lv->lv_iovecp[i];
2251 
2252 			len += vecp->i_len;
2253 			xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
2254 		}
2255 	}
2256 
2257 	ticket->t_res_num_ophdrs += headers;
2258 	len += headers * sizeof(struct xlog_op_header);
2259 
2260 	return len;
2261 }
2262 
2263 static void
2264 xlog_write_start_rec(
2265 	struct xlog_op_header	*ophdr,
2266 	struct xlog_ticket	*ticket)
2267 {
2268 	ophdr->oh_tid	= cpu_to_be32(ticket->t_tid);
2269 	ophdr->oh_clientid = ticket->t_clientid;
2270 	ophdr->oh_len = 0;
2271 	ophdr->oh_flags = XLOG_START_TRANS;
2272 	ophdr->oh_res2 = 0;
2273 }
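/*
 * Illustrative sketch (guarded by #if 0, never compiled here) of the
 * records the helpers above and below emit. Every region written to the
 * log is preceded by an operation header; a start record is simply an op
 * header with a zero-length payload and XLOG_START_TRANS set. The struct
 * below mirrors the on-disk xlog_op_header_t layout from memory and is an
 * assumption for demonstration only, as are the demo_* names.
 */
#if 0
#include <arpa/inet.h>		/* htonl()/ntohl() standing in for be32 */
#include <stdio.h>
#include <stdint.h>

struct demo_op_header {
	uint32_t	oh_tid;		/* transaction id (big endian) */
	uint32_t	oh_len;		/* payload bytes following header */
	uint8_t		oh_clientid;	/* XFS_TRANSACTION, XFS_LOG, ... */
	uint8_t		oh_flags;	/* XLOG_START_TRANS, ... */
	uint16_t	oh_res2;	/* alignment padding, always zero */
};

int
main(void)
{
	/* start record: zero-length marker opening a transaction */
	struct demo_op_header start = {
		.oh_tid   = htonl(42),
		.oh_len   = 0,
		.oh_flags = 0x01,	/* XLOG_START_TRANS */
	};
	/* ordinary region header: 128 payload bytes follow it */
	struct demo_op_header reg = {
		.oh_tid = htonl(42),
		.oh_len = htonl(128),
	};

	printf("start record: %zu bytes, payload %u\n",
	       sizeof(start), ntohl(start.oh_len));
	printf("region header: %zu bytes, payload %u\n",
	       sizeof(reg), ntohl(reg.oh_len));
	return 0;
}
#endif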
2274 
2275 static xlog_op_header_t *
2276 xlog_write_setup_ophdr(
2277 	struct xlog		*log,
2278 	struct xlog_op_header	*ophdr,
2279 	struct xlog_ticket	*ticket,
2280 	uint			flags)
2281 {
2282 	ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2283 	ophdr->oh_clientid = ticket->t_clientid;
2284 	ophdr->oh_res2 = 0;
2285 
2286 	/* are we copying a commit or unmount record? */
2287 	ophdr->oh_flags = flags;
2288 
2289 	/*
2290 	 * We've seen logs corrupted with bad transaction client ids.  This
2291 	 * makes sure that XFS doesn't generate them.  Turn this into an EIO
2292 	 * and shut down the filesystem.
2293 	 */
2294 	switch (ophdr->oh_clientid)  {
2295 	case XFS_TRANSACTION:
2296 	case XFS_VOLUME:
2297 	case XFS_LOG:
2298 		break;
2299 	default:
2300 		xfs_warn(log->l_mp,
2301 			"Bad XFS transaction clientid 0x%x in ticket "PTR_FMT,
2302 			ophdr->oh_clientid, ticket);
2303 		return NULL;
2304 	}
2305 
2306 	return ophdr;
2307 }
2308 
2309 /*
2310  * Set up the parameters of the region copy into the log.  This has
2311  * to handle a region write split across multiple log buffers - this
2312  * state is kept external to this function so that this code can
2313  * be written in an obvious, self-documenting manner.
2314  */
2315 static int
2316 xlog_write_setup_copy(
2317 	struct xlog_ticket	*ticket,
2318 	struct xlog_op_header	*ophdr,
2319 	int			space_available,
2320 	int			space_required,
2321 	int			*copy_off,
2322 	int			*copy_len,
2323 	int			*last_was_partial_copy,
2324 	int			*bytes_consumed)
2325 {
2326 	int			still_to_copy;
2327 
2328 	still_to_copy = space_required - *bytes_consumed;
2329 	*copy_off = *bytes_consumed;
2330 
2331 	if (still_to_copy <= space_available) {
2332 		/* write of region completes here */
2333 		*copy_len = still_to_copy;
2334 		ophdr->oh_len = cpu_to_be32(*copy_len);
2335 		if (*last_was_partial_copy)
2336 			ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
2337 		*last_was_partial_copy = 0;
2338 		*bytes_consumed = 0;
2339 		return 0;
2340 	}
2341 
2342 	/* partial write of region, needs extra log op header reservation */
2343 	*copy_len = space_available;
2344 	ophdr->oh_len = cpu_to_be32(*copy_len);
2345 	ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2346 	if (*last_was_partial_copy)
2347 		ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
2348 	*bytes_consumed += *copy_len;
2349 	(*last_was_partial_copy)++;
2350 
2351 	/* account for new log op header */
2352 	ticket->t_curr_res -= sizeof(struct xlog_op_header);
2353 	ticket->t_res_num_ophdrs++;
2354 
2355 	return sizeof(struct xlog_op_header);
2356 }
2357 
2358 static int
2359 xlog_write_copy_finish(
2360 	struct xlog		*log,
2361 	struct xlog_in_core	*iclog,
2362 	uint			flags,
2363 	int			*record_cnt,
2364 	int			*data_cnt,
2365 	int			*partial_copy,
2366 	int			*partial_copy_len,
2367 	int			log_offset)
2368 {
2369 	int			error;
2370 
2371 	if (*partial_copy) {
2372 		/*
2373 		 * This iclog has already been marked WANT_SYNC by
2374 		 * xlog_state_get_iclog_space.
2375 		 */
2376 		spin_lock(&log->l_icloglock);
2377 		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2378 		*record_cnt = 0;
2379 		*data_cnt = 0;
2380 		goto release_iclog;
2381 	}
2382 
2383 	*partial_copy = 0;
2384 	*partial_copy_len = 0;
2385 
2386 	if (iclog->ic_size - log_offset > sizeof(xlog_op_header_t))
2387 		return 0;
2388 
2389 	/* no more space in this iclog - push it. */
2390 	spin_lock(&log->l_icloglock);
2391 	xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2392 	*record_cnt = 0;
2393 	*data_cnt = 0;
2394 
2395 	if (iclog->ic_state == XLOG_STATE_ACTIVE)
2396 		xlog_state_switch_iclogs(log, iclog, 0);
2397 	else
2398 		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2399 		       xlog_is_shutdown(log));
2400 release_iclog:
2401 	error = xlog_state_release_iclog(log, iclog);
2402 	spin_unlock(&log->l_icloglock);
2403 	return error;
2404 }
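/*
 * Illustrative walk-through (guarded by #if 0, never compiled here) of
 * the continuation logic in xlog_write_setup_copy() above: a region
 * larger than the space left in the current iclog goes out as a partial
 * copy flagged XLOG_CONTINUE_TRANS, and the final piece carries
 * XLOG_WAS_CONT_TRANS|XLOG_END_TRANS. The real code also charges the
 * ticket one extra op header per continuation. Standalone userspace C
 * with hypothetical demo_* names; the iclog sizes used are assumptions.
 */
#if 0
#include <stdio.h>

static void
demo_copy(int space_available, int space_required)
{
	int	bytes_consumed = 0;
	int	last_was_partial = 0;

	while (bytes_consumed < space_required) {
		int	still_to_copy = space_required - bytes_consumed;

		if (still_to_copy <= space_available) {
			printf("copy %d bytes%s\n", still_to_copy,
			       last_was_partial ?
			       " (WAS_CONT_TRANS|END_TRANS)" : "");
			return;
		}
		printf("copy %d bytes (CONTINUE_TRANS%s)\n", space_available,
		       last_was_partial ? "|WAS_CONT_TRANS" : "");
		bytes_consumed += space_available;
		last_was_partial = 1;
		/* move on to a fresh iclog of the same size */
	}
}

int
main(void)
{
	demo_copy(4096, 10000);		/* 4096 + 4096 + 1808 byte copies */
	return 0;
}
#endif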
2405 
2406 /*
2407  * Write some region out to in-core log
2408  *
2409  * This will be called when writing externally provided regions or when
2410  * writing out a commit record for a given transaction.
2411  *
2412  * General algorithm:
2413  *	1. Find total length of this write.  This may include adding to the
2414  *		lengths passed in.
2415  *	2. Check whether we violate the ticket's reservation.
2416  *	3. While writing to this iclog
2417  *	    A. Reserve as much space in this iclog as we can get
2418  *	    B. If this is first write, save away start lsn
2419  *	    C. While writing this region:
2420  *		1. If first write of transaction, write start record
2421  *		2. Write log operation header (header per region)
2422  *		3. Find out if we can fit entire region into this iclog
2423  *		4. Potentially, verify destination memcpy ptr
2424  *		5. Memcpy (partial) region
2425  *		6. If partial copy, release iclog; otherwise, continue
2426  *			copying more regions into current iclog
2427  *	4. Mark want sync bit (in simulation mode)
2428  *	5. Release iclog for potential flush to on-disk log.
2429  *
2430  * ERRORS:
2431  * 1.	Panic if reservation is overrun.  This should never happen since
2432  *	reservation amounts are generated internal to the filesystem.
2433  * NOTES:
2434  * 1. Tickets are single threaded data structures.
2435  * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2436  *	syncing routine.  When a single log_write region needs to span
2437  *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2438  *	on all log operation writes which don't contain the end of the
2439  *	region.  The XLOG_END_TRANS bit is used for the in-core log
2440  *	operation which contains the end of the continued log_write region.
2441  * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2442  *	we don't really know exactly how much space will be used.  As a result,
2443  *	we don't update ic_offset until the end when we know exactly how many
2444  *	bytes have been written out.
2445  */
2446 int
2447 xlog_write(
2448 	struct xlog		*log,
2449 	struct xfs_cil_ctx	*ctx,
2450 	struct xfs_log_vec	*log_vector,
2451 	struct xlog_ticket	*ticket,
2452 	uint			optype)
2453 {
2454 	struct xlog_in_core	*iclog = NULL;
2455 	struct xfs_log_vec	*lv = log_vector;
2456 	struct xfs_log_iovec	*vecp = lv->lv_iovecp;
2457 	int			index = 0;
2458 	int			len;
2459 	int			partial_copy = 0;
2460 	int			partial_copy_len = 0;
2461 	int			contwr = 0;
2462 	int			record_cnt = 0;
2463 	int			data_cnt = 0;
2464 	int			error = 0;
2465 
2466 	/*
2467 	 * If this is a commit or unmount transaction, we don't need a start
2468 	 * record to be written.  We do, however, have to account for the
2469 	 * commit or unmount header that gets written. Hence we always have
2470 	 * to account for an extra xlog_op_header here.
2471 	 */
2472 	ticket->t_curr_res -= sizeof(struct xlog_op_header);
2473 	if (ticket->t_curr_res < 0) {
2474 		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2475 		     "ctx ticket reservation ran out. Need to up reservation");
2476 		xlog_print_tic_res(log->l_mp, ticket);
2477 		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2478 	}
2479 
2480 	len = xlog_write_calc_vec_length(ticket, log_vector, optype);
2481 	while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2482 		void		*ptr;
2483 		int		log_offset;
2484 
2485 		error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2486 						   &contwr, &log_offset);
2487 		if (error)
2488 			return error;
2489 
2490 		ASSERT(log_offset <= iclog->ic_size - 1);
2491 		ptr = iclog->ic_datap + log_offset;
2492 
2493 		/*
2494 		 * If we have a context pointer, pass it the first iclog we are
2495 		 * writing to so it can record state needed for iclog write
2496 		 * ordering.
2497 		 */
2498 		if (ctx) {
2499 			xlog_cil_set_ctx_write_state(ctx, iclog);
2500 			ctx = NULL;
2501 		}
2502 
2503 		/*
2504 		 * This loop writes out as many regions as can fit in the amount
2505 		 * of space which was allocated by xlog_state_get_iclog_space().
2506 		 */
2507 		while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2508 			struct xfs_log_iovec	*reg;
2509 			struct xlog_op_header	*ophdr;
2510 			int			copy_len;
2511 			int			copy_off;
2512 			bool			ordered = false;
2513 			bool			wrote_start_rec = false;
2514 
2515 			/* ordered log vectors have no regions to write */
2516 			if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
2517 				ASSERT(lv->lv_niovecs == 0);
2518 				ordered = true;
2519 				goto next_lv;
2520 			}
2521 
2522 			reg = &vecp[index];
2523 			ASSERT(reg->i_len % sizeof(int32_t) == 0);
2524 			ASSERT((unsigned long)ptr % sizeof(int32_t) == 0);
2525 
2526 			/*
2527 			 * Before we start formatting log vectors, we need to
2528 			 * write a start record. Only do this for the first
2529 			 * iclog we write to.
2530 			 */
2531 			if (optype & XLOG_START_TRANS) {
2532 				xlog_write_start_rec(ptr, ticket);
2533 				xlog_write_adv_cnt(&ptr, &len, &log_offset,
2534 						sizeof(struct xlog_op_header));
2535 				optype &= ~XLOG_START_TRANS;
2536 				wrote_start_rec = true;
2537 			}
2538 
2539 			ophdr = xlog_write_setup_ophdr(log, ptr, ticket, optype);
2540 			if (!ophdr)
2541 				return -EIO;
2542 
2543 			xlog_write_adv_cnt(&ptr, &len, &log_offset,
2544 					   sizeof(struct xlog_op_header));
2545 
2546 			len += xlog_write_setup_copy(ticket, ophdr,
2547 						     iclog->ic_size-log_offset,
2548 						     reg->i_len,
2549 						     &copy_off, &copy_len,
2550 						     &partial_copy,
2551 						     &partial_copy_len);
2552 			xlog_verify_dest_ptr(log, ptr);
2553 
2554 			/*
2555 			 * Copy region.
2556 			 *
2557 			 * Unmount records just log an opheader, so can have
2558 			 * empty payloads with no data region to copy.  Hence we
2559 			 * only copy the payload if the vector says it has data
2560 			 * to copy.
2561 			 */
2562 			ASSERT(copy_len >= 0);
2563 			if (copy_len > 0) {
2564 				memcpy(ptr, reg->i_addr + copy_off, copy_len);
2565 				xlog_write_adv_cnt(&ptr, &len, &log_offset,
2566 						   copy_len);
2567 			}
2568 			copy_len += sizeof(struct xlog_op_header);
2569 			record_cnt++;
2570 			if (wrote_start_rec) {
2571 				copy_len += sizeof(struct xlog_op_header);
2572 				record_cnt++;
2573 			}
2574 			data_cnt += contwr ? copy_len : 0;
2575 
2576 			error = xlog_write_copy_finish(log, iclog, optype,
2577 						       &record_cnt, &data_cnt,
2578 						       &partial_copy,
2579 						       &partial_copy_len,
2580 						       log_offset);
2581 			if (error)
2582 				return error;
2583 
2584 			/*
2585 			 * if we had a partial copy, we need to get more iclog
2586 			 * space but we don't want to increment the region
2587 			 * index because there is still more in this region to
2588 			 * write.
2589 			 *
2590 			 * If we completed writing this region, and we flushed
2591 			 * the iclog (indicated by resetting of the record
2592 			 * count), then we also need to get more log space. If
2593 			 * this was the last record, though, we are done and
2594 			 * can just return.
2595 			 */
2596 			if (partial_copy)
2597 				break;
2598 
2599 			if (++index == lv->lv_niovecs) {
2600 next_lv:
2601 				lv = lv->lv_next;
2602 				index = 0;
2603 				if (lv)
2604 					vecp = lv->lv_iovecp;
2605 			}
2606 			if (record_cnt == 0 && !ordered) {
2607 				if (!lv)
2608 					return 0;
2609 				break;
2610 			}
2611 		}
2612 	}
2613 
2614 	ASSERT(len == 0);
2615 
2616 	spin_lock(&log->l_icloglock);
2617 	xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2618 	error = xlog_state_release_iclog(log, iclog);
2619 	spin_unlock(&log->l_icloglock);
2620 
2621 	return error;
2622 }
2623 
2624 static void
2625 xlog_state_activate_iclog(
2626 	struct xlog_in_core	*iclog,
2627 	int			*iclogs_changed)
2628 {
2629 	ASSERT(list_empty_careful(&iclog->ic_callbacks));
2630 	trace_xlog_iclog_activate(iclog, _RET_IP_);
2631 
2632 	/*
2633 	 * If the number of ops in this iclog indicates it just contains the
2634 	 * dummy transaction, we can change state into IDLE (the second time
2635 	 * around). Otherwise we should change the state into NEED a dummy.
2636 	 * We don't need to cover the dummy.
2637 	 */
2638 	if (*iclogs_changed == 0 &&
2639 	    iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2640 		*iclogs_changed = 1;
2641 	} else {
2642 		/*
2643 		 * We have two dirty iclogs so start over.  The number of ops
2644 		 * could also indicate that this is not the dummy going out.
2645 		 */
2646 		*iclogs_changed = 2;
2647 	}
2648 
2649 	iclog->ic_state	= XLOG_STATE_ACTIVE;
2650 	iclog->ic_offset = 0;
2651 	iclog->ic_header.h_num_logops = 0;
2652 	memset(iclog->ic_header.h_cycle_data, 0,
2653 		sizeof(iclog->ic_header.h_cycle_data));
2654 	iclog->ic_header.h_lsn = 0;
2655 	iclog->ic_header.h_tail_lsn = 0;
2656 }
2657 
2658 /*
2659  * Loop through all iclogs and mark all iclogs currently marked DIRTY as
2660  * ACTIVE after iclog I/O has completed.
2661  */
2662 static void
2663 xlog_state_activate_iclogs(
2664 	struct xlog		*log,
2665 	int			*iclogs_changed)
2666 {
2667 	struct xlog_in_core	*iclog = log->l_iclog;
2668 
2669 	do {
2670 		if (iclog->ic_state == XLOG_STATE_DIRTY)
2671 			xlog_state_activate_iclog(iclog, iclogs_changed);
2672 		/*
2673 		 * The ordering of marking iclogs ACTIVE must be maintained, so
2674 		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2675 		 */
2676 		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2677 			break;
2678 	} while ((iclog = iclog->ic_next) != log->l_iclog);
2679 }
2680 
2681 static int
2682 xlog_covered_state(
2683 	int			prev_state,
2684 	int			iclogs_changed)
2685 {
2686 	/*
2687 	 * We go to NEED for any non-covering writes. We go to NEED2 if we just
2688 	 * wrote the first covering record (DONE). We go to IDLE if we just
2689 	 * wrote the second covering record (DONE2) and remain in IDLE until a
2690 	 * non-covering write occurs.
2691 	 */
2692 	switch (prev_state) {
2693 	case XLOG_STATE_COVER_IDLE:
2694 		if (iclogs_changed == 1)
2695 			return XLOG_STATE_COVER_IDLE;
2696 		fallthrough;
2697 	case XLOG_STATE_COVER_NEED:
2698 	case XLOG_STATE_COVER_NEED2:
2699 		break;
2700 	case XLOG_STATE_COVER_DONE:
2701 		if (iclogs_changed == 1)
2702 			return XLOG_STATE_COVER_NEED2;
2703 		break;
2704 	case XLOG_STATE_COVER_DONE2:
2705 		if (iclogs_changed == 1)
2706 			return XLOG_STATE_COVER_IDLE;
2707 		break;
2708 	default:
2709 		ASSERT(0);
2710 	}
2711 
2712 	return XLOG_STATE_COVER_NEED;
2713 }
2714 
2715 STATIC void
2716 xlog_state_clean_iclog(
2717 	struct xlog		*log,
2718 	struct xlog_in_core	*dirty_iclog)
2719 {
2720 	int			iclogs_changed = 0;
2721 
2722 	trace_xlog_iclog_clean(dirty_iclog, _RET_IP_);
2723 
2724 	dirty_iclog->ic_state = XLOG_STATE_DIRTY;
2725 
2726 	xlog_state_activate_iclogs(log, &iclogs_changed);
2727 	wake_up_all(&dirty_iclog->ic_force_wait);
2728 
2729 	if (iclogs_changed) {
2730 		log->l_covered_state = xlog_covered_state(log->l_covered_state,
2731 				iclogs_changed);
2732 	}
2733 }
2734 
2735 STATIC xfs_lsn_t
2736 xlog_get_lowest_lsn(
2737 	struct xlog		*log)
2738 {
2739 	struct xlog_in_core	*iclog = log->l_iclog;
2740 	xfs_lsn_t		lowest_lsn = 0, lsn;
2741 
2742 	do {
2743 		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2744 		    iclog->ic_state == XLOG_STATE_DIRTY)
2745 			continue;
2746 
2747 		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2748 		if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
2749 			lowest_lsn = lsn;
2750 	} while ((iclog = iclog->ic_next) != log->l_iclog);
2751 
2752 	return lowest_lsn;
2753 }
2754 
2755 /*
2756  * Completion of an iclog IO does not imply that a transaction has completed, as
2757  * transactions can be large enough to span many iclogs. We cannot change the
2758  * tail of the log half way through a transaction as this may be the only
2759  * transaction in the log and moving the tail to point to the middle of it
2760  * will prevent recovery from finding the start of the transaction. Hence we
2761  * should only update the last_sync_lsn if this iclog contains transaction
2762  * completion callbacks on it.
2763  *
2764  * We have to do this before we drop the icloglock to ensure we are the only one
2765  * that can update it.
2766  *
2767  * If we are moving the last_sync_lsn forwards, we also need to ensure we kick
2768  * the reservation grant head pushing. This is due to the fact that the push
2769  * target is bound by the current last_sync_lsn value. Hence if we have a large
2770  * amount of log space bound up in this committing transaction then the
2771  * last_sync_lsn value may be the limiting factor preventing tail pushing from
2772  * freeing space in the log. Hence once we've updated the last_sync_lsn we
2773  * should push the AIL to ensure the push target (and hence the grant head) is
2774  * no longer bound by the old log head location and can move forwards and make
2775  * progress again.
2776  */
2777 static void
2778 xlog_state_set_callback(
2779 	struct xlog		*log,
2780 	struct xlog_in_core	*iclog,
2781 	xfs_lsn_t		header_lsn)
2782 {
2783 	trace_xlog_iclog_callback(iclog, _RET_IP_);
2784 	iclog->ic_state = XLOG_STATE_CALLBACK;
2785 
2786 	ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2787 			   header_lsn) <= 0);
2788 
2789 	if (list_empty_careful(&iclog->ic_callbacks))
2790 		return;
2791 
2792 	atomic64_set(&log->l_last_sync_lsn, header_lsn);
2793 	xlog_grant_push_ail(log, 0);
2794 }
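/*
 * Illustrative sketch (guarded by #if 0, never compiled here) of the LSN
 * packing that the comparisons above rely on: an LSN packs a cycle number
 * and a block number into one 64-bit value, so a plain integer compare
 * orders points in the log even across wraps. This mirrors what
 * xlog_assign_lsn()/CYCLE_LSN()/BLOCK_LSN() do; the demo_* names are
 * hypothetical and this is standalone userspace C.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
demo_assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

static uint32_t demo_cycle_lsn(uint64_t lsn) { return lsn >> 32; }
static uint32_t demo_block_lsn(uint64_t lsn) { return (uint32_t)lsn; }

int
main(void)
{
	/* block 900 of cycle 5 orders before block 10 of cycle 6 */
	uint64_t	a = demo_assign_lsn(5, 900);
	uint64_t	b = demo_assign_lsn(6, 10);

	assert(a < b);		/* later cycle => later LSN */
	printf("a: cycle %u block %u\n", demo_cycle_lsn(a), demo_block_lsn(a));
	printf("b: cycle %u block %u\n", demo_cycle_lsn(b), demo_block_lsn(b));
	return 0;
}
#endif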
2795 
2796 /*
2797  * Return true if we need to stop processing, false to continue to the next
2798  * iclog. The caller will need to run callbacks if the iclog is returned in the
2799  * XLOG_STATE_CALLBACK state.
2800  */
2801 static bool
2802 xlog_state_iodone_process_iclog(
2803 	struct xlog		*log,
2804 	struct xlog_in_core	*iclog)
2805 {
2806 	xfs_lsn_t		lowest_lsn;
2807 	xfs_lsn_t		header_lsn;
2808 
2809 	switch (iclog->ic_state) {
2810 	case XLOG_STATE_ACTIVE:
2811 	case XLOG_STATE_DIRTY:
2812 		/*
2813 		 * Skip all iclogs in the ACTIVE & DIRTY states.
2814 		 */
2815 		return false;
2816 	case XLOG_STATE_DONE_SYNC:
2817 		/*
2818 		 * Now that we have an iclog that is in the DONE_SYNC state, do
2819 		 * one more check here to see if we have chased our tail around.
2820 		 * If this is not the lowest lsn iclog, then we will leave it
2821 		 * for another completion to process.
2822 		 */
2823 		header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2824 		lowest_lsn = xlog_get_lowest_lsn(log);
2825 		if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
2826 			return false;
2827 		xlog_state_set_callback(log, iclog, header_lsn);
2828 		return false;
2829 	default:
2830 		/*
2831 		 * Can only perform callbacks in order.  Since this iclog is not
2832 		 * in the DONE_SYNC state, we skip the rest and just try to
2833 		 * clean up.
2834 		 */
2835 		return true;
2836 	}
2837 }
2838 
2839 /*
2840  * Loop over all the iclogs, running attached callbacks on them. Return true if
2841  * we ran any callbacks, indicating that we dropped the icloglock. We don't need
2842  * to handle transient shutdown state here at all because
2843  * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown
2844  * cleanup of the callbacks.
2845  */
2846 static bool
2847 xlog_state_do_iclog_callbacks(
2848 	struct xlog		*log)
2849 		__releases(&log->l_icloglock)
2850 		__acquires(&log->l_icloglock)
2851 {
2852 	struct xlog_in_core	*first_iclog = log->l_iclog;
2853 	struct xlog_in_core	*iclog = first_iclog;
2854 	bool			ran_callback = false;
2855 
2856 	do {
2857 		LIST_HEAD(cb_list);
2858 
2859 		if (xlog_state_iodone_process_iclog(log, iclog))
2860 			break;
2861 		if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2862 			iclog = iclog->ic_next;
2863 			continue;
2864 		}
2865 		list_splice_init(&iclog->ic_callbacks, &cb_list);
2866 		spin_unlock(&log->l_icloglock);
2867 
2868 		trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2869 		xlog_cil_process_committed(&cb_list);
2870 		trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2871 		ran_callback = true;
2872 
2873 		spin_lock(&log->l_icloglock);
2874 		xlog_state_clean_iclog(log, iclog);
2875 		iclog = iclog->ic_next;
2876 	} while (iclog != first_iclog);
2877 
2878 	return ran_callback;
2879 }
2880 
2881 
2882 /*
2883  * Loop running iclog completion callbacks until there are no more iclogs in a
2884  * state that can run callbacks.
2885  */
2886 STATIC void
2887 xlog_state_do_callback(
2888 	struct xlog		*log)
2889 {
2890 	int			flushcnt = 0;
2891 	int			repeats = 0;
2892 
2893 	spin_lock(&log->l_icloglock);
2894 	while (xlog_state_do_iclog_callbacks(log)) {
2895 		if (xlog_is_shutdown(log))
2896 			break;
2897 
2898 		if (++repeats > 5000) {
2899 			flushcnt += repeats;
2900 			repeats = 0;
2901 			xfs_warn(log->l_mp,
2902 				"%s: possible infinite loop (%d iterations)",
2903 				__func__, flushcnt);
2904 		}
2905 	}
2906 
2907 	if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2908 		wake_up_all(&log->l_flush_wait);
2909 
2910 	spin_unlock(&log->l_icloglock);
2911 }
2912 
2913 
2914 /*
2915  * Finish transitioning this iclog to the dirty state.
2916  *
2917  * Callbacks could take time, so they are done outside the scope of the
2918  * global state machine log lock.
2919  */
2920 STATIC void
2921 xlog_state_done_syncing(
2922 	struct xlog_in_core	*iclog)
2923 {
2924 	struct xlog		*log = iclog->ic_log;
2925 
2926 	spin_lock(&log->l_icloglock);
2927 	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2928 	trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2929 
2930 	/*
2931 	 * If we got an error, either on the first buffer, or in the case of
2932 	 * split log writes, on the second, we shut down the file system and
2933 	 * no iclogs should ever be attempted to be written to disk again.
2934 	 */
2935 	if (!xlog_is_shutdown(log)) {
2936 		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2937 		iclog->ic_state = XLOG_STATE_DONE_SYNC;
2938 	}
2939 
2940 	/*
2941 	 * Someone could be sleeping prior to writing out the next
2942 	 * iclog buffer; we wake them all.  One will get to do the
2943 	 * I/O, and the others will wait for the result.
2944 	 */
2945 	wake_up_all(&iclog->ic_write_wait);
2946 	spin_unlock(&log->l_icloglock);
2947 	xlog_state_do_callback(log);
2948 }
2949 
2950 /*
2951  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2952  * sleep.  We wait on the flush queue on the head iclog as that should be
2953  * the first iclog to complete flushing.  Hence if all iclogs are syncing,
2954  * we will wait here and all new writes will sleep until a sync completes.
2955  *
2956  * The in-core logs are used in a circular fashion. They are not used
2957  * out-of-order even when an iclog past the head is free.
2958  *
2959  * return:
2960  *	* log_offset where xlog_write() can start writing into the in-core
2961  *		log's data space.
2962  *	* in-core log pointer to which xlog_write() should write.
2963  *	* boolean indicating this is a continued write to an in-core log.
2964  *		If this is the last write, then the in-core log's offset field
2965  *		needs to be incremented, depending on the amount of data which
2966  *		is copied.
2967  */
2968 STATIC int
2969 xlog_state_get_iclog_space(
2970 	struct xlog		*log,
2971 	int			len,
2972 	struct xlog_in_core	**iclogp,
2973 	struct xlog_ticket	*ticket,
2974 	int			*continued_write,
2975 	int			*logoffsetp)
2976 {
2977 	int		  log_offset;
2978 	xlog_rec_header_t *head;
2979 	xlog_in_core_t	  *iclog;
2980 
2981 restart:
2982 	spin_lock(&log->l_icloglock);
2983 	if (xlog_is_shutdown(log)) {
2984 		spin_unlock(&log->l_icloglock);
2985 		return -EIO;
2986 	}
2987 
2988 	iclog = log->l_iclog;
2989 	if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2990 		XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2991 
2992 		/* Wait for log writes to have flushed */
2993 		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2994 		goto restart;
2995 	}
2996 
2997 	head = &iclog->ic_header;
2998 
2999 	atomic_inc(&iclog->ic_refcnt);	/* prevents sync */
3000 	log_offset = iclog->ic_offset;
3001 
3002 	trace_xlog_iclog_get_space(iclog, _RET_IP_);
3003 
3004 	/* On the 1st write to an iclog, figure out the lsn.  This works
3005 	 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
3006 	 * committing to.  If the offset is set, that's how many blocks
3007 	 * must be written.
3008 	 */
3009 	if (log_offset == 0) {
3010 		ticket->t_curr_res -= log->l_iclog_hsize;
3011 		xlog_tic_add_region(ticket,
3012 				    log->l_iclog_hsize,
3013 				    XLOG_REG_TYPE_LRHEADER);
3014 		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
3015 		head->h_lsn = cpu_to_be64(
3016 			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
3017 		ASSERT(log->l_curr_block >= 0);
3018 	}
3019 
3020 	/* If there is enough room to write everything, then do it.  Otherwise,
3021 	 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
3022 	 * bit is on, so this will get flushed out.  Don't update ic_offset
3023 	 * until you know exactly how many bytes get copied.  Therefore, wait
3024 	 * until later to update ic_offset.
3025 	 *
3026 	 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
3027 	 * can fit into remaining data section.
3028 	 */
3029 	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
3030 		int		error = 0;
3031 
3032 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3033 
3034 		/*
3035 		 * If we are the only one writing to this iclog, sync it to
3036 		 * disk.  We need to do an atomic compare and decrement here to
3037 		 * avoid racing with concurrent atomic_dec_and_lock() calls in
3038 		 * xlog_state_release_iclog() when there is more than one
3039 		 * reference to the iclog.
3040 		 */
3041 		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
3042 			error = xlog_state_release_iclog(log, iclog);
3043 		spin_unlock(&log->l_icloglock);
3044 		if (error)
3045 			return error;
3046 		goto restart;
3047 	}
3048 
3049 	/* Do we have enough room to write the full amount in the remainder
3050 	 * of this iclog?  Or must we continue a write on the next iclog and
3051 	 * mark this iclog as completely taken?  In the case where we switch
3052 	 * iclogs (to mark it taken), this particular iclog will release/sync
3053 	 * to disk in xlog_write().
3054 	 */
3055 	if (len <= iclog->ic_size - iclog->ic_offset) {
3056 		*continued_write = 0;
3057 		iclog->ic_offset += len;
3058 	} else {
3059 		*continued_write = 1;
3060 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3061 	}
3062 	*iclogp = iclog;
3063 
3064 	ASSERT(iclog->ic_offset <= iclog->ic_size);
3065 	spin_unlock(&log->l_icloglock);
3066 
3067 	*logoffsetp = log_offset;
3068 	return 0;
3069 }
3070 
3071 /*
3072  * The first cnt-1 times a ticket goes through here we don't need to move the
3073  * grant write head because the permanent reservation has reserved cnt times the
3074  * unit amount.  Release part of current permanent unit reservation and reset
3075  * current reservation to be one unit's worth.  Also move grant reservation head
3076  * forward.
3077  */
3078 void
3079 xfs_log_ticket_regrant(
3080 	struct xlog		*log,
3081 	struct xlog_ticket	*ticket)
3082 {
3083 	trace_xfs_log_ticket_regrant(log, ticket);
3084 
3085 	if (ticket->t_cnt > 0)
3086 		ticket->t_cnt--;
3087 
3088 	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3089 			     ticket->t_curr_res);
3090 	xlog_grant_sub_space(log, &log->l_write_head.grant,
3091 			     ticket->t_curr_res);
3092 	ticket->t_curr_res = ticket->t_unit_res;
3093 	xlog_tic_reset_res(ticket);
3094 
3095 	trace_xfs_log_ticket_regrant_sub(log, ticket);
3096 
3097 	/* put a unit back on the reserve head if the prereserved space is gone */
3098 	if (!ticket->t_cnt) {
3099 		xlog_grant_add_space(log, &log->l_reserve_head.grant,
3100 				     ticket->t_unit_res);
3101 		trace_xfs_log_ticket_regrant_exit(log, ticket);
3102 
3103 		ticket->t_curr_res = ticket->t_unit_res;
3104 		xlog_tic_reset_res(ticket);
3105 	}
3106 
3107 	xfs_log_ticket_put(ticket);
3108 }
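/*
 * Worked example (guarded by #if 0, never compiled here) of the
 * permanent-reservation arithmetic in xfs_log_ticket_regrant() above and
 * xfs_log_ticket_ungrant() below. The unit size and count are assumed
 * values chosen for illustration; standalone userspace C.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int	unit_res = 16384;	/* assumed unit reservation */
	int	cnt = 3;		/* permanent ticket: 3 units */
	int	curr_res = unit_res;

	/* regrant on a transaction roll: consume a count, refill curr_res */
	cnt--;				/* 3 -> 2 */
	curr_res = unit_res;

	/*
	 * final ungrant: consume one more count, then hand back the current
	 * reservation plus one unit for each count still held.
	 */
	cnt--;				/* 2 -> 1 */
	printf("grant heads move back %d bytes\n",
	       curr_res + unit_res * cnt);	/* 16384 + 16384 = 32768 */
	return 0;
}
#endif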
3109 
3110 /*
3111  * Give back the space left from a reservation.
3112  *
3113  * All the information we need to make a correct determination of space left
3114  * is present.  For non-permanent reservations, things are quite easy.  The
3115  * count should have been decremented to zero.  We only need to deal with the
3116  * space remaining in the current reservation part of the ticket.  If the
3117  * ticket contains a permanent reservation, there may be left over space which
3118  * needs to be released.  A count of N means that N-1 refills of the current
3119  * reservation can be done before we need to ask for more space.  The first
3120  * one goes to fill up the first current reservation.  Once we run out of
3121  * space, the count will stay at zero and the only space remaining will be
3122  * in the current reservation field.
3123  */
3124 void
3125 xfs_log_ticket_ungrant(
3126 	struct xlog		*log,
3127 	struct xlog_ticket	*ticket)
3128 {
3129 	int			bytes;
3130 
3131 	trace_xfs_log_ticket_ungrant(log, ticket);
3132 
3133 	if (ticket->t_cnt > 0)
3134 		ticket->t_cnt--;
3135 
3136 	trace_xfs_log_ticket_ungrant_sub(log, ticket);
3137 
3138 	/*
3139 	 * If this is a permanent reservation ticket, we may be able to free
3140 	 * up more space based on the remaining count.
3141 	 */
3142 	bytes = ticket->t_curr_res;
3143 	if (ticket->t_cnt > 0) {
3144 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3145 		bytes += ticket->t_unit_res*ticket->t_cnt;
3146 	}
3147 
3148 	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3149 	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3150 
3151 	trace_xfs_log_ticket_ungrant_exit(log, ticket);
3152 
3153 	xfs_log_space_wake(log->l_mp);
3154 	xfs_log_ticket_put(ticket);
3155 }
3156 
3157 /*
3158  * This routine will mark the current iclog in the ring as WANT_SYNC and move
3159  * the current iclog pointer to the next iclog in the ring.
3160  */
3161 void
3162 xlog_state_switch_iclogs(
3163 	struct xlog		*log,
3164 	struct xlog_in_core	*iclog,
3165 	int			eventual_size)
3166 {
3167 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3168 	assert_spin_locked(&log->l_icloglock);
3169 	trace_xlog_iclog_switch(iclog, _RET_IP_);
3170 
3171 	if (!eventual_size)
3172 		eventual_size = iclog->ic_offset;
3173 	iclog->ic_state = XLOG_STATE_WANT_SYNC;
3174 	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3175 	log->l_prev_block = log->l_curr_block;
3176 	log->l_prev_cycle = log->l_curr_cycle;
3177 
3178 	/* roll log?: ic_offset changed later */
3179 	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3180 
3181 	/* Round up to next log-sunit */
3182 	if (log->l_iclog_roundoff > BBSIZE) {
3183 		uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
3184 		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3185 	}
3186 
3187 	if (log->l_curr_block >= log->l_logBBsize) {
3188 		/*
3189 		 * Rewind the current block before the cycle is bumped to make
3190 		 * sure that the combined LSN never transiently moves forward
3191 		 * when the log wraps to the next cycle. This is to support the
3192 		 * unlocked sample of these fields from xlog_valid_lsn(). Most
3193 		 * other cases should acquire l_icloglock.
3194 		 */
3195 		log->l_curr_block -= log->l_logBBsize;
3196 		ASSERT(log->l_curr_block >= 0);
3197 		smp_wmb();
3198 		log->l_curr_cycle++;
3199 		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3200 			log->l_curr_cycle++;
3201 	}
3202 	ASSERT(iclog == log->l_iclog);
3203 	log->l_iclog = iclog->ic_next;
3204 }
3205 
3206 /*
3207  * Force the iclog to disk and check if the iclog has been completed before
3208  * xlog_force_iclog() returns. This can happen on synchronous (e.g.
3209  * pmem) or fast async storage because we drop the icloglock to issue the IO.
3210  * If completion has already occurred, tell the caller so that it can avoid an
3211  * unnecessary wait on the iclog.
3212  */
3213 static int
3214 xlog_force_and_check_iclog(
3215 	struct xlog_in_core	*iclog,
3216 	bool			*completed)
3217 {
3218 	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3219 	int			error;
3220 
3221 	*completed = false;
3222 	error = xlog_force_iclog(iclog);
3223 	if (error)
3224 		return error;
3225 
3226 	/*
3227 	 * If the iclog has already been completed and reused the header LSN
3228 	 * will have been rewritten by completion
3229 	 */
3230 	if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3231 		*completed = true;
3232 	return 0;
3233 }
3234 
3235 /*
3236  * Write out all data in the in-core log as of this exact moment in time.
3237  *
3238  * Data may be written to the in-core log during this call.  However,
3239  * we don't guarantee this data will be written out.  A change from past
3240  * implementation means this routine will *not* write out zero length LRs.
3241  *
3242  * Basically, we try and perform an intelligent scan of the in-core logs.
3243  * If we determine there is no flushable data, we just return.  There is no
3244  * flushable data if:
3245  *
3246  *	1. the current iclog is active and has no data; the previous iclog
3247  *		is in the active or dirty state.
3248  *	2. the current iclog is dirty, and the previous iclog is in the
3249  *		active or dirty state.
3250  *
3251  * We may sleep if:
3252  *
3253  *	1. the current iclog is not in the active nor dirty state.
3254  *	2. the current iclog is dirty, and the previous iclog is not in the
3255  *		active nor dirty state.
3256  *	3. the current iclog is active, and there is another thread writing
3257  *		to this particular iclog.
3258  *	4. a) the current iclog is active and has no other writers
3259  *	   b) when we return from flushing out this iclog, it is still
3260  *		not in the active nor dirty state.
3261  */
3262 int
3263 xfs_log_force(
3264 	struct xfs_mount	*mp,
3265 	uint			flags)
3266 {
3267 	struct xlog		*log = mp->m_log;
3268 	struct xlog_in_core	*iclog;
3269 
3270 	XFS_STATS_INC(mp, xs_log_force);
3271 	trace_xfs_log_force(mp, 0, _RET_IP_);
3272 
3273 	xlog_cil_force(log);
3274 
3275 	spin_lock(&log->l_icloglock);
3276 	if (xlog_is_shutdown(log))
3277 		goto out_error;
3278 
3279 	iclog = log->l_iclog;
3280 	trace_xlog_iclog_force(iclog, _RET_IP_);
3281 
3282 	if (iclog->ic_state == XLOG_STATE_DIRTY ||
3283 	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
3284 	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3285 		/*
3286 		 * If the head is dirty or (active and empty), then we need to
3287 		 * look at the previous iclog.
3288 		 *
3289 		 * If the previous iclog is active or dirty we are done.  There
3290 		 * is nothing to sync out. Otherwise, we attach ourselves to the
3291 		 * previous iclog and go to sleep.
3292 		 */
3293 		iclog = iclog->ic_prev;
3294 	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3295 		if (atomic_read(&iclog->ic_refcnt) == 0) {
3296 			/* We have exclusive access to this iclog. */
3297 			bool	completed;
3298 
3299 			if (xlog_force_and_check_iclog(iclog, &completed))
3300 				goto out_error;
3301 
3302 			if (completed)
3303 				goto out_unlock;
3304 		} else {
3305 			/*
3306 			 * Someone else is still writing to this iclog, so we
3307 			 * need to ensure that when they release the iclog it
3308 			 * gets synced immediately as we may be waiting on it.
3309 			 */
3310 			xlog_state_switch_iclogs(log, iclog, 0);
3311 		}
3312 	}
3313 
3314 	/*
3315 	 * The iclog we are about to wait on may contain the checkpoint pushed
3316 	 * by the above xlog_cil_force() call, but it may not have been pushed
3317 	 * to disk yet. Like the ACTIVE case above, we need to make sure caches
3318 	 * are flushed when this iclog is written.
3319 	 */
3320 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3321 		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3322 
3323 	if (flags & XFS_LOG_SYNC)
3324 		return xlog_wait_on_iclog(iclog);
3325 out_unlock:
3326 	spin_unlock(&log->l_icloglock);
3327 	return 0;
3328 out_error:
3329 	spin_unlock(&log->l_icloglock);
3330 	return -EIO;
3331 }
3332 
3333 /*
3334  * Force the log to a specific LSN.
3335  *
3336  * If an iclog with that lsn can be found:
3337  *	If it is in the DIRTY state, just return.
3338  *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3339  *		state and go to sleep or return.
3340  *	If it is in any other state, go to sleep or return.
3341  *
3342  * Synchronous forces are implemented with a wait queue.  All callers trying
3343  * to force a given lsn to disk must wait on the queue attached to the
3344  * specific in-core log.  When the given in-core log finally completes its
3345  * write to disk, that thread will wake up all threads waiting on the queue.
3346  */
3347 static int
3348 xlog_force_lsn(
3349 	struct xlog		*log,
3350 	xfs_lsn_t		lsn,
3351 	uint			flags,
3352 	int			*log_flushed,
3353 	bool			already_slept)
3354 {
3355 	struct xlog_in_core	*iclog;
3356 	bool			completed;
3357 
3358 	spin_lock(&log->l_icloglock);
3359 	if (xlog_is_shutdown(log))
3360 		goto out_error;
3361 
3362 	iclog = log->l_iclog;
3363 	while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3364 		trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3365 		iclog = iclog->ic_next;
3366 		if (iclog == log->l_iclog)
3367 			goto out_unlock;
3368 	}
3369 
3370 	switch (iclog->ic_state) {
3371 	case XLOG_STATE_ACTIVE:
3372 		/*
3373 		 * We sleep here if we haven't already slept (e.g. this is the
3374 		 * first time we've looked at the correct iclog buf) and the
3375 		 * buffer before us is going to be sync'ed.  The reason for this
3376 		 * is that if we are doing sync transactions here, by waiting
3377 		 * for the previous I/O to complete, we can allow a few more
3378 		 * transactions into this iclog before we close it down.
3379 		 *
3380 		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
3381 		 * refcnt so we can release the log (which drops the ref count).
3382 		 * The state switch keeps new transaction commits from using
3383 		 * this buffer.  When the current commits finish writing into
3384 		 * the buffer, the refcount will drop to zero and the buffer
3385 		 * will go out then.
3386 		 */
3387 		if (!already_slept &&
3388 		    (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3389 		     iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3390 			xlog_wait(&iclog->ic_prev->ic_write_wait,
3391 				  &log->l_icloglock);
3392 			return -EAGAIN;
3393 		}
3394 		if (xlog_force_and_check_iclog(iclog, &completed))
3395 			goto out_error;
3396 		if (log_flushed)
3397 			*log_flushed = 1;
3398 		if (completed)
3399 			goto out_unlock;
3400 		break;
3401 	case XLOG_STATE_WANT_SYNC:
3402 		/*
3403 		 * This iclog may contain the checkpoint pushed by the
3404 		 * xlog_cil_force_seq() call, but there are other writers still
3405 		 * accessing it so it hasn't been pushed to disk yet. Like the
3406 		 * ACTIVE case above, we need to make sure caches are flushed
3407 		 * when this iclog is written.
3408 		 */
3409 		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3410 		break;
3411 	default:
3412 		/*
3413 		 * The entire checkpoint was written by the CIL force and is on
3414 		 * its way to disk already. It will be stable when it
3415 		 * completes, so we don't need to manipulate caches here at all.
3416 * We just need to wait for completion if necessary. 3417 */ 3418 break; 3419 } 3420 3421 if (flags & XFS_LOG_SYNC) 3422 return xlog_wait_on_iclog(iclog); 3423 out_unlock: 3424 spin_unlock(&log->l_icloglock); 3425 return 0; 3426 out_error: 3427 spin_unlock(&log->l_icloglock); 3428 return -EIO; 3429 } 3430 3431 /* 3432 * Force the log to a specific checkpoint sequence. 3433 * 3434 * First force the CIL so that all the required changes have been flushed to the 3435 * iclogs. If the CIL force completed it will return a commit LSN that indicates 3436 * the iclog that needs to be flushed to stable storage. If the caller needs 3437 * a synchronous log force, we will wait on the iclog with the LSN returned by 3438 * xlog_cil_force_seq() to be completed. 3439 */ 3440 int 3441 xfs_log_force_seq( 3442 struct xfs_mount *mp, 3443 xfs_csn_t seq, 3444 uint flags, 3445 int *log_flushed) 3446 { 3447 struct xlog *log = mp->m_log; 3448 xfs_lsn_t lsn; 3449 int ret; 3450 ASSERT(seq != 0); 3451 3452 XFS_STATS_INC(mp, xs_log_force); 3453 trace_xfs_log_force(mp, seq, _RET_IP_); 3454 3455 lsn = xlog_cil_force_seq(log, seq); 3456 if (lsn == NULLCOMMITLSN) 3457 return 0; 3458 3459 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); 3460 if (ret == -EAGAIN) { 3461 XFS_STATS_INC(mp, xs_log_force_sleep); 3462 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); 3463 } 3464 return ret; 3465 } 3466 3467 /* 3468 * Free a used ticket when its refcount falls to zero. 3469 */ 3470 void 3471 xfs_log_ticket_put( 3472 xlog_ticket_t *ticket) 3473 { 3474 ASSERT(atomic_read(&ticket->t_ref) > 0); 3475 if (atomic_dec_and_test(&ticket->t_ref)) 3476 kmem_cache_free(xfs_log_ticket_cache, ticket); 3477 } 3478 3479 xlog_ticket_t * 3480 xfs_log_ticket_get( 3481 xlog_ticket_t *ticket) 3482 { 3483 ASSERT(atomic_read(&ticket->t_ref) > 0); 3484 atomic_inc(&ticket->t_ref); 3485 return ticket; 3486 } 3487 3488 /* 3489 * Figure out the total log space unit (in bytes) that would be 3490 * required for a log ticket. 3491 */ 3492 static int 3493 xlog_calc_unit_res( 3494 struct xlog *log, 3495 int unit_bytes) 3496 { 3497 int iclog_space; 3498 uint num_headers; 3499 3500 /* 3501 * Permanent reservations have up to 'cnt'-1 active log operations 3502 * in the log. A unit in this case is the amount of space for one 3503 * of these log operations. Normal reservations have a cnt of 1 3504 * and their unit amount is the total amount of space required. 3505 * 3506 * The following lines of code account for non-transaction data 3507 * which occupy space in the on-disk log. 3508 * 3509 * Normal form of a transaction is: 3510 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3511 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3512 * 3513 * We need to account for all the leadup data and trailer data 3514 * around the transaction data. 3515 * And then we need to account for the worst case in terms of using 3516 * more space. 3517 * The worst case will happen if: 3518 * - the placement of the transaction happens to be such that the 3519 * roundoff is at its maximum 3520 * - the transaction data is synced before the commit record is synced 3521 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3522 * Therefore the commit record is in its own Log Record. 3523 * This can happen as the commit record is called with its 3524 * own region to xlog_write(). 3525 * This then means that in the worst case, roundoff can happen for 3526 * the commit-rec as well. 
3527  * The commit-rec is smaller than padding in this scenario and so it is
3528  * not added separately.
3529  */
3530 
3531 	/* for trans header */
3532 	unit_bytes += sizeof(xlog_op_header_t);
3533 	unit_bytes += sizeof(xfs_trans_header_t);
3534 
3535 	/* for start-rec */
3536 	unit_bytes += sizeof(xlog_op_header_t);
3537 
3538 	/*
3539 	 * for LR headers - the space for data in an iclog is the size minus
3540 	 * the space used for the headers. If we use the iclog size, then we
3541 	 * undercalculate the number of headers required.
3542 	 *
3543 	 * Furthermore - the addition of op headers for split-recs might
3544 	 * increase the space required enough to require more log and op
3545 	 * headers, so take that into account too.
3546 	 *
3547 	 * IMPORTANT: This reservation makes the assumption that if this
3548 	 * transaction is the first in an iclog and hence has the LR headers
3549 	 * accounted to it, then the remaining space in the iclog is
3550 	 * exclusively for this transaction.  i.e. if the transaction is larger
3551 	 * than the iclog, it will be the only thing in that iclog.
3552 	 * Fundamentally, this means we must pass the entire log vector to
3553 	 * xlog_write to guarantee this.
3554 	 */
3555 	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3556 	num_headers = howmany(unit_bytes, iclog_space);
3557 
3558 	/* for split-recs - ophdrs added when data split over LRs */
3559 	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3560 
3561 	/* add extra header reservations if we overrun */
3562 	while (!num_headers ||
3563 	       howmany(unit_bytes, iclog_space) > num_headers) {
3564 		unit_bytes += sizeof(xlog_op_header_t);
3565 		num_headers++;
3566 	}
3567 	unit_bytes += log->l_iclog_hsize * num_headers;
3568 
3569 	/* for commit-rec LR header - note: padding will subsume the ophdr */
3570 	unit_bytes += log->l_iclog_hsize;
3571 
3572 	/* roundoff padding for transaction data and one for commit record */
3573 	unit_bytes += 2 * log->l_iclog_roundoff;
3574 
3575 	return unit_bytes;
3576 }
3577 
3578 int
3579 xfs_log_calc_unit_res(
3580 	struct xfs_mount	*mp,
3581 	int			unit_bytes)
3582 {
3583 	return xlog_calc_unit_res(mp->m_log, unit_bytes);
3584 }
3585 
3586 /*
3587  * Allocate and initialise a new log ticket.
3588  */
3589 struct xlog_ticket *
3590 xlog_ticket_alloc(
3591 	struct xlog		*log,
3592 	int			unit_bytes,
3593 	int			cnt,
3594 	char			client,
3595 	bool			permanent)
3596 {
3597 	struct xlog_ticket	*tic;
3598 	int			unit_res;
3599 
3600 	tic = kmem_cache_zalloc(xfs_log_ticket_cache, GFP_NOFS | __GFP_NOFAIL);
3601 
3602 	unit_res = xlog_calc_unit_res(log, unit_bytes);
3603 
3604 	atomic_set(&tic->t_ref, 1);
3605 	tic->t_task		= current;
3606 	INIT_LIST_HEAD(&tic->t_queue);
3607 	tic->t_unit_res		= unit_res;
3608 	tic->t_curr_res		= unit_res;
3609 	tic->t_cnt		= cnt;
3610 	tic->t_ocnt		= cnt;
3611 	tic->t_tid		= prandom_u32();
3612 	tic->t_clientid		= client;
3613 	if (permanent)
3614 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3615 
3616 	xlog_tic_reset_res(tic);
3617 
3618 	return tic;
3619 }
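/*
 * Worked example (guarded by #if 0, never compiled here) following the
 * steps of xlog_calc_unit_res() above. The geometry (32k iclogs, 512-byte
 * LR headers and roundoff) and the 12/16-byte header sizes are assumed
 * values for illustration; the real numbers depend on the filesystem.
 */
#if 0
#include <stdio.h>

#define DEMO_OPHDR		12	/* assumed sizeof(xlog_op_header_t) */
#define DEMO_TRANSHDR		16	/* assumed sizeof(xfs_trans_header_t) */
#define DEMO_HOWMANY(x, y)	(((x) + (y) - 1) / (y))

int
main(void)
{
	int	unit_bytes = 8192;	/* caller's payload estimate */
	int	iclog_size = 32768, iclog_hsize = 512, roundoff = 512;
	int	iclog_space = iclog_size - iclog_hsize;
	int	num_headers;

	unit_bytes += DEMO_OPHDR + DEMO_TRANSHDR;	/* trans header */
	unit_bytes += DEMO_OPHDR;			/* start record */

	num_headers = DEMO_HOWMANY(unit_bytes, iclog_space);
	unit_bytes += DEMO_OPHDR * num_headers;		/* split-rec ophdrs */
	while (!num_headers ||
	       DEMO_HOWMANY(unit_bytes, iclog_space) > num_headers) {
		unit_bytes += DEMO_OPHDR;
		num_headers++;
	}
	unit_bytes += iclog_hsize * num_headers;	/* LR headers */
	unit_bytes += iclog_hsize;			/* commit-rec LR hdr */
	unit_bytes += 2 * roundoff;			/* worst-case pad */

	printf("unit reservation: %d bytes\n", unit_bytes);	/* 10292 */
	return 0;
}
#endif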
3620 
3621 #if defined(DEBUG)
3622 /*
3623  * Make sure that the destination ptr is within the valid data region of
3624  * one of the iclogs.  This uses backup pointers stored in a different
3625  * part of the log in case we trash the log structure.
3626  */
3627 STATIC void
3628 xlog_verify_dest_ptr(
3629 	struct xlog	*log,
3630 	void		*ptr)
3631 {
3632 	int		i;
3633 	int		good_ptr = 0;
3634 
3635 	for (i = 0; i < log->l_iclog_bufs; i++) {
3636 		if (ptr >= log->l_iclog_bak[i] &&
3637 		    ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3638 			good_ptr++;
3639 	}
3640 
3641 	if (!good_ptr)
3642 		xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3643 }
3644 
3645 /*
3646  * Check to make sure the grant write head didn't just overlap the tail.  If
3647  * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
3648  * the cycles differ by exactly one and check the byte count.
3649  *
3650  * This check is run unlocked, so can give false positives. Rather than assert
3651  * on failures, use a warn-once flag and a panic tag to allow the admin to
3652  * determine if they want to panic the machine when such an error occurs. For
3653  * debug kernels this will have the same effect as using an assert but, unlike
3654  * an assert, it can be turned off at runtime.
3655  */
3656 STATIC void
3657 xlog_verify_grant_tail(
3658 	struct xlog	*log)
3659 {
3660 	int		tail_cycle, tail_blocks;
3661 	int		cycle, space;
3662 
3663 	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3664 	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3665 	if (tail_cycle != cycle) {
3666 		if (cycle - 1 != tail_cycle &&
3667 		    !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3668 			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3669 				"%s: cycle - 1 != tail_cycle", __func__);
3670 		}
3671 
3672 		if (space > BBTOB(tail_blocks) &&
3673 		    !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3674 			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3675 				"%s: space > BBTOB(tail_blocks)", __func__);
3676 		}
3677 	}
3678 }
3679 
3680 /* check if it will fit */
3681 STATIC void
3682 xlog_verify_tail_lsn(
3683 	struct xlog		*log,
3684 	struct xlog_in_core	*iclog)
3685 {
3686 	xfs_lsn_t	tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3687 	int		blocks;
3688 
3689 	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3690 		blocks =
3691 		    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3692 		if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3693 			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3694 	} else {
3695 		ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3696 
3697 		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3698 			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3699 
3700 		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3701 		if (blocks < BTOBB(iclog->ic_offset) + 1)
3702 			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3703 	}
3704 }
3705 
3706 /*
3707  * Perform a number of checks on the iclog before writing to disk.
3708  *
3709  * 1. Make sure the iclogs are still circular
3710  * 2. Make sure we have a good magic number
3711  * 3. Make sure we don't have magic numbers in the data
3712  * 4. Check fields of each log operation header for:
3713  *	A. Valid client identifier
3714  *	B. tid ptr value falls in valid ptr space (user space code)
3715  *	C. Length in log record header is correct according to the
3716  *		individual operation headers within record.
3717  * 5. When a bwrite will occur within 5 blocks of the front of the physical
3718  *	log, check the preceding blocks of the physical log to make sure all
3719  *	the cycle numbers agree with the current cycle number.
/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *	   individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *    log, check the preceding blocks of the physical log to make sure all
 *    the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	void			*base_ptr, *ptr, *p;
	ptrdiff_t		field_offset;
	uint8_t			clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
		ASSERT(icptr);

	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	base_ptr = ptr = &iclog->ic_header;
	p = &iclog->ic_header;
	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	base_ptr = ptr = iclog->ic_datap;
	ophead = ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = ptr;

		/* clientid is only 1 byte */
		p = &ophead->oh_clientid;
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length */
		p = &ophead->oh_len;
		field_offset = p - base_ptr;
		if (field_offset & 0x1ff) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((uintptr_t)&ophead->oh_len -
				    (uintptr_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
}
#endif
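/*
 * A minimal, illustrative sketch (not part of the build) of the index
 * arithmetic xlog_verify_iclog() uses to fetch a field that lands
 * exactly on a 512-byte boundary. The first 4 bytes of every 512-byte
 * block of a log record are overwritten with the cycle number when the
 * record is written, and the original bytes are saved in the record
 * header's h_cycle_data[] (or in extended headers once the record grows
 * beyond XLOG_HEADER_CYCLE_SIZE bytes of data). The helper name and the
 * 'offset' parameter are hypothetical; 'offset' is the byte offset of
 * the field from the start of the iclog data region.
 */
#if 0
static __be32
example_saved_field(
	struct xlog_in_core	*iclog,
	ptrdiff_t		offset)
{
	xlog_in_core_2_t	*xhdr = iclog->ic_data;
	int			idx = BTOBBT(offset);
	int			hdrs = XLOG_HEADER_CYCLE_SIZE / BBSIZE;

	if (idx >= hdrs) {
		/* block is covered by an extended header */
		return xhdr[idx / hdrs].hic_xheader.xh_cycle_data[idx % hdrs];
	}
	/* block is covered by the main record header */
	return iclog->ic_header.h_cycle_data[idx];
}
#endif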
/*
 * Perform a forced shutdown on the log.
 *
 * This can be called from low-level log code to trigger a shutdown, or from
 * the high-level mount shutdown code when the mount shuts down.
 *
 * Our main objectives here are to make sure that:
 *	a. if the shutdown was not due to a log IO error, flush the logs to
 *	   disk. Anything modified after this is ignored.
 *	b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
 *	   parties to find out. Nothing new gets queued after this is done.
 *	c. Tasks sleeping on log reservations, pinned objects and
 *	   other resources get woken up.
 *	d. The mount is also marked as shut down so that log triggered
 *	   shutdowns still behave the same as if they called
 *	   xfs_forced_shutdown().
 *
 * Return true if the shutdown cause was a log IO error and we actually shut
 * the log down.
 */
bool
xlog_force_shutdown(
	struct xlog	*log,
	int		shutdown_flags)
{
	bool		log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR);

	if (!log)
		return false;

	/*
	 * Flush all the completed transactions to disk before marking the log
	 * being shut down. We need to do this first as shutting down the log
	 * before the force will prevent the log force from flushing the iclogs
	 * to disk.
	 *
	 * When we are in recovery, there are no transactions to flush, and
	 * we don't want to touch the log because we don't want to perturb the
	 * current head/tail for future recovery attempts. Hence we need to
	 * avoid a log force in this case.
	 *
	 * If we are shutting down due to a log IO error, then we must avoid
	 * trying to write the log as that may just result in more IO errors
	 * and an endless shutdown/force loop.
	 */
	if (!log_error && !xlog_in_recovery(log))
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

	/*
	 * Atomically set the shutdown state. If the shutdown state is already
	 * set, then someone else is performing the shutdown and so we are done
	 * here. This should never happen because we should only ever get
	 * called once by the first shutdown caller.
	 *
	 * Much of the log state machine transitions assume that shutdown state
	 * cannot change once they hold the log->l_icloglock. Hence we need to
	 * hold that lock here, even though we use the atomic
	 * test_and_set_bit() operation to set the shutdown state.
	 */
	spin_lock(&log->l_icloglock);
	if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
		spin_unlock(&log->l_icloglock);
		return false;
	}
	spin_unlock(&log->l_icloglock);

	/*
	 * If this log shutdown also sets the mount shutdown state, issue a
	 * shutdown warning message.
	 */
	if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
		xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
"Filesystem has been shut down due to log error (0x%x).",
				shutdown_flags);
		xfs_alert(log->l_mp,
"Please unmount the filesystem and rectify the problem(s).");
		if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
			xfs_stack_trace();
	}

	/*
	 * We don't want anybody waiting for log reservations after this. That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
	 * as if the log writes were completed. The abort handling in the log
	 * item committed callback functions will do this again under lock to
	 * avoid races.
	 */
	spin_lock(&log->l_cilp->xc_push_lock);
	wake_up_all(&log->l_cilp->xc_start_wait);
	wake_up_all(&log->l_cilp->xc_commit_wait);
	spin_unlock(&log->l_cilp->xc_push_lock);

	spin_lock(&log->l_icloglock);
	xlog_state_shutdown_callbacks(log);
	spin_unlock(&log->l_icloglock);

	wake_up_var(&log->l_opstate);
	return log_error;
}
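/*
 * A minimal, illustrative sketch (not part of the build) of how a
 * hypothetical caller consumes the return value of xlog_force_shutdown()
 * above: every call after the first returns false, so only the caller
 * that actually transitioned the log into shutdown needs to react.
 */
#if 0
static void
example_react_to_log_io_error(
	struct xlog	*log)
{
	/* SHUTDOWN_LOG_IO_ERROR: don't try to write the dead log again */
	if (xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR)) {
		/* this call won the race and shut the log down */
	}
}
#endif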
STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/*
		 * Endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}

/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{
	struct xlog		*log = mp->m_log;
	bool			valid;

	/*
	 * norecovery mode skips mount-time log processing and unconditionally
	 * resets the in-core LSN. We can't validate in this mode, but
	 * modifications are not allowed anyway so just return true.
	 */
	if (xfs_has_norecovery(mp))
		return true;

	/*
	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
	 * handled by recovery and thus safe to ignore here.
	 */
	if (lsn == NULLCOMMITLSN)
		return true;

	valid = xlog_valid_lsn(mp->m_log, lsn);

	/* warn the user about what's gone wrong before verifier failure */
	if (!valid) {
		spin_lock(&log->l_icloglock);
		xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
			 log->l_curr_cycle, log->l_curr_block);
		spin_unlock(&log->l_icloglock);
	}

	return valid;
}

/*
 * Notify the log that we're about to start using a feature that is protected
 * by a log incompat feature flag. This will prevent log covering from
 * clearing those flags.
 */
void
xlog_use_incompat_feat(
	struct xlog	*log)
{
	down_read(&log->l_incompat_users);
}

/* Notify the log that we've finished using log incompat features. */
void
xlog_drop_incompat_feat(
	struct xlog	*log)
{
	up_read(&log->l_incompat_users);
}
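/*
 * A minimal, illustrative sketch (not part of the build) of the intended
 * pairing of the two hooks above around a hypothetical operation that
 * relies on a log incompat feature. While l_incompat_users is held for
 * read, log covering cannot clear the feature flags from the superblock,
 * so the feature remains set until the operation's log items are safe.
 */
#if 0
static void
example_use_incompat_feature(
	struct xlog	*log)
{
	xlog_use_incompat_feat(log);
	/* ... commit log items that require the incompat feature ... */
	xlog_drop_incompat_feat(log);
}
#endif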