/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_cksum.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"

kmem_zone_t	*xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void
xlog_state_do_callback(
	struct xlog		*log,
	int			aborted,
	struct xlog_in_core	*iclog);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size);
STATIC void
xlog_state_want_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);

STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);
STATIC void
xlog_ungrant_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);

#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	bool			syncing);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);
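
/*
 * Grant head accounting helpers.
 *
 * Each grant head packs a cycle number and a byte offset into a single
 * atomic64_t (see xlog_crack_grant_head_val() and
 * xlog_assign_grant_head_val()).  The two helpers below update that packed
 * value locklessly with an atomic64_cmpxchg() loop: the byte count wraps
 * around the physical log size (log->l_logsize), and the cycle number is
 * incremented or decremented when the head crosses the end or start of the
 * log.
 */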

static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;

	list_for_each_entry(tic, &head->waiters, t_queue) {
		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes)
			return false;

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
	}

	return true;
}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters, if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation. By wasting space in each
 * reservation, we prevent over allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	uint8_t			client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
				KM_SLEEP | KM_MAYFAIL);
	if (!tic)
		return -ENOMEM;

	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}


/*
 * NOTES:
 *
 *	1. currblock field gets updated at startup and after in-core logs
 *	   are marked with WANT_SYNC.
 */

/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation.  If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket.  If the ticket was one with a permanent reservation, then
 * a few operations are done differently.  Permanent reservation tickets by
 * default don't release the reservation.  They just commit the current
 * transaction with the belief that the reservation is still needed.  A flag
 * must be passed in before permanent reservations are actually released.
 * When these type of tickets are not released, they need to be set into
 * the inited state again.  By doing this, a start record will be written
 * out when the next write occurs.
 */
xfs_lsn_t
xfs_log_done(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	xfs_lsn_t		lsn = 0;

	if (XLOG_FORCED_SHUTDOWN(log) ||
	    /*
	     * If nothing was ever written, don't write out commit record.
	     * If we get an error, just continue and give back the log ticket.
	     */
	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
		lsn = (xfs_lsn_t) -1;
		regrant = false;
	}


	if (!regrant) {
		trace_xfs_log_done_nonperm(log, ticket);

		/*
		 * Release ticket if not permanent reservation or a specific
		 * request has been made to release a permanent reservation.
		 */
		xlog_ungrant_log_space(log, ticket);
	} else {
		trace_xfs_log_done_perm(log, ticket);

		xlog_regrant_reserve_log_space(log, ticket);
		/* If this ticket was a permanent reservation and we aren't
		 * trying to release it, reset the inited flags; so next time
		 * we write, a start record will be written out.
		 */
		ticket->t_flags |= XLOG_TIC_INITED;
	}

	xfs_log_ticket_put(ticket);
	return lsn;
}

/*
 * Attaches a new iclog I/O completion callback routine during
 * transaction commit.  If the log is in error state, a non-zero
 * return code is handed back and the caller is responsible for
 * executing the callback at an appropriate time.
 */
int
xfs_log_notify(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog,
	xfs_log_callback_t	*cb)
{
	int	abortflg;

	spin_lock(&iclog->ic_callback_lock);
	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
	if (!abortflg) {
		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
		cb->cb_next = NULL;
		*(iclog->ic_callback_tail) = cb;
		iclog->ic_callback_tail = &(cb->cb_next);
	}
	spin_unlock(&iclog->ic_callback_lock);
	return abortflg;
}

int
xfs_log_release_iclog(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog)
{
	if (xlog_state_release_iclog(mp->m_log, iclog)) {
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
		return -EIO;
	}

	return 0;
}

/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	bool		fatal = xfs_sb_version_hascrc(&mp->m_sb);
	int		error = 0;
	int		min_logfsbs;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small, as that would lead to unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails.  This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logsunit > 1 &&
		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
		xfs_warn(mp,
		"log stripe unit %u bytes must be a multiple of block size",
			 mp->m_sb.sb_logsunit);
		error = -EINVAL;
		fatal = true;
	}
	if (error) {
		/*
		 * Log check errors are always fatal on v5; or whenever bad
		 * metadata leads to a crash.
		 */
		if (fatal) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(mp->m_log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}

/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work.  If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	int	error = 0;
	bool	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
	bool	recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	} else if (readonly) {
		/* Allow unlinked processing to proceed */
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
	}

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes.  Turn it off immediately after recovery finishes
	 * so that we don't leak the quota inodes if subsequent mount
	 * activities fail.
	 *
	 * We let all inodes involved in redo item processing end up on
	 * the LRU instead of being evicted immediately so that if we do
	 * something to an unlinked inode, the irele won't cause
	 * premature truncation and freeing of the inode, which results
	 * in log recovery failure.  We have to evict the unreferenced
	 * lru inodes after clearing SB_ACTIVE because we don't
	 * otherwise clean up the lru if there's a subsequent failure in
	 * xfs_mountfs, which leads to us leaking the inodes if nothing
	 * else (e.g. quotacheck) references the inodes before the
	 * mount failure occurs.
	 */
	mp->m_super->s_flags |= SB_ACTIVE;
	error = xlog_recover_finish(mp->m_log);
	if (!error)
		xfs_log_work_queue(mp);
	mp->m_super->s_flags &= ~SB_ACTIVE;
	evict_inodes(mp->m_super);

	/*
	 * Drain the buffer LRU after log recovery.  This is required for v4
	 * filesystems to avoid leaving around buffers with NULL verifier ops,
	 * but we do it unconditionally to make sure we're always in a clean
	 * cache state after mount.
	 *
	 * Don't push in the error case because the AIL may have pending intents
	 * that aren't removed until recovery is cancelled.
	 */
	if (!error && recovered) {
		xfs_log_force(mp, XFS_LOG_SYNC);
		xfs_ail_push_all_sync(mp->m_ail);
	}
	xfs_wait_buftarg(mp->m_ddev_targp);

	if (readonly)
		mp->m_flags |= XFS_MOUNT_RDONLY;

	return error;
}

/*
 * The mount has failed.  Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
int
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	int			error;

	error = xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);

	return error;
}

/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens.  Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */

static int
xfs_log_unmount_write(xfs_mount_t *mp)
{
	struct xlog	 *log = mp->m_log;
	xlog_in_core_t	 *iclog;
#ifdef DEBUG
	xlog_in_core_t	 *first_iclog;
#endif
	xlog_ticket_t	*tic = NULL;
	xfs_lsn_t	 lsn;
	int		 error;

	/*
	 * Don't write out unmount record on norecovery mounts or ro devices.
	 * Or, if we are doing a forced umount (typically because of IO errors).
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
	    xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	}

	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

#ifdef DEBUG
	first_iclog = iclog = log->l_iclog;
	do {
		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
			ASSERT(iclog->ic_offset == 0);
		}
		iclog = iclog->ic_next;
	} while (iclog != first_iclog);
#endif
	if (! (XLOG_FORCED_SHUTDOWN(log))) {
		error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
		if (!error) {
			/* the data section must be 32 bit size aligned */
			struct {
				uint16_t magic;
				uint16_t pad1;
				uint32_t pad2; /* may as well make it 64 bits */
			} magic = {
				.magic = XLOG_UNMOUNT_TYPE,
			};
			struct xfs_log_iovec reg = {
				.i_addr = &magic,
				.i_len = sizeof(magic),
				.i_type = XLOG_REG_TYPE_UNMOUNT,
			};
			struct xfs_log_vec vec = {
				.lv_niovecs = 1,
				.lv_iovecp = &reg,
			};

			/* remove inited flag, and account for space used */
			tic->t_flags = 0;
			tic->t_curr_res -= sizeof(magic);
			error = xlog_write(log, &vec, tic, &lsn,
					   NULL, XLOG_UNMOUNT_TRANS);
			/*
			 * At this point, we're umounting anyway,
			 * so there's no point in transitioning log state
			 * to IOERROR. Just continue...
			 */
		}

		if (error)
			xfs_alert(mp, "%s: unmount record failed", __func__);


		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);
		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);
		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
		      iclog->ic_state == XLOG_STATE_DIRTY)) {
			if (!XLOG_FORCED_SHUTDOWN(log)) {
				xlog_wait(&iclog->ic_force_wait,
					  &log->l_icloglock);
			} else {
				spin_unlock(&log->l_icloglock);
			}
		} else {
			spin_unlock(&log->l_icloglock);
		}
		if (tic) {
			trace_xfs_log_umount_write(log, tic);
			xlog_ungrant_log_space(log, tic);
			xfs_log_ticket_put(tic);
		}
	} else {
		/*
		 * We're already in forced_shutdown mode, couldn't
		 * even attempt to write out the unmount transaction.
		 *
		 * Go through the motions of sync'ing and releasing
		 * the iclog, even though no I/O will actually happen,
		 * we need to wait for other log I/Os that may already
		 * be in progress.  Do this as a separate section of
		 * code so we'll know if we ever get stuck here that
		 * we're in this odd situation of trying to unmount
		 * a file system that went into forced_shutdown as
		 * the result of an unmount..
		 */
		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);

		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);

		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
		      iclog->ic_state == XLOG_STATE_DIRTY ||
		      iclog->ic_state == XLOG_STATE_IOERROR)) {

			xlog_wait(&iclog->ic_force_wait,
				  &log->l_icloglock);
		} else {
			spin_unlock(&log->l_icloglock);
		}
	}

	return error;
}	/* xfs_log_unmount_write */

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_wait_buftarg() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_wait_buftarg(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{
	item->li_mountp = mp;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
	item->li_lv = NULL;

	INIT_LIST_HEAD(&item->li_ail);
	INIT_LIST_HEAD(&item->li_cil);
}

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered.  To begin the transition to the idle state, the log first needs to
 * be idle.  That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL.  Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static int
xfs_log_need_covered(xfs_mount_t *mp)
{
	struct xlog	*log = mp->m_log;
	int		needed = 0;

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	if (!xlog_cil_empty(log))
		return 0;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = 1;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = 1;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}

/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->xa_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL was empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->xa_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->xa_lock);

	return tail_lsn;
}

/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.  XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		free_bytes;
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		return 0;
	else if (tail_cycle < head_cycle) {
		ASSERT(tail_cycle == (head_cycle - 1));
		free_bytes = tail_bytes - head_bytes;
	} else {
		/*
		 * The reservation head is behind the tail.
		 * In this case we just want to return the size of the
		 * log as the amount of space left.
		 */
		xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
		xfs_alert(log->l_mp,
			  "  tail_cycle = %d, tail_bytes = %d",
			  tail_cycle, tail_bytes);
		xfs_alert(log->l_mp,
			  "  GH cycle = %d, GH bytes = %d",
			  head_cycle, head_bytes);
		ASSERT(0);
		free_bytes = log->l_logsize;
	}
	return free_bytes;
}


/*
 * Log function which is called when an io completes.
 *
 * The log manager needs its own routine, in order to control what
 * happens with the buffer after the write completes.
 */
static void
xlog_iodone(xfs_buf_t *bp)
{
	struct xlog_in_core	*iclog = bp->b_fspriv;
	struct xlog		*l = iclog->ic_log;
	int			aborted = 0;

	/*
	 * Race to shutdown the filesystem if we see an error or the iclog is in
	 * IOABORT state. The IOABORT state is only set in DEBUG mode to inject
	 * CRC errors into log recovery.
	 */
	if (XFS_TEST_ERROR(bp->b_error, l->l_mp, XFS_ERRTAG_IODONE_IOERR) ||
	    iclog->ic_state & XLOG_STATE_IOABORT) {
		if (iclog->ic_state & XLOG_STATE_IOABORT)
			iclog->ic_state &= ~XLOG_STATE_IOABORT;

		xfs_buf_ioerror_alert(bp, __func__);
		xfs_buf_stale(bp);
		xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
		/*
		 * This flag will be propagated to the trans-committed
		 * callback routines to let them know that the log-commit
		 * didn't succeed.
		 */
		aborted = XFS_LI_ABORTED;
	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
		aborted = XFS_LI_ABORTED;
	}

	/* log I/O is always issued ASYNC */
	ASSERT(bp->b_flags & XBF_ASYNC);
	xlog_state_done_syncing(iclog, aborted);

	/*
	 * drop the buffer lock now that we are done. Nothing references
	 * the buffer after this, so an unmount waiting on this lock can now
	 * tear it down safely. As such, it is unsafe to reference the buffer
	 * (bp) after the unlock as we could race with it being freed.
	 */
	xfs_buf_unlock(bp);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */

STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	int size;
	int xhdrs;

	if (mp->m_logbufs <= 0)
		log->l_iclog_bufs = XLOG_MAX_ICLOGS;
	else
		log->l_iclog_bufs = mp->m_logbufs;

	/*
	 * Buffer size passed in from mount system call.
	 */
	if (mp->m_logbsize > 0) {
		size = log->l_iclog_size = mp->m_logbsize;
		log->l_iclog_size_log = 0;
		while (size != 1) {
			log->l_iclog_size_log++;
			size >>= 1;
		}

		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
			/* # headers = size / 32k
			 * one header holds cycles from 32k of data
			 */

			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
				xhdrs++;
			log->l_iclog_hsize = xhdrs << BBSHIFT;
			log->l_iclog_heads = xhdrs;
		} else {
			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
			log->l_iclog_hsize = BBSIZE;
			log->l_iclog_heads = 1;
		}
		goto done;
	}

	/* All machines use 32kB buffers by default. */
	log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
	log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;

	/* the default log size is 16k or 32k which is one header sector */
	log->l_iclog_hsize = BBSIZE;
	log->l_iclog_heads = 1;

done:
	/* are we being asked to make the sizes selected above visible? */
	if (mp->m_logbufs == 0)
		mp->m_logbufs = log->l_iclog_bufs;
	if (mp->m_logbsize == 0)
		mp->m_logbsize = log->l_iclog_size;
}	/* xlog_get_iclog_buffer_size */


void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
			   msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog = NULL;
	xfs_buf_t		*bp;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize     = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	error = -EFSCORRUPTED;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				 log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				 log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
		    !xfs_sb_version_haslogv2(&mp->m_sb)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				 log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	/*
	 * Use a NULL block for the extra log buffer used during splits so that
	 * it will trigger errors if we ever try to do IO on it without first
	 * having set it up properly.
	 */
	error = -ENOMEM;
	bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL,
			   BTOBB(log->l_iclog_size), XBF_NO_IOACCT);
	if (!bp)
		goto out_free_log;

	/*
	 * The iclogbuf buffer locks are held over IO but we are not going to do
	 * IO yet.  Hence unlock the buffer so that the log IO path can grab it
	 * when appropriate.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_unlock(bp);

	/* use high priority wq for log I/O completion */
	bp->b_ioend_wq = mp->m_log_workqueue;
	bp->b_iodone = xlog_iodone;
	log->l_xbuf = bp;

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined.  It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory.  See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
		if (!*iclogp)
			goto out_free_iclog;

		iclog = *iclogp;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
					  BTOBB(log->l_iclog_size),
					  XBF_NO_IOACCT);
		if (!bp)
			goto out_free_iclog;

		ASSERT(xfs_buf_islocked(bp));
		xfs_buf_unlock(bp);

		/* use high priority wq for log I/O completion */
		bp->b_ioend_wq = mp->m_log_workqueue;
		bp->b_iodone = xlog_iodone;
		iclog->ic_bp = bp;
		iclog->ic_data = bp->b_addr;
#ifdef DEBUG
		log->l_iclog_bak[i] = &iclog->ic_header;
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		spin_lock_init(&iclog->ic_callback_lock);
		iclog->ic_callback_tail = &(iclog->ic_callback);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	error = xlog_cil_init(log);
	if (error)
		goto out_free_iclog;
	return log;

out_free_iclog:
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		if (iclog->ic_bp)
			xfs_buf_free(iclog->ic_bp);
		kmem_free(iclog);
	}
	spinlock_destroy(&log->l_icloglock);
	xfs_buf_free(log->l_xbuf);
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */


/*
 * Write out the commit record of a transaction associated with the given
 * ticket.  Return the lsn of the commit record.
 */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp)
{
	struct xfs_mount *mp = log->l_mp;
	int	error;
	struct xfs_log_iovec reg = {
		.i_addr = NULL,
		.i_len = 0,
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	ASSERT_ALWAYS(iclog);
	error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
			   XLOG_COMMIT_TRANS);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

/*
 * Push on the buffer cache code if we ever use more than 75% of the on-disk
 * log space.  This code pushes on the lsn which would supposedly free up
 * the 25% which we want to leave free.  We may need to adopt a policy which
 * pushes on an lsn which is further along in the log once we reach the high
 * water mark.  In this manner, we would be creating a low water mark.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * Set the threshold for the minimum number of free blocks in the
	 * log to the maximum of what the caller needs, one quarter of the
	 * log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = MAX(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
			      &threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last
	 * log record known to be on disk. Use a snapshot of the last sync lsn
	 * so that it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	/*
	 * Get the transaction layer to kick the dirty buffers out to
	 * disk asynchronously. No point in trying to do this if
	 * the filesystem is shutting down.
	 */
	if (!XLOG_FORCED_SHUTDOWN(log))
		xfs_ail_push(log->l_ailp, threshold_lsn);
}

/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum_update((char *)rhead,
				     sizeof(struct xlog_rec_header),
				     offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = size / XLOG_HEADER_CYCLE_SIZE;
		if (size % XLOG_HEADER_CYCLE_SIZE)
			xheads++;

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}

	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}

/*
 * The bdstrat callback function for log bufs. This gives us a central
 * place to trap bufs in case we get hit by a log I/O error and need to
 * shutdown. Actually, in practice, even when we didn't get a log error,
 * we transition the iclogs to IOERROR state *after* flushing all existing
 * iclogs to disk. This is because we don't want any more new transactions to
 * be started or completed afterwards.
 *
 * We lock the iclogbufs here so that we can serialise against IO completion
 * during unmount. We might be processing a shutdown triggered during unmount,
 * and that can occur asynchronously to the unmount thread, and hence we need
 * to ensure that completes before tearing down the iclogbufs. Hence we need
 * to hold the buffer lock across the log IO to achieve that.
 */
STATIC int
xlog_bdstrat(
	struct xfs_buf		*bp)
{
	struct xlog_in_core	*iclog = bp->b_fspriv;

	xfs_buf_lock(bp);
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here. Similarly, IO completion will unlock the
		 * buffer, so we don't do it here.
		 */
		return 0;
	}

	xfs_buf_submit(bp);
	return 0;
}

/*
 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
 * fashion.  Previously, we should have moved the current iclog
 * ptr in the log to point to the next available iclog.  This allows further
 * write to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header.  We replace
 * it with the current cycle count.  Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once.  In other words,
 * we can't have part of a 512 byte block written and part not written.  By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog.  No other thread can be in
 * this routine with the same iclog.  Changing contents of iclog can there-
 * fore be done without grabbing the state machine lock.  Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme.  Only
 * log_sync (and then only bwrite()) know about the fact that the log may
 * not start with block zero on a given device.  The log block start offset
 * is added immediately before calling bwrite().
 */

STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_buf_t	*bp;
	int		i;
	uint		count;		/* byte count of bwrite */
	uint		count_init;	/* initial count before roundup */
	int		roundoff;	/* roundoff to BB or stripe */
	int		split = 0;	/* split write into two regions */
	int		error;
	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
	int		size;

	XFS_STATS_INC(log->l_mp, xs_log_writes);
	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

	/* Add for LR header */
	count_init = log->l_iclog_hsize + iclog->ic_offset;

	/* Round out the log write size */
	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
		/* we have a v2 stripe unit to use */
		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
	} else {
		count = BBTOB(BTOBB(count_init));
	}
	roundoff = count - count_init;
	ASSERT(roundoff >= 0);
	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
		roundoff < log->l_mp->m_sb.sb_logsunit)
		||
		(log->l_mp->m_sb.sb_logsunit <= 1 &&
		 roundoff < BBTOB(1)));

	/* move grant heads by roundoff in sync */
	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);

	/* put cycle number in every block */
	xlog_pack_data(log, iclog, roundoff);

	/* real byte length */
	size = iclog->ic_offset;
	if (v2)
		size += roundoff;
	iclog->ic_header.h_len = cpu_to_be32(size);

	bp = iclog->ic_bp;
	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));

	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));

	/* Do we need to split this write into 2 parts? */
	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
		char		*dptr;

		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
		iclog->ic_bwritecnt = 2;

		/*
		 * Bump the cycle numbers at the start of each block in the
		 * part of the iclog that ends up in the buffer that gets
		 * written to the start of the log.
		 *
		 * Watch out for the header magic number case, though.
		 */
		dptr = (char *)&iclog->ic_header + count;
		for (i = 0; i < split; i += BBSIZE) {
			uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
			if (++cycle == XLOG_HEADER_MAGIC_NUM)
				cycle++;
			*(__be32 *)dptr = cpu_to_be32(cycle);

			dptr += BBSIZE;
		}
	} else {
		iclog->ic_bwritecnt = 1;
	}

	/* calculate the checksum */
	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
					    iclog->ic_datap, size);
	/*
	 * Intentionally corrupt the log record CRC based on the error injection
	 * frequency, if defined. This facilitates testing log recovery in the
	 * event of torn writes. Hence, set the IOABORT state to abort the log
	 * write on I/O completion and shutdown the fs. The subsequent mount
	 * detects the bad CRC and attempts to recover.
	 */
	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
		iclog->ic_state |= XLOG_STATE_IOABORT;
		xfs_warn(log->l_mp,
	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
Shutdown imminent.", 1918 be64_to_cpu(iclog->ic_header.h_lsn)); 1919 } 1920 1921 bp->b_io_length = BTOBB(count); 1922 bp->b_fspriv = iclog; 1923 bp->b_flags &= ~XBF_FLUSH; 1924 bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA); 1925 1926 /* 1927 * Flush the data device before flushing the log to make sure all meta 1928 * data written back from the AIL actually made it to disk before 1929 * stamping the new log tail LSN into the log buffer. For an external 1930 * log we need to issue the flush explicitly, and unfortunately 1931 * synchronously here; for an internal log we can simply use the block 1932 * layer state machine for preflushes. 1933 */ 1934 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) 1935 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); 1936 else 1937 bp->b_flags |= XBF_FLUSH; 1938 1939 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1940 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1941 1942 xlog_verify_iclog(log, iclog, count, true); 1943 1944 /* account for log which doesn't start at block #0 */ 1945 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); 1946 1947 /* 1948 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem 1949 * is shutting down. 1950 */ 1951 error = xlog_bdstrat(bp); 1952 if (error) { 1953 xfs_buf_ioerror_alert(bp, "xlog_sync"); 1954 return error; 1955 } 1956 if (split) { 1957 bp = iclog->ic_log->l_xbuf; 1958 XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ 1959 xfs_buf_associate_memory(bp, 1960 (char *)&iclog->ic_header + count, split); 1961 bp->b_fspriv = iclog; 1962 bp->b_flags &= ~XBF_FLUSH; 1963 bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA); 1964 1965 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1966 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1967 1968 /* account for internal log which doesn't start at block #0 */ 1969 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); 1970 error = xlog_bdstrat(bp); 1971 if (error) { 1972 xfs_buf_ioerror_alert(bp, "xlog_sync (split)"); 1973 return error; 1974 } 1975 } 1976 return 0; 1977 } /* xlog_sync */ 1978 1979 /* 1980 * Deallocate a log structure 1981 */ 1982 STATIC void 1983 xlog_dealloc_log( 1984 struct xlog *log) 1985 { 1986 xlog_in_core_t *iclog, *next_iclog; 1987 int i; 1988 1989 xlog_cil_destroy(log); 1990 1991 /* 1992 * Cycle all the iclogbuf locks to make sure all log IO completion 1993 * is done before we tear down these buffers. 1994 */ 1995 iclog = log->l_iclog; 1996 for (i = 0; i < log->l_iclog_bufs; i++) { 1997 xfs_buf_lock(iclog->ic_bp); 1998 xfs_buf_unlock(iclog->ic_bp); 1999 iclog = iclog->ic_next; 2000 } 2001 2002 /* 2003 * Always need to ensure that the extra buffer does not point to memory 2004 * owned by another log buffer before we free it. Also, cycle the lock 2005 * first to ensure we've completed IO on it. 2006 */ 2007 xfs_buf_lock(log->l_xbuf); 2008 xfs_buf_unlock(log->l_xbuf); 2009 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); 2010 xfs_buf_free(log->l_xbuf); 2011 2012 iclog = log->l_iclog; 2013 for (i = 0; i < log->l_iclog_bufs; i++) { 2014 xfs_buf_free(iclog->ic_bp); 2015 next_iclog = iclog->ic_next; 2016 kmem_free(iclog); 2017 iclog = next_iclog; 2018 } 2019 spinlock_destroy(&log->l_icloglock); 2020 2021 log->l_mp->m_log = NULL; 2022 kmem_free(log); 2023 } /* xlog_dealloc_log */ 2024 2025 /* 2026 * Update counters atomically now that memcpy is done. 
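 * ic_offset and h_num_logops are also read by the state machine under
 * l_icloglock (e.g. when deciding whether an iclog is full, or whether it
 * contains only the dummy covering record), so the copy accounting has to be
 * published under that same lock.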
2027 */ 2028 /* ARGSUSED */ 2029 static inline void 2030 xlog_state_finish_copy( 2031 struct xlog *log, 2032 struct xlog_in_core *iclog, 2033 int record_cnt, 2034 int copy_bytes) 2035 { 2036 spin_lock(&log->l_icloglock); 2037 2038 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 2039 iclog->ic_offset += copy_bytes; 2040 2041 spin_unlock(&log->l_icloglock); 2042 } /* xlog_state_finish_copy */ 2043 2044 2045 2046 2047 /* 2048 * print out info relating to regions written which consume 2049 * the reservation 2050 */ 2051 void 2052 xlog_print_tic_res( 2053 struct xfs_mount *mp, 2054 struct xlog_ticket *ticket) 2055 { 2056 uint i; 2057 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 2058 2059 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 2060 #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str 2061 static char *res_type_str[XLOG_REG_TYPE_MAX + 1] = { 2062 REG_TYPE_STR(BFORMAT, "bformat"), 2063 REG_TYPE_STR(BCHUNK, "bchunk"), 2064 REG_TYPE_STR(EFI_FORMAT, "efi_format"), 2065 REG_TYPE_STR(EFD_FORMAT, "efd_format"), 2066 REG_TYPE_STR(IFORMAT, "iformat"), 2067 REG_TYPE_STR(ICORE, "icore"), 2068 REG_TYPE_STR(IEXT, "iext"), 2069 REG_TYPE_STR(IBROOT, "ibroot"), 2070 REG_TYPE_STR(ILOCAL, "ilocal"), 2071 REG_TYPE_STR(IATTR_EXT, "iattr_ext"), 2072 REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), 2073 REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), 2074 REG_TYPE_STR(QFORMAT, "qformat"), 2075 REG_TYPE_STR(DQUOT, "dquot"), 2076 REG_TYPE_STR(QUOTAOFF, "quotaoff"), 2077 REG_TYPE_STR(LRHEADER, "LR header"), 2078 REG_TYPE_STR(UNMOUNT, "unmount"), 2079 REG_TYPE_STR(COMMIT, "commit"), 2080 REG_TYPE_STR(TRANSHDR, "trans header"), 2081 REG_TYPE_STR(ICREATE, "inode create") 2082 }; 2083 #undef REG_TYPE_STR 2084 2085 xfs_warn(mp, "ticket reservation summary:"); 2086 xfs_warn(mp, " unit res = %d bytes", 2087 ticket->t_unit_res); 2088 xfs_warn(mp, " current res = %d bytes", 2089 ticket->t_curr_res); 2090 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", 2091 ticket->t_res_arr_sum, ticket->t_res_o_flow); 2092 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", 2093 ticket->t_res_num_ophdrs, ophdr_spc); 2094 xfs_warn(mp, " ophdr + reg = %u bytes", 2095 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); 2096 xfs_warn(mp, " num regions = %u", 2097 ticket->t_res_num); 2098 2099 for (i = 0; i < ticket->t_res_num; i++) { 2100 uint r_type = ticket->t_res_arr[i].r_type; 2101 xfs_warn(mp, "region[%u]: %s - %u bytes", i, 2102 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 2103 "bad-rtype" : res_type_str[r_type]), 2104 ticket->t_res_arr[i].r_len); 2105 } 2106 } 2107 2108 /* 2109 * Print a summary of the transaction. 
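 * The dump includes the ticket reservation summary from xlog_print_tic_res()
 * and, for every log item, its log vector accounting plus a hex dump of at
 * most the first 32 bytes of each iovec payload.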
2110 */ 2111 void 2112 xlog_print_trans( 2113 struct xfs_trans *tp) 2114 { 2115 struct xfs_mount *mp = tp->t_mountp; 2116 struct xfs_log_item_desc *lidp; 2117 2118 /* dump core transaction and ticket info */ 2119 xfs_warn(mp, "transaction summary:"); 2120 xfs_warn(mp, " flags = 0x%x", tp->t_flags); 2121 2122 xlog_print_tic_res(mp, tp->t_ticket); 2123 2124 /* dump each log item */ 2125 list_for_each_entry(lidp, &tp->t_items, lid_trans) { 2126 struct xfs_log_item *lip = lidp->lid_item; 2127 struct xfs_log_vec *lv = lip->li_lv; 2128 struct xfs_log_iovec *vec; 2129 int i; 2130 2131 xfs_warn(mp, "log item: "); 2132 xfs_warn(mp, " type = 0x%x", lip->li_type); 2133 xfs_warn(mp, " flags = 0x%x", lip->li_flags); 2134 if (!lv) 2135 continue; 2136 xfs_warn(mp, " niovecs = %d", lv->lv_niovecs); 2137 xfs_warn(mp, " size = %d", lv->lv_size); 2138 xfs_warn(mp, " bytes = %d", lv->lv_bytes); 2139 xfs_warn(mp, " buf len = %d", lv->lv_buf_len); 2140 2141 /* dump each iovec for the log item */ 2142 vec = lv->lv_iovecp; 2143 for (i = 0; i < lv->lv_niovecs; i++) { 2144 int dumplen = min(vec->i_len, 32); 2145 2146 xfs_warn(mp, " iovec[%d]", i); 2147 xfs_warn(mp, " type = 0x%x", vec->i_type); 2148 xfs_warn(mp, " len = %d", vec->i_len); 2149 xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i); 2150 xfs_hex_dump(vec->i_addr, dumplen); 2151 2152 vec++; 2153 } 2154 } 2155 } 2156 2157 /* 2158 * Calculate the potential space needed by the log vector. Each region gets 2159 * its own xlog_op_header_t and may need to be double word aligned. 2160 */ 2161 static int 2162 xlog_write_calc_vec_length( 2163 struct xlog_ticket *ticket, 2164 struct xfs_log_vec *log_vector) 2165 { 2166 struct xfs_log_vec *lv; 2167 int headers = 0; 2168 int len = 0; 2169 int i; 2170 2171 /* acct for start rec of xact */ 2172 if (ticket->t_flags & XLOG_TIC_INITED) 2173 headers++; 2174 2175 for (lv = log_vector; lv; lv = lv->lv_next) { 2176 /* we don't write ordered log vectors */ 2177 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) 2178 continue; 2179 2180 headers += lv->lv_niovecs; 2181 2182 for (i = 0; i < lv->lv_niovecs; i++) { 2183 struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; 2184 2185 len += vecp->i_len; 2186 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); 2187 } 2188 } 2189 2190 ticket->t_res_num_ophdrs += headers; 2191 len += headers * sizeof(struct xlog_op_header); 2192 2193 return len; 2194 } 2195 2196 /* 2197 * If first write for transaction, insert start record We can't be trying to 2198 * commit if we are inited. We can't have any "partial_copy" if we are inited. 2199 */ 2200 static int 2201 xlog_write_start_rec( 2202 struct xlog_op_header *ophdr, 2203 struct xlog_ticket *ticket) 2204 { 2205 if (!(ticket->t_flags & XLOG_TIC_INITED)) 2206 return 0; 2207 2208 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2209 ophdr->oh_clientid = ticket->t_clientid; 2210 ophdr->oh_len = 0; 2211 ophdr->oh_flags = XLOG_START_TRANS; 2212 ophdr->oh_res2 = 0; 2213 2214 ticket->t_flags &= ~XLOG_TIC_INITED; 2215 2216 return sizeof(struct xlog_op_header); 2217 } 2218 2219 static xlog_op_header_t * 2220 xlog_write_setup_ophdr( 2221 struct xlog *log, 2222 struct xlog_op_header *ophdr, 2223 struct xlog_ticket *ticket, 2224 uint flags) 2225 { 2226 ophdr->oh_tid = cpu_to_be32(ticket->t_tid); 2227 ophdr->oh_clientid = ticket->t_clientid; 2228 ophdr->oh_res2 = 0; 2229 2230 /* are we copying a commit or unmount record? */ 2231 ophdr->oh_flags = flags; 2232 2233 /* 2234 * We've seen logs corrupted with bad transaction client ids. 
This 2235 * makes sure that XFS doesn't generate them on. Turn this into an EIO 2236 * and shut down the filesystem. 2237 */ 2238 switch (ophdr->oh_clientid) { 2239 case XFS_TRANSACTION: 2240 case XFS_VOLUME: 2241 case XFS_LOG: 2242 break; 2243 default: 2244 xfs_warn(log->l_mp, 2245 "Bad XFS transaction clientid 0x%x in ticket 0x%p", 2246 ophdr->oh_clientid, ticket); 2247 return NULL; 2248 } 2249 2250 return ophdr; 2251 } 2252 2253 /* 2254 * Set up the parameters of the region copy into the log. This has 2255 * to handle region write split across multiple log buffers - this 2256 * state is kept external to this function so that this code can 2257 * be written in an obvious, self documenting manner. 2258 */ 2259 static int 2260 xlog_write_setup_copy( 2261 struct xlog_ticket *ticket, 2262 struct xlog_op_header *ophdr, 2263 int space_available, 2264 int space_required, 2265 int *copy_off, 2266 int *copy_len, 2267 int *last_was_partial_copy, 2268 int *bytes_consumed) 2269 { 2270 int still_to_copy; 2271 2272 still_to_copy = space_required - *bytes_consumed; 2273 *copy_off = *bytes_consumed; 2274 2275 if (still_to_copy <= space_available) { 2276 /* write of region completes here */ 2277 *copy_len = still_to_copy; 2278 ophdr->oh_len = cpu_to_be32(*copy_len); 2279 if (*last_was_partial_copy) 2280 ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); 2281 *last_was_partial_copy = 0; 2282 *bytes_consumed = 0; 2283 return 0; 2284 } 2285 2286 /* partial write of region, needs extra log op header reservation */ 2287 *copy_len = space_available; 2288 ophdr->oh_len = cpu_to_be32(*copy_len); 2289 ophdr->oh_flags |= XLOG_CONTINUE_TRANS; 2290 if (*last_was_partial_copy) 2291 ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; 2292 *bytes_consumed += *copy_len; 2293 (*last_was_partial_copy)++; 2294 2295 /* account for new log op header */ 2296 ticket->t_curr_res -= sizeof(struct xlog_op_header); 2297 ticket->t_res_num_ophdrs++; 2298 2299 return sizeof(struct xlog_op_header); 2300 } 2301 2302 static int 2303 xlog_write_copy_finish( 2304 struct xlog *log, 2305 struct xlog_in_core *iclog, 2306 uint flags, 2307 int *record_cnt, 2308 int *data_cnt, 2309 int *partial_copy, 2310 int *partial_copy_len, 2311 int log_offset, 2312 struct xlog_in_core **commit_iclog) 2313 { 2314 if (*partial_copy) { 2315 /* 2316 * This iclog has already been marked WANT_SYNC by 2317 * xlog_state_get_iclog_space. 2318 */ 2319 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2320 *record_cnt = 0; 2321 *data_cnt = 0; 2322 return xlog_state_release_iclog(log, iclog); 2323 } 2324 2325 *partial_copy = 0; 2326 *partial_copy_len = 0; 2327 2328 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { 2329 /* no more space in this iclog - push it. */ 2330 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); 2331 *record_cnt = 0; 2332 *data_cnt = 0; 2333 2334 spin_lock(&log->l_icloglock); 2335 xlog_state_want_sync(log, iclog); 2336 spin_unlock(&log->l_icloglock); 2337 2338 if (!commit_iclog) 2339 return xlog_state_release_iclog(log, iclog); 2340 ASSERT(flags & XLOG_COMMIT_TRANS); 2341 *commit_iclog = iclog; 2342 } 2343 2344 return 0; 2345 } 2346 2347 /* 2348 * Write some region out to in-core log 2349 * 2350 * This will be called when writing externally provided regions or when 2351 * writing out a commit record for a given transaction. 2352 * 2353 * General algorithm: 2354 * 1. Find total length of this write. This may include adding to the 2355 * lengths passed in. 2356 * 2. Check whether we violate the tickets reservation. 
2357 * 3. While writing to this iclog 2358 * A. Reserve as much space in this iclog as can get 2359 * B. If this is first write, save away start lsn 2360 * C. While writing this region: 2361 * 1. If first write of transaction, write start record 2362 * 2. Write log operation header (header per region) 2363 * 3. Find out if we can fit entire region into this iclog 2364 * 4. Potentially, verify destination memcpy ptr 2365 * 5. Memcpy (partial) region 2366 * 6. If partial copy, release iclog; otherwise, continue 2367 * copying more regions into current iclog 2368 * 4. Mark want sync bit (in simulation mode) 2369 * 5. Release iclog for potential flush to on-disk log. 2370 * 2371 * ERRORS: 2372 * 1. Panic if reservation is overrun. This should never happen since 2373 * reservation amounts are generated internal to the filesystem. 2374 * NOTES: 2375 * 1. Tickets are single threaded data structures. 2376 * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the 2377 * syncing routine. When a single log_write region needs to span 2378 * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set 2379 * on all log operation writes which don't contain the end of the 2380 * region. The XLOG_END_TRANS bit is used for the in-core log 2381 * operation which contains the end of the continued log_write region. 2382 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, 2383 * we don't really know exactly how much space will be used. As a result, 2384 * we don't update ic_offset until the end when we know exactly how many 2385 * bytes have been written out. 2386 */ 2387 int 2388 xlog_write( 2389 struct xlog *log, 2390 struct xfs_log_vec *log_vector, 2391 struct xlog_ticket *ticket, 2392 xfs_lsn_t *start_lsn, 2393 struct xlog_in_core **commit_iclog, 2394 uint flags) 2395 { 2396 struct xlog_in_core *iclog = NULL; 2397 struct xfs_log_iovec *vecp; 2398 struct xfs_log_vec *lv; 2399 int len; 2400 int index; 2401 int partial_copy = 0; 2402 int partial_copy_len = 0; 2403 int contwr = 0; 2404 int record_cnt = 0; 2405 int data_cnt = 0; 2406 int error; 2407 2408 *start_lsn = 0; 2409 2410 len = xlog_write_calc_vec_length(ticket, log_vector); 2411 2412 /* 2413 * Region headers and bytes are already accounted for. 2414 * We only need to take into account start records and 2415 * split regions in this function. 2416 */ 2417 if (ticket->t_flags & XLOG_TIC_INITED) 2418 ticket->t_curr_res -= sizeof(xlog_op_header_t); 2419 2420 /* 2421 * Commit record headers need to be accounted for. These 2422 * come in as separate writes so are easy to detect. 2423 */ 2424 if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS)) 2425 ticket->t_curr_res -= sizeof(xlog_op_header_t); 2426 2427 if (ticket->t_curr_res < 0) { 2428 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, 2429 "ctx ticket reservation ran out. Need to up reservation"); 2430 xlog_print_tic_res(log->l_mp, ticket); 2431 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); 2432 } 2433 2434 index = 0; 2435 lv = log_vector; 2436 vecp = lv->lv_iovecp; 2437 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2438 void *ptr; 2439 int log_offset; 2440 2441 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 2442 &contwr, &log_offset); 2443 if (error) 2444 return error; 2445 2446 ASSERT(log_offset <= iclog->ic_size - 1); 2447 ptr = iclog->ic_datap + log_offset; 2448 2449 /* start_lsn is the first lsn written to. That's all we need. 
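 * (An LSN packs the cycle number into the high 32 bits and the block number
 * into the low 32 bits - see xlog_assign_lsn() - so recording the LSN of the
 * first iclog we copy into is enough to order this write against everything
 * else in the log.)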
*/ 2450 if (!*start_lsn) 2451 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2452 2453 /* 2454 * This loop writes out as many regions as can fit in the amount 2455 * of space which was allocated by xlog_state_get_iclog_space(). 2456 */ 2457 while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { 2458 struct xfs_log_iovec *reg; 2459 struct xlog_op_header *ophdr; 2460 int start_rec_copy; 2461 int copy_len; 2462 int copy_off; 2463 bool ordered = false; 2464 2465 /* ordered log vectors have no regions to write */ 2466 if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { 2467 ASSERT(lv->lv_niovecs == 0); 2468 ordered = true; 2469 goto next_lv; 2470 } 2471 2472 reg = &vecp[index]; 2473 ASSERT(reg->i_len % sizeof(int32_t) == 0); 2474 ASSERT((unsigned long)ptr % sizeof(int32_t) == 0); 2475 2476 start_rec_copy = xlog_write_start_rec(ptr, ticket); 2477 if (start_rec_copy) { 2478 record_cnt++; 2479 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2480 start_rec_copy); 2481 } 2482 2483 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); 2484 if (!ophdr) 2485 return -EIO; 2486 2487 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2488 sizeof(struct xlog_op_header)); 2489 2490 len += xlog_write_setup_copy(ticket, ophdr, 2491 iclog->ic_size-log_offset, 2492 reg->i_len, 2493 &copy_off, &copy_len, 2494 &partial_copy, 2495 &partial_copy_len); 2496 xlog_verify_dest_ptr(log, ptr); 2497 2498 /* 2499 * Copy region. 2500 * 2501 * Unmount records just log an opheader, so can have 2502 * empty payloads with no data region to copy. Hence we 2503 * only copy the payload if the vector says it has data 2504 * to copy. 2505 */ 2506 ASSERT(copy_len >= 0); 2507 if (copy_len > 0) { 2508 memcpy(ptr, reg->i_addr + copy_off, copy_len); 2509 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2510 copy_len); 2511 } 2512 copy_len += start_rec_copy + sizeof(xlog_op_header_t); 2513 record_cnt++; 2514 data_cnt += contwr ? copy_len : 0; 2515 2516 error = xlog_write_copy_finish(log, iclog, flags, 2517 &record_cnt, &data_cnt, 2518 &partial_copy, 2519 &partial_copy_len, 2520 log_offset, 2521 commit_iclog); 2522 if (error) 2523 return error; 2524 2525 /* 2526 * if we had a partial copy, we need to get more iclog 2527 * space but we don't want to increment the region 2528 * index because there is still more in this region to 2529 * write. 2530 * 2531 * If we completed writing this region, and we flushed 2532 * the iclog (indicated by resetting of the record 2533 * count), then we also need to get more log space. If 2534 * this was the last record, though, we are done and 2535 * can just return. 2536 */ 2537 if (partial_copy) 2538 break; 2539 2540 if (++index == lv->lv_niovecs) { 2541 next_lv: 2542 lv = lv->lv_next; 2543 index = 0; 2544 if (lv) 2545 vecp = lv->lv_iovecp; 2546 } 2547 if (record_cnt == 0 && !ordered) { 2548 if (!lv) 2549 return 0; 2550 break; 2551 } 2552 } 2553 } 2554 2555 ASSERT(len == 0); 2556 2557 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 2558 if (!commit_iclog) 2559 return xlog_state_release_iclog(log, iclog); 2560 2561 ASSERT(flags & XLOG_COMMIT_TRANS); 2562 *commit_iclog = iclog; 2563 return 0; 2564 } 2565 2566 2567 /***************************************************************************** 2568 * 2569 * State Machine functions 2570 * 2571 ***************************************************************************** 2572 */ 2573 2574 /* Clean iclogs starting from the head. This ordering must be 2575 * maintained, so an iclog doesn't become ACTIVE beyond one that 2576 * is SYNCING.
This is also required to maintain the notion that we use 2577 * a ordered wait queue to hold off would be writers to the log when every 2578 * iclog is trying to sync to disk. 2579 * 2580 * State Change: DIRTY -> ACTIVE 2581 */ 2582 STATIC void 2583 xlog_state_clean_log( 2584 struct xlog *log) 2585 { 2586 xlog_in_core_t *iclog; 2587 int changed = 0; 2588 2589 iclog = log->l_iclog; 2590 do { 2591 if (iclog->ic_state == XLOG_STATE_DIRTY) { 2592 iclog->ic_state = XLOG_STATE_ACTIVE; 2593 iclog->ic_offset = 0; 2594 ASSERT(iclog->ic_callback == NULL); 2595 /* 2596 * If the number of ops in this iclog indicate it just 2597 * contains the dummy transaction, we can 2598 * change state into IDLE (the second time around). 2599 * Otherwise we should change the state into 2600 * NEED a dummy. 2601 * We don't need to cover the dummy. 2602 */ 2603 if (!changed && 2604 (be32_to_cpu(iclog->ic_header.h_num_logops) == 2605 XLOG_COVER_OPS)) { 2606 changed = 1; 2607 } else { 2608 /* 2609 * We have two dirty iclogs so start over 2610 * This could also be num of ops indicates 2611 * this is not the dummy going out. 2612 */ 2613 changed = 2; 2614 } 2615 iclog->ic_header.h_num_logops = 0; 2616 memset(iclog->ic_header.h_cycle_data, 0, 2617 sizeof(iclog->ic_header.h_cycle_data)); 2618 iclog->ic_header.h_lsn = 0; 2619 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) 2620 /* do nothing */; 2621 else 2622 break; /* stop cleaning */ 2623 iclog = iclog->ic_next; 2624 } while (iclog != log->l_iclog); 2625 2626 /* log is locked when we are called */ 2627 /* 2628 * Change state for the dummy log recording. 2629 * We usually go to NEED. But we go to NEED2 if the changed indicates 2630 * we are done writing the dummy record. 2631 * If we are done with the second dummy recored (DONE2), then 2632 * we go to IDLE. 
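 *
 * A rough sketch of the transitions driven by the switch below (the DONE
 * states are entered elsewhere, when the dummy covering record itself is
 * issued):
 *
 *	IDLE, NEED, NEED2  ->  NEED   (real work was cleaned)
 *	DONE               ->  NEED2  if the dummy was cleaned, else NEED
 *	DONE2              ->  IDLE   if the dummy was cleaned, else NEED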
2633 */ 2634 if (changed) { 2635 switch (log->l_covered_state) { 2636 case XLOG_STATE_COVER_IDLE: 2637 case XLOG_STATE_COVER_NEED: 2638 case XLOG_STATE_COVER_NEED2: 2639 log->l_covered_state = XLOG_STATE_COVER_NEED; 2640 break; 2641 2642 case XLOG_STATE_COVER_DONE: 2643 if (changed == 1) 2644 log->l_covered_state = XLOG_STATE_COVER_NEED2; 2645 else 2646 log->l_covered_state = XLOG_STATE_COVER_NEED; 2647 break; 2648 2649 case XLOG_STATE_COVER_DONE2: 2650 if (changed == 1) 2651 log->l_covered_state = XLOG_STATE_COVER_IDLE; 2652 else 2653 log->l_covered_state = XLOG_STATE_COVER_NEED; 2654 break; 2655 2656 default: 2657 ASSERT(0); 2658 } 2659 } 2660 } /* xlog_state_clean_log */ 2661 2662 STATIC xfs_lsn_t 2663 xlog_get_lowest_lsn( 2664 struct xlog *log) 2665 { 2666 xlog_in_core_t *lsn_log; 2667 xfs_lsn_t lowest_lsn, lsn; 2668 2669 lsn_log = log->l_iclog; 2670 lowest_lsn = 0; 2671 do { 2672 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { 2673 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); 2674 if ((lsn && !lowest_lsn) || 2675 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { 2676 lowest_lsn = lsn; 2677 } 2678 } 2679 lsn_log = lsn_log->ic_next; 2680 } while (lsn_log != log->l_iclog); 2681 return lowest_lsn; 2682 } 2683 2684 2685 STATIC void 2686 xlog_state_do_callback( 2687 struct xlog *log, 2688 int aborted, 2689 struct xlog_in_core *ciclog) 2690 { 2691 xlog_in_core_t *iclog; 2692 xlog_in_core_t *first_iclog; /* used to know when we've 2693 * processed all iclogs once */ 2694 xfs_log_callback_t *cb, *cb_next; 2695 int flushcnt = 0; 2696 xfs_lsn_t lowest_lsn; 2697 int ioerrors; /* counter: iclogs with errors */ 2698 int loopdidcallbacks; /* flag: inner loop did callbacks*/ 2699 int funcdidcallbacks; /* flag: function did callbacks */ 2700 int repeats; /* for issuing console warnings if 2701 * looping too many times */ 2702 int wake = 0; 2703 2704 spin_lock(&log->l_icloglock); 2705 first_iclog = iclog = log->l_iclog; 2706 ioerrors = 0; 2707 funcdidcallbacks = 0; 2708 repeats = 0; 2709 2710 do { 2711 /* 2712 * Scan all iclogs starting with the one pointed to by the 2713 * log. Reset this starting point each time the log is 2714 * unlocked (during callbacks). 2715 * 2716 * Keep looping through iclogs until one full pass is made 2717 * without running any callbacks. 2718 */ 2719 first_iclog = log->l_iclog; 2720 iclog = log->l_iclog; 2721 loopdidcallbacks = 0; 2722 repeats++; 2723 2724 do { 2725 2726 /* skip all iclogs in the ACTIVE & DIRTY states */ 2727 if (iclog->ic_state & 2728 (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) { 2729 iclog = iclog->ic_next; 2730 continue; 2731 } 2732 2733 /* 2734 * Between marking a filesystem SHUTDOWN and stopping 2735 * the log, we do flush all iclogs to disk (if there 2736 * wasn't a log I/O error). So, we do want things to 2737 * go smoothly in case of just a SHUTDOWN w/o a 2738 * LOG_IO_ERROR. 2739 */ 2740 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { 2741 /* 2742 * Can only perform callbacks in order. Since 2743 * this iclog is not in the DONE_SYNC/ 2744 * DO_CALLBACK state, we skip the rest and 2745 * just try to clean up. If we set our iclog 2746 * to DO_CALLBACK, we will not process it when 2747 * we retry since a previous iclog is in the 2748 * CALLBACK and the state cannot change since 2749 * we are holding the l_icloglock. 
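 *
 * The deferred iclog is not lost: when the earlier iclog completes its
 * I/O it re-enters this function, and the outer loop keeps repeating until
 * a full pass runs no callbacks, so the DO_CALLBACK iclog is eventually
 * processed in order.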
2750 */ 2751 if (!(iclog->ic_state & 2752 (XLOG_STATE_DONE_SYNC | 2753 XLOG_STATE_DO_CALLBACK))) { 2754 if (ciclog && (ciclog->ic_state == 2755 XLOG_STATE_DONE_SYNC)) { 2756 ciclog->ic_state = XLOG_STATE_DO_CALLBACK; 2757 } 2758 break; 2759 } 2760 /* 2761 * We now have an iclog that is in either the 2762 * DO_CALLBACK or DONE_SYNC states. The other 2763 * states (WANT_SYNC, SYNCING, or CALLBACK were 2764 * caught by the above if and are going to 2765 * clean (i.e. we aren't doing their callbacks) 2766 * see the above if. 2767 */ 2768 2769 /* 2770 * We will do one more check here to see if we 2771 * have chased our tail around. 2772 */ 2773 2774 lowest_lsn = xlog_get_lowest_lsn(log); 2775 if (lowest_lsn && 2776 XFS_LSN_CMP(lowest_lsn, 2777 be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { 2778 iclog = iclog->ic_next; 2779 continue; /* Leave this iclog for 2780 * another thread */ 2781 } 2782 2783 iclog->ic_state = XLOG_STATE_CALLBACK; 2784 2785 2786 /* 2787 * Completion of a iclog IO does not imply that 2788 * a transaction has completed, as transactions 2789 * can be large enough to span many iclogs. We 2790 * cannot change the tail of the log half way 2791 * through a transaction as this may be the only 2792 * transaction in the log and moving th etail to 2793 * point to the middle of it will prevent 2794 * recovery from finding the start of the 2795 * transaction. Hence we should only update the 2796 * last_sync_lsn if this iclog contains 2797 * transaction completion callbacks on it. 2798 * 2799 * We have to do this before we drop the 2800 * icloglock to ensure we are the only one that 2801 * can update it. 2802 */ 2803 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), 2804 be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); 2805 if (iclog->ic_callback) 2806 atomic64_set(&log->l_last_sync_lsn, 2807 be64_to_cpu(iclog->ic_header.h_lsn)); 2808 2809 } else 2810 ioerrors++; 2811 2812 spin_unlock(&log->l_icloglock); 2813 2814 /* 2815 * Keep processing entries in the callback list until 2816 * we come around and it is empty. We need to 2817 * atomically see that the list is empty and change the 2818 * state to DIRTY so that we don't miss any more 2819 * callbacks being added. 2820 */ 2821 spin_lock(&iclog->ic_callback_lock); 2822 cb = iclog->ic_callback; 2823 while (cb) { 2824 iclog->ic_callback_tail = &(iclog->ic_callback); 2825 iclog->ic_callback = NULL; 2826 spin_unlock(&iclog->ic_callback_lock); 2827 2828 /* perform callbacks in the order given */ 2829 for (; cb; cb = cb_next) { 2830 cb_next = cb->cb_next; 2831 cb->cb_func(cb->cb_arg, aborted); 2832 } 2833 spin_lock(&iclog->ic_callback_lock); 2834 cb = iclog->ic_callback; 2835 } 2836 2837 loopdidcallbacks++; 2838 funcdidcallbacks++; 2839 2840 spin_lock(&log->l_icloglock); 2841 ASSERT(iclog->ic_callback == NULL); 2842 spin_unlock(&iclog->ic_callback_lock); 2843 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) 2844 iclog->ic_state = XLOG_STATE_DIRTY; 2845 2846 /* 2847 * Transition from DIRTY to ACTIVE if applicable. 2848 * NOP if STATE_IOERROR. 
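 * This also advances the log covering state machine (l_covered_state).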
2849 */ 2850 xlog_state_clean_log(log); 2851 2852 /* wake up threads waiting in xfs_log_force() */ 2853 wake_up_all(&iclog->ic_force_wait); 2854 2855 iclog = iclog->ic_next; 2856 } while (first_iclog != iclog); 2857 2858 if (repeats > 5000) { 2859 flushcnt += repeats; 2860 repeats = 0; 2861 xfs_warn(log->l_mp, 2862 "%s: possible infinite loop (%d iterations)", 2863 __func__, flushcnt); 2864 } 2865 } while (!ioerrors && loopdidcallbacks); 2866 2867 #ifdef DEBUG 2868 /* 2869 * Make one last gasp attempt to see if iclogs are being left in limbo. 2870 * If the above loop finds an iclog earlier than the current iclog and 2871 * in one of the syncing states, the current iclog is put into 2872 * DO_CALLBACK and the callbacks are deferred to the completion of the 2873 * earlier iclog. Walk the iclogs in order and make sure that no iclog 2874 * is in DO_CALLBACK unless an earlier iclog is in one of the syncing 2875 * states. 2876 * 2877 * Note that SYNCING|IOABORT is a valid state so we cannot just check 2878 * for ic_state == SYNCING. 2879 */ 2880 if (funcdidcallbacks) { 2881 first_iclog = iclog = log->l_iclog; 2882 do { 2883 ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK); 2884 /* 2885 * Terminate the loop if iclogs are found in states 2886 * which will cause other threads to clean up iclogs. 2887 * 2888 * SYNCING - i/o completion will go through logs 2889 * DONE_SYNC - interrupt thread should be waiting for 2890 * l_icloglock 2891 * IOERROR - give up hope all ye who enter here 2892 */ 2893 if (iclog->ic_state == XLOG_STATE_WANT_SYNC || 2894 iclog->ic_state & XLOG_STATE_SYNCING || 2895 iclog->ic_state == XLOG_STATE_DONE_SYNC || 2896 iclog->ic_state == XLOG_STATE_IOERROR ) 2897 break; 2898 iclog = iclog->ic_next; 2899 } while (first_iclog != iclog); 2900 } 2901 #endif 2902 2903 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) 2904 wake = 1; 2905 spin_unlock(&log->l_icloglock); 2906 2907 if (wake) 2908 wake_up_all(&log->l_flush_wait); 2909 } 2910 2911 2912 /* 2913 * Finish transitioning this iclog to the dirty state. 2914 * 2915 * Make sure that we completely execute this routine only when this is 2916 * the last call to the iclog. There is a good chance that iclog flushes, 2917 * when we reach the end of the physical log, get turned into 2 separate 2918 * calls to bwrite. Hence, one iclog flush could generate two calls to this 2919 * routine. By using the reference count bwritecnt, we guarantee that only 2920 * the second completion goes through. 2921 * 2922 * Callbacks could take time, so they are done outside the scope of the 2923 * global state machine log lock. 2924 */ 2925 STATIC void 2926 xlog_state_done_syncing( 2927 xlog_in_core_t *iclog, 2928 int aborted) 2929 { 2930 struct xlog *log = iclog->ic_log; 2931 2932 spin_lock(&log->l_icloglock); 2933 2934 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || 2935 iclog->ic_state == XLOG_STATE_IOERROR); 2936 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2937 ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); 2938 2939 2940 /* 2941 * If we got an error, either on the first buffer, or in the case of 2942 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR, 2943 * and none should ever be attempted to be written to disk 2944 * again. 
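 *
 * For example, assuming no I/O error: a wrapped iclog is submitted as two
 * buffers, so ic_bwritecnt == 2; the first completion decrements it to 1 and
 * bails out below, and only the second completion moves the iclog to
 * DONE_SYNC. An unwrapped iclog has ic_bwritecnt == 1 and proceeds on its
 * single completion.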
2945 */ 2946 if (iclog->ic_state != XLOG_STATE_IOERROR) { 2947 if (--iclog->ic_bwritecnt == 1) { 2948 spin_unlock(&log->l_icloglock); 2949 return; 2950 } 2951 iclog->ic_state = XLOG_STATE_DONE_SYNC; 2952 } 2953 2954 /* 2955 * Someone could be sleeping prior to writing out the next 2956 * iclog buffer, we wake them all, one will get to do the 2957 * I/O, the others get to wait for the result. 2958 */ 2959 wake_up_all(&iclog->ic_write_wait); 2960 spin_unlock(&log->l_icloglock); 2961 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ 2962 } /* xlog_state_done_syncing */ 2963 2964 2965 /* 2966 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2967 * sleep. We wait on the flush queue on the head iclog as that should be 2968 * the first iclog to complete flushing. Hence if all iclogs are syncing, 2969 * we will wait here and all new writes will sleep until a sync completes. 2970 * 2971 * The in-core logs are used in a circular fashion. They are not used 2972 * out-of-order even when an iclog past the head is free. 2973 * 2974 * return: 2975 * * log_offset where xlog_write() can start writing into the in-core 2976 * log's data space. 2977 * * in-core log pointer to which xlog_write() should write. 2978 * * boolean indicating this is a continued write to an in-core log. 2979 * If this is the last write, then the in-core log's offset field 2980 * needs to be incremented, depending on the amount of data which 2981 * is copied. 2982 */ 2983 STATIC int 2984 xlog_state_get_iclog_space( 2985 struct xlog *log, 2986 int len, 2987 struct xlog_in_core **iclogp, 2988 struct xlog_ticket *ticket, 2989 int *continued_write, 2990 int *logoffsetp) 2991 { 2992 int log_offset; 2993 xlog_rec_header_t *head; 2994 xlog_in_core_t *iclog; 2995 int error; 2996 2997 restart: 2998 spin_lock(&log->l_icloglock); 2999 if (XLOG_FORCED_SHUTDOWN(log)) { 3000 spin_unlock(&log->l_icloglock); 3001 return -EIO; 3002 } 3003 3004 iclog = log->l_iclog; 3005 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 3006 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); 3007 3008 /* Wait for log writes to have flushed */ 3009 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 3010 goto restart; 3011 } 3012 3013 head = &iclog->ic_header; 3014 3015 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 3016 log_offset = iclog->ic_offset; 3017 3018 /* On the 1st write to an iclog, figure out lsn. This works 3019 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 3020 * committing to. If the offset is set, that's how many blocks 3021 * must be written. 3022 */ 3023 if (log_offset == 0) { 3024 ticket->t_curr_res -= log->l_iclog_hsize; 3025 xlog_tic_add_region(ticket, 3026 log->l_iclog_hsize, 3027 XLOG_REG_TYPE_LRHEADER); 3028 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 3029 head->h_lsn = cpu_to_be64( 3030 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 3031 ASSERT(log->l_curr_block >= 0); 3032 } 3033 3034 /* If there is enough room to write everything, then do it. Otherwise, 3035 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 3036 * bit is on, so this will get flushed out. Don't update ic_offset 3037 * until you know exactly how many bytes get copied. Therefore, wait 3038 * until later to update ic_offset. 3039 * 3040 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 3041 * can fit into remaining data section. 
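 * (Two op headers rather than one presumably because the first region of a
 * transaction may need both a start-record header and its own op header
 * before any payload bytes are copied.)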
3042 */ 3043 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { 3044 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3045 3046 /* 3047 * If I'm the only one writing to this iclog, sync it to disk. 3048 * We need to do an atomic compare and decrement here to avoid 3049 * racing with concurrent atomic_dec_and_lock() calls in 3050 * xlog_state_release_iclog() when there is more than one 3051 * reference to the iclog. 3052 */ 3053 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) { 3054 /* we are the only one */ 3055 spin_unlock(&log->l_icloglock); 3056 error = xlog_state_release_iclog(log, iclog); 3057 if (error) 3058 return error; 3059 } else { 3060 spin_unlock(&log->l_icloglock); 3061 } 3062 goto restart; 3063 } 3064 3065 /* Do we have enough room to write the full amount in the remainder 3066 * of this iclog? Or must we continue a write on the next iclog and 3067 * mark this iclog as completely taken? In the case where we switch 3068 * iclogs (to mark it taken), this particular iclog will release/sync 3069 * to disk in xlog_write(). 3070 */ 3071 if (len <= iclog->ic_size - iclog->ic_offset) { 3072 *continued_write = 0; 3073 iclog->ic_offset += len; 3074 } else { 3075 *continued_write = 1; 3076 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); 3077 } 3078 *iclogp = iclog; 3079 3080 ASSERT(iclog->ic_offset <= iclog->ic_size); 3081 spin_unlock(&log->l_icloglock); 3082 3083 *logoffsetp = log_offset; 3084 return 0; 3085 } /* xlog_state_get_iclog_space */ 3086 3087 /* The first cnt-1 times through here we don't need to 3088 * move the grant write head because the permanent 3089 * reservation has reserved cnt times the unit amount. 3090 * Release part of current permanent unit reservation and 3091 * reset current reservation to be one units worth. Also 3092 * move grant reservation head forward. 3093 */ 3094 STATIC void 3095 xlog_regrant_reserve_log_space( 3096 struct xlog *log, 3097 struct xlog_ticket *ticket) 3098 { 3099 trace_xfs_log_regrant_reserve_enter(log, ticket); 3100 3101 if (ticket->t_cnt > 0) 3102 ticket->t_cnt--; 3103 3104 xlog_grant_sub_space(log, &log->l_reserve_head.grant, 3105 ticket->t_curr_res); 3106 xlog_grant_sub_space(log, &log->l_write_head.grant, 3107 ticket->t_curr_res); 3108 ticket->t_curr_res = ticket->t_unit_res; 3109 xlog_tic_reset_res(ticket); 3110 3111 trace_xfs_log_regrant_reserve_sub(log, ticket); 3112 3113 /* just return if we still have some of the pre-reserved space */ 3114 if (ticket->t_cnt > 0) 3115 return; 3116 3117 xlog_grant_add_space(log, &log->l_reserve_head.grant, 3118 ticket->t_unit_res); 3119 3120 trace_xfs_log_regrant_reserve_exit(log, ticket); 3121 3122 ticket->t_curr_res = ticket->t_unit_res; 3123 xlog_tic_reset_res(ticket); 3124 } /* xlog_regrant_reserve_log_space */ 3125 3126 3127 /* 3128 * Give back the space left from a reservation. 3129 * 3130 * All the information we need to make a correct determination of space left 3131 * is present. For non-permanent reservations, things are quite easy. The 3132 * count should have been decremented to zero. We only need to deal with the 3133 * space remaining in the current reservation part of the ticket. If the 3134 * ticket contains a permanent reservation, there may be left over space which 3135 * needs to be released. A count of N means that N-1 refills of the current 3136 * reservation can be done before we need to ask for more space. The first 3137 * one goes to fill up the first current reservation. 
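 * For illustration: a permanent ticket taken with cnt == 3 can go through
 * xlog_regrant_reserve_log_space() twice purely on its pre-reserved space -
 * each pass decrements t_cnt and resets t_curr_res to a full t_unit_res -
 * before the third pass has to take new space from the reserve grant head.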
Once we run out of 3138 * space, the count will stay at zero and the only space remaining will be 3139 * in the current reservation field. 3140 */ 3141 STATIC void 3142 xlog_ungrant_log_space( 3143 struct xlog *log, 3144 struct xlog_ticket *ticket) 3145 { 3146 int bytes; 3147 3148 if (ticket->t_cnt > 0) 3149 ticket->t_cnt--; 3150 3151 trace_xfs_log_ungrant_enter(log, ticket); 3152 trace_xfs_log_ungrant_sub(log, ticket); 3153 3154 /* 3155 * If this is a permanent reservation ticket, we may be able to free 3156 * up more space based on the remaining count. 3157 */ 3158 bytes = ticket->t_curr_res; 3159 if (ticket->t_cnt > 0) { 3160 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 3161 bytes += ticket->t_unit_res*ticket->t_cnt; 3162 } 3163 3164 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); 3165 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); 3166 3167 trace_xfs_log_ungrant_exit(log, ticket); 3168 3169 xfs_log_space_wake(log->l_mp); 3170 } 3171 3172 /* 3173 * Flush iclog to disk if this is the last reference to the given iclog and 3174 * the WANT_SYNC bit is set. 3175 * 3176 * When this function is entered, the iclog is not necessarily in the 3177 * WANT_SYNC state. It may be sitting around waiting to get filled. 3178 * 3179 * 3180 */ 3181 STATIC int 3182 xlog_state_release_iclog( 3183 struct xlog *log, 3184 struct xlog_in_core *iclog) 3185 { 3186 int sync = 0; /* do we sync? */ 3187 3188 if (iclog->ic_state & XLOG_STATE_IOERROR) 3189 return -EIO; 3190 3191 ASSERT(atomic_read(&iclog->ic_refcnt) > 0); 3192 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) 3193 return 0; 3194 3195 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3196 spin_unlock(&log->l_icloglock); 3197 return -EIO; 3198 } 3199 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || 3200 iclog->ic_state == XLOG_STATE_WANT_SYNC); 3201 3202 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { 3203 /* update tail before writing to iclog */ 3204 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); 3205 sync++; 3206 iclog->ic_state = XLOG_STATE_SYNCING; 3207 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); 3208 xlog_verify_tail_lsn(log, iclog, tail_lsn); 3209 /* cycle incremented when incrementing curr_block */ 3210 } 3211 spin_unlock(&log->l_icloglock); 3212 3213 /* 3214 * We let the log lock go, so it's possible that we hit a log I/O 3215 * error or some other SHUTDOWN condition that marks the iclog 3216 * as XLOG_STATE_IOERROR before the bwrite. However, we know that 3217 * this iclog has consistent data, so we ignore IOERROR 3218 * flags after this point. 3219 */ 3220 if (sync) 3221 return xlog_sync(log, iclog); 3222 return 0; 3223 } /* xlog_state_release_iclog */ 3224 3225 3226 /* 3227 * This routine will mark the current iclog in the ring as WANT_SYNC 3228 * and move the current iclog pointer to the next iclog in the ring. 3229 * When this routine is called from xlog_state_get_iclog_space(), the 3230 * exact size of the iclog has not yet been determined. All we know is 3231 * that every data block. We have run out of space in this log record. 
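 * (Callers that do know the final size pass it in as eventual_size;
 * otherwise we fall back to the bytes copied so far, ic_offset, when
 * advancing l_curr_block below.)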
3232 */ 3233 STATIC void 3234 xlog_state_switch_iclogs( 3235 struct xlog *log, 3236 struct xlog_in_core *iclog, 3237 int eventual_size) 3238 { 3239 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); 3240 if (!eventual_size) 3241 eventual_size = iclog->ic_offset; 3242 iclog->ic_state = XLOG_STATE_WANT_SYNC; 3243 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); 3244 log->l_prev_block = log->l_curr_block; 3245 log->l_prev_cycle = log->l_curr_cycle; 3246 3247 /* roll log?: ic_offset changed later */ 3248 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); 3249 3250 /* Round up to next log-sunit */ 3251 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && 3252 log->l_mp->m_sb.sb_logsunit > 1) { 3253 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); 3254 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); 3255 } 3256 3257 if (log->l_curr_block >= log->l_logBBsize) { 3258 /* 3259 * Rewind the current block before the cycle is bumped to make 3260 * sure that the combined LSN never transiently moves forward 3261 * when the log wraps to the next cycle. This is to support the 3262 * unlocked sample of these fields from xlog_valid_lsn(). Most 3263 * other cases should acquire l_icloglock. 3264 */ 3265 log->l_curr_block -= log->l_logBBsize; 3266 ASSERT(log->l_curr_block >= 0); 3267 smp_wmb(); 3268 log->l_curr_cycle++; 3269 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) 3270 log->l_curr_cycle++; 3271 } 3272 ASSERT(iclog == log->l_iclog); 3273 log->l_iclog = iclog->ic_next; 3274 } /* xlog_state_switch_iclogs */ 3275 3276 /* 3277 * Write out all data in the in-core log as of this exact moment in time. 3278 * 3279 * Data may be written to the in-core log during this call. However, 3280 * we don't guarantee this data will be written out. A change from past 3281 * implementation means this routine will *not* write out zero length LRs. 3282 * 3283 * Basically, we try and perform an intelligent scan of the in-core logs. 3284 * If we determine there is no flushable data, we just return. There is no 3285 * flushable data if: 3286 * 3287 * 1. the current iclog is active and has no data; the previous iclog 3288 * is in the active or dirty state. 3289 * 2. the current iclog is drity, and the previous iclog is in the 3290 * active or dirty state. 3291 * 3292 * We may sleep if: 3293 * 3294 * 1. the current iclog is not in the active nor dirty state. 3295 * 2. the current iclog dirty, and the previous iclog is not in the 3296 * active nor dirty state. 3297 * 3. the current iclog is active, and there is another thread writing 3298 * to this particular iclog. 3299 * 4. a) the current iclog is active and has no other writers 3300 * b) when we return from flushing out this iclog, it is still 3301 * not in the active nor dirty state. 3302 */ 3303 int 3304 _xfs_log_force( 3305 struct xfs_mount *mp, 3306 uint flags, 3307 int *log_flushed) 3308 { 3309 struct xlog *log = mp->m_log; 3310 struct xlog_in_core *iclog; 3311 xfs_lsn_t lsn; 3312 3313 XFS_STATS_INC(mp, xs_log_force); 3314 3315 xlog_cil_force(log); 3316 3317 spin_lock(&log->l_icloglock); 3318 3319 iclog = log->l_iclog; 3320 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3321 spin_unlock(&log->l_icloglock); 3322 return -EIO; 3323 } 3324 3325 /* If the head iclog is not active nor dirty, we just attach 3326 * ourselves to the head and go to sleep. 
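 * Sleeping here means waiting on ic_force_wait in the maybe_sleep path
 * until the iclog I/O completes or the log is shut down.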
3327 */ 3328 if (iclog->ic_state == XLOG_STATE_ACTIVE || 3329 iclog->ic_state == XLOG_STATE_DIRTY) { 3330 /* 3331 * If the head is dirty or (active and empty), then 3332 * we need to look at the previous iclog. If the previous 3333 * iclog is active or dirty we are done. There is nothing 3334 * to sync out. Otherwise, we attach ourselves to the 3335 * previous iclog and go to sleep. 3336 */ 3337 if (iclog->ic_state == XLOG_STATE_DIRTY || 3338 (atomic_read(&iclog->ic_refcnt) == 0 3339 && iclog->ic_offset == 0)) { 3340 iclog = iclog->ic_prev; 3341 if (iclog->ic_state == XLOG_STATE_ACTIVE || 3342 iclog->ic_state == XLOG_STATE_DIRTY) 3343 goto no_sleep; 3344 else 3345 goto maybe_sleep; 3346 } else { 3347 if (atomic_read(&iclog->ic_refcnt) == 0) { 3348 /* We are the only one with access to this 3349 * iclog. Flush it out now. There should 3350 * be a roundoff of zero to show that someone 3351 * has already taken care of the roundoff from 3352 * the previous sync. 3353 */ 3354 atomic_inc(&iclog->ic_refcnt); 3355 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3356 xlog_state_switch_iclogs(log, iclog, 0); 3357 spin_unlock(&log->l_icloglock); 3358 3359 if (xlog_state_release_iclog(log, iclog)) 3360 return -EIO; 3361 3362 if (log_flushed) 3363 *log_flushed = 1; 3364 spin_lock(&log->l_icloglock); 3365 if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && 3366 iclog->ic_state != XLOG_STATE_DIRTY) 3367 goto maybe_sleep; 3368 else 3369 goto no_sleep; 3370 } else { 3371 /* Someone else is writing to this iclog. 3372 * Use its call to flush out the data. However, 3373 * the other thread may not force out this LR, 3374 * so we mark it WANT_SYNC. 3375 */ 3376 xlog_state_switch_iclogs(log, iclog, 0); 3377 goto maybe_sleep; 3378 } 3379 } 3380 } 3381 3382 /* By the time we come around again, the iclog could've been filled 3383 * which would give it another lsn. If we have a new lsn, just 3384 * return because the relevant data has been flushed. 3385 */ 3386 maybe_sleep: 3387 if (flags & XFS_LOG_SYNC) { 3388 /* 3389 * We must check if we're shutting down here, before 3390 * we wait, while we're holding the l_icloglock. 3391 * Then we check again after waking up, in case our 3392 * sleep was disturbed by a bad news. 3393 */ 3394 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3395 spin_unlock(&log->l_icloglock); 3396 return -EIO; 3397 } 3398 XFS_STATS_INC(mp, xs_log_force_sleep); 3399 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3400 /* 3401 * No need to grab the log lock here since we're 3402 * only deciding whether or not to return EIO 3403 * and the memory read should be atomic. 3404 */ 3405 if (iclog->ic_state & XLOG_STATE_IOERROR) 3406 return -EIO; 3407 } else { 3408 3409 no_sleep: 3410 spin_unlock(&log->l_icloglock); 3411 } 3412 return 0; 3413 } 3414 3415 /* 3416 * Wrapper for _xfs_log_force(), to be used when caller doesn't care 3417 * about errors or whether the log was flushed or not. This is the normal 3418 * interface to use when trying to unpin items or move the log forward. 3419 */ 3420 void 3421 xfs_log_force( 3422 xfs_mount_t *mp, 3423 uint flags) 3424 { 3425 trace_xfs_log_force(mp, 0, _RET_IP_); 3426 _xfs_log_force(mp, flags, NULL); 3427 } 3428 3429 /* 3430 * Force the in-core log to disk for a specific LSN. 3431 * 3432 * Find in-core log with lsn. 3433 * If it is in the DIRTY state, just return. 3434 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3435 * state and go to sleep or return. 3436 * If it is in any other state, go to sleep or return. 
3437 * 3438 * Synchronous forces are implemented with a signal variable. All callers 3439 * to force a given lsn to disk will wait on a the sv attached to the 3440 * specific in-core log. When given in-core log finally completes its 3441 * write to disk, that thread will wake up all threads waiting on the 3442 * sv. 3443 */ 3444 int 3445 _xfs_log_force_lsn( 3446 struct xfs_mount *mp, 3447 xfs_lsn_t lsn, 3448 uint flags, 3449 int *log_flushed) 3450 { 3451 struct xlog *log = mp->m_log; 3452 struct xlog_in_core *iclog; 3453 int already_slept = 0; 3454 3455 ASSERT(lsn != 0); 3456 3457 XFS_STATS_INC(mp, xs_log_force); 3458 3459 lsn = xlog_cil_force_lsn(log, lsn); 3460 if (lsn == NULLCOMMITLSN) 3461 return 0; 3462 3463 try_again: 3464 spin_lock(&log->l_icloglock); 3465 iclog = log->l_iclog; 3466 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3467 spin_unlock(&log->l_icloglock); 3468 return -EIO; 3469 } 3470 3471 do { 3472 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3473 iclog = iclog->ic_next; 3474 continue; 3475 } 3476 3477 if (iclog->ic_state == XLOG_STATE_DIRTY) { 3478 spin_unlock(&log->l_icloglock); 3479 return 0; 3480 } 3481 3482 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3483 /* 3484 * We sleep here if we haven't already slept (e.g. 3485 * this is the first time we've looked at the correct 3486 * iclog buf) and the buffer before us is going to 3487 * be sync'ed. The reason for this is that if we 3488 * are doing sync transactions here, by waiting for 3489 * the previous I/O to complete, we can allow a few 3490 * more transactions into this iclog before we close 3491 * it down. 3492 * 3493 * Otherwise, we mark the buffer WANT_SYNC, and bump 3494 * up the refcnt so we can release the log (which 3495 * drops the ref count). The state switch keeps new 3496 * transaction commits from using this buffer. When 3497 * the current commits finish writing into the buffer, 3498 * the refcount will drop to zero and the buffer will 3499 * go out then. 3500 */ 3501 if (!already_slept && 3502 (iclog->ic_prev->ic_state & 3503 (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) { 3504 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); 3505 3506 XFS_STATS_INC(mp, xs_log_force_sleep); 3507 3508 xlog_wait(&iclog->ic_prev->ic_write_wait, 3509 &log->l_icloglock); 3510 already_slept = 1; 3511 goto try_again; 3512 } 3513 atomic_inc(&iclog->ic_refcnt); 3514 xlog_state_switch_iclogs(log, iclog, 0); 3515 spin_unlock(&log->l_icloglock); 3516 if (xlog_state_release_iclog(log, iclog)) 3517 return -EIO; 3518 if (log_flushed) 3519 *log_flushed = 1; 3520 spin_lock(&log->l_icloglock); 3521 } 3522 3523 if ((flags & XFS_LOG_SYNC) && /* sleep */ 3524 !(iclog->ic_state & 3525 (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { 3526 /* 3527 * Don't wait on completion if we know that we've 3528 * gotten a log write error. 3529 */ 3530 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3531 spin_unlock(&log->l_icloglock); 3532 return -EIO; 3533 } 3534 XFS_STATS_INC(mp, xs_log_force_sleep); 3535 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3536 /* 3537 * No need to grab the log lock here since we're 3538 * only deciding whether or not to return EIO 3539 * and the memory read should be atomic. 
3540 */ 3541 if (iclog->ic_state & XLOG_STATE_IOERROR) 3542 return -EIO; 3543 } else { /* just return */ 3544 spin_unlock(&log->l_icloglock); 3545 } 3546 3547 return 0; 3548 } while (iclog != log->l_iclog); 3549 3550 spin_unlock(&log->l_icloglock); 3551 return 0; 3552 } 3553 3554 /* 3555 * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care 3556 * about errors or whether the log was flushed or not. This is the normal 3557 * interface to use when trying to unpin items or move the log forward. 3558 */ 3559 void 3560 xfs_log_force_lsn( 3561 xfs_mount_t *mp, 3562 xfs_lsn_t lsn, 3563 uint flags) 3564 { 3565 trace_xfs_log_force(mp, lsn, _RET_IP_); 3566 _xfs_log_force_lsn(mp, lsn, flags, NULL); 3567 } 3568 3569 /* 3570 * Called when we want to mark the current iclog as being ready to sync to 3571 * disk. 3572 */ 3573 STATIC void 3574 xlog_state_want_sync( 3575 struct xlog *log, 3576 struct xlog_in_core *iclog) 3577 { 3578 assert_spin_locked(&log->l_icloglock); 3579 3580 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3581 xlog_state_switch_iclogs(log, iclog, 0); 3582 } else { 3583 ASSERT(iclog->ic_state & 3584 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); 3585 } 3586 } 3587 3588 3589 /***************************************************************************** 3590 * 3591 * TICKET functions 3592 * 3593 ***************************************************************************** 3594 */ 3595 3596 /* 3597 * Free a used ticket when its refcount falls to zero. 3598 */ 3599 void 3600 xfs_log_ticket_put( 3601 xlog_ticket_t *ticket) 3602 { 3603 ASSERT(atomic_read(&ticket->t_ref) > 0); 3604 if (atomic_dec_and_test(&ticket->t_ref)) 3605 kmem_zone_free(xfs_log_ticket_zone, ticket); 3606 } 3607 3608 xlog_ticket_t * 3609 xfs_log_ticket_get( 3610 xlog_ticket_t *ticket) 3611 { 3612 ASSERT(atomic_read(&ticket->t_ref) > 0); 3613 atomic_inc(&ticket->t_ref); 3614 return ticket; 3615 } 3616 3617 /* 3618 * Figure out the total log space unit (in bytes) that would be 3619 * required for a log ticket. 3620 */ 3621 int 3622 xfs_log_calc_unit_res( 3623 struct xfs_mount *mp, 3624 int unit_bytes) 3625 { 3626 struct xlog *log = mp->m_log; 3627 int iclog_space; 3628 uint num_headers; 3629 3630 /* 3631 * Permanent reservations have up to 'cnt'-1 active log operations 3632 * in the log. A unit in this case is the amount of space for one 3633 * of these log operations. Normal reservations have a cnt of 1 3634 * and their unit amount is the total amount of space required. 3635 * 3636 * The following lines of code account for non-transaction data 3637 * which occupy space in the on-disk log. 3638 * 3639 * Normal form of a transaction is: 3640 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3641 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3642 * 3643 * We need to account for all the leadup data and trailer data 3644 * around the transaction data. 3645 * And then we need to account for the worst case in terms of using 3646 * more space. 3647 * The worst case will happen if: 3648 * - the placement of the transaction happens to be such that the 3649 * roundoff is at its maximum 3650 * - the transaction data is synced before the commit record is synced 3651 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3652 * Therefore the commit record is in its own Log Record. 3653 * This can happen as the commit record is called with its 3654 * own region to xlog_write(). 
3655 * This then means that in the worst case, roundoff can happen for 3656 * the commit-rec as well. 3657 * The commit-rec is smaller than padding in this scenario and so it is 3658 * not added separately. 3659 */ 3660 3661 /* for trans header */ 3662 unit_bytes += sizeof(xlog_op_header_t); 3663 unit_bytes += sizeof(xfs_trans_header_t); 3664 3665 /* for start-rec */ 3666 unit_bytes += sizeof(xlog_op_header_t); 3667 3668 /* 3669 * for LR headers - the space for data in an iclog is the size minus 3670 * the space used for the headers. If we use the iclog size, then we 3671 * undercalculate the number of headers required. 3672 * 3673 * Furthermore - the addition of op headers for split-recs might 3674 * increase the space required enough to require more log and op 3675 * headers, so take that into account too. 3676 * 3677 * IMPORTANT: This reservation makes the assumption that if this 3678 * transaction is the first in an iclog and hence has the LR headers 3679 * accounted to it, then the remaining space in the iclog is 3680 * exclusively for this transaction. i.e. if the transaction is larger 3681 * than the iclog, it will be the only thing in that iclog. 3682 * Fundamentally, this means we must pass the entire log vector to 3683 * xlog_write to guarantee this. 3684 */ 3685 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 3686 num_headers = howmany(unit_bytes, iclog_space); 3687 3688 /* for split-recs - ophdrs added when data split over LRs */ 3689 unit_bytes += sizeof(xlog_op_header_t) * num_headers; 3690 3691 /* add extra header reservations if we overrun */ 3692 while (!num_headers || 3693 howmany(unit_bytes, iclog_space) > num_headers) { 3694 unit_bytes += sizeof(xlog_op_header_t); 3695 num_headers++; 3696 } 3697 unit_bytes += log->l_iclog_hsize * num_headers; 3698 3699 /* for commit-rec LR header - note: padding will subsume the ophdr */ 3700 unit_bytes += log->l_iclog_hsize; 3701 3702 /* for roundoff padding for transaction data and one for commit record */ 3703 if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) { 3704 /* log su roundoff */ 3705 unit_bytes += 2 * mp->m_sb.sb_logsunit; 3706 } else { 3707 /* BB roundoff */ 3708 unit_bytes += 2 * BBSIZE; 3709 } 3710 3711 return unit_bytes; 3712 } 3713 3714 /* 3715 * Allocate and initialise a new log ticket. 3716 */ 3717 struct xlog_ticket * 3718 xlog_ticket_alloc( 3719 struct xlog *log, 3720 int unit_bytes, 3721 int cnt, 3722 char client, 3723 bool permanent, 3724 xfs_km_flags_t alloc_flags) 3725 { 3726 struct xlog_ticket *tic; 3727 int unit_res; 3728 3729 tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags); 3730 if (!tic) 3731 return NULL; 3732 3733 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); 3734 3735 atomic_set(&tic->t_ref, 1); 3736 tic->t_task = current; 3737 INIT_LIST_HEAD(&tic->t_queue); 3738 tic->t_unit_res = unit_res; 3739 tic->t_curr_res = unit_res; 3740 tic->t_cnt = cnt; 3741 tic->t_ocnt = cnt; 3742 tic->t_tid = prandom_u32(); 3743 tic->t_clientid = client; 3744 tic->t_flags = XLOG_TIC_INITED; 3745 if (permanent) 3746 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3747 3748 xlog_tic_reset_res(tic); 3749 3750 return tic; 3751 } 3752 3753 3754 /****************************************************************************** 3755 * 3756 * Log debug routines 3757 * 3758 ****************************************************************************** 3759 */ 3760 #if defined(DEBUG) 3761 /* 3762 * Make sure that the destination ptr is within the valid data region of 3763 * one of the iclogs. 
/*
 * Allocate and initialise a new log ticket.
 */
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog		*log,
	int			unit_bytes,
	int			cnt,
	char			client,
	bool			permanent,
	xfs_km_flags_t		alloc_flags)
{
	struct xlog_ticket	*tic;
	int			unit_res;

	tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
	if (!tic)
		return NULL;

	unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);

	atomic_set(&tic->t_ref, 1);
	tic->t_task		= current;
	INIT_LIST_HEAD(&tic->t_queue);
	tic->t_unit_res		= unit_res;
	tic->t_curr_res		= unit_res;
	tic->t_cnt		= cnt;
	tic->t_ocnt		= cnt;
	tic->t_tid		= prandom_u32();
	tic->t_clientid		= client;
	tic->t_flags		= XLOG_TIC_INITED;
	if (permanent)
		tic->t_flags |= XLOG_TIC_PERM_RESERV;

	xlog_tic_reset_res(tic);

	return tic;
}


/******************************************************************************
 *
 *		Log debug routines
 *
 ******************************************************************************
 */
#if defined(DEBUG)
/*
 * Make sure that the destination ptr is within the valid data region of
 * one of the iclogs.  This uses backup pointers stored in a different
 * part of the log in case we trash the log structure.
 */
STATIC void
xlog_verify_dest_ptr(
	struct xlog	*log,
	void		*ptr)
{
	int		i;
	int		good_ptr = 0;

	for (i = 0; i < log->l_iclog_bufs; i++) {
		if (ptr >= log->l_iclog_bak[i] &&
		    ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
			good_ptr++;
	}

	if (!good_ptr)
		xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
}

/*
 * Check to make sure the grant write head didn't just overlap the tail.  If
 * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
 * the cycles differ by exactly one and check the byte count.
 *
 * This check is run unlocked, so can give false positives. Rather than assert
 * on failures, use a warn-once flag and a panic tag to allow the admin to
 * determine if they want to panic the machine when such an error occurs. For
 * debug kernels this will have the same effect as using an assert but, unlike
 * an assert, it can be turned off at runtime.
 */
STATIC void
xlog_verify_grant_tail(
	struct xlog	*log)
{
	int		tail_cycle, tail_blocks;
	int		cycle, space;

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
	if (tail_cycle != cycle) {
		if (cycle - 1 != tail_cycle &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: cycle - 1 != tail_cycle", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}

		if (space > BBTOB(tail_blocks) &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: space > BBTOB(tail_blocks)", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}
	}
}
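/*
 * Worked example (illustrative numbers only, not part of the original
 * source): the grant and tail heads are (cycle, offset) pairs into a
 * circular log.  If the tail sits at cycle 8, block 100 and the write
 * grant head at cycle 9, byte 20000, the heads are on adjacent cycles and
 * the check above only complains if the head's byte count passes the
 * tail: here 20000 <= BBTOB(100) = 51200, so nothing is reported.  Had the
 * cycles differed by more than one, or had space exceeded
 * BBTOB(tail_blocks), the warn-once alerts above would fire.
 */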
/* check if it will fit */
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn)
{
	int	blocks;

	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
		blocks =
		    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
		if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	} else {
		ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);

		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);

		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
		if (blocks < BTOBB(iclog->ic_offset) + 1)
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	}
}	/* xlog_verify_tail_lsn */

/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *		individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *	log, check the preceding blocks of the physical log to make sure all
 *	the cycle numbers agree with the current cycle number.
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	bool			syncing)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	void			*base_ptr, *ptr, *p;
	ptrdiff_t		field_offset;
	uint8_t			clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
		ASSERT(icptr);

	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	base_ptr = ptr = &iclog->ic_header;
	p = &iclog->ic_header;
	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	base_ptr = ptr = iclog->ic_datap;
	ophead = ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = ptr;

		/* clientid is only 1 byte */
		p = &ophead->oh_clientid;
		field_offset = p - base_ptr;
		if (!syncing || (field_offset & 0x1ff)) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op 0x%p offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length */
		p = &ophead->oh_len;
		field_offset = p - base_ptr;
		if (!syncing || (field_offset & 0x1ff)) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((uintptr_t)&ophead->oh_len -
				    (uintptr_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
}	/* xlog_verify_iclog */
#endif
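/*
 * Illustrative note, not part of the original source: while an iclog is
 * being synced, the first four bytes of every 512 byte block have been
 * replaced with the cycle number, and the displaced words are kept in
 * h_cycle_data[] (or in the extended headers for offsets beyond
 * XLOG_HEADER_CYCLE_SIZE).  That is why xlog_verify_iclog() above only
 * reads a field in place when its offset has non-zero low nine bits
 * (field_offset & 0x1ff).  Assuming the usual 32k XLOG_HEADER_CYCLE_SIZE,
 * an op header starting at byte offset 0x8000 into the data region gives
 * idx = BTOBBT(0x8000) = 64, which falls past the first header and so is
 * fetched from xhdr[1].hic_xheader.xh_cycle_data[0].
 */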
/*
 * Mark all iclogs IOERROR. l_icloglock is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no log flushes will result.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return 0;
	}
	/*
	 * Return non-zero if the state transition has already happened.
	 */
	return 1;
}

/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. if !logerror, flush the logs to disk. Anything modified
 *	   after this is ignored.
 *	b. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	c. those who're sleeping on log reservations, pinned objects and
 *	   other resources get woken up, and are told the bad news.
 *	d. nothing new gets queued up after (b) and (c) are done.
 *
 * Note: for the !logerror case we need to flush the regions held in memory out
 * to disk first. This needs to be done before the log is marked as shutdown,
 * otherwise the iclog writes will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	struct xlog	*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			mp->m_sb_bp->b_flags |= XBF_DONE;
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}

	/*
	 * Flush all the completed transactions to disk before marking the log
	 * as being shut down. We need to do it in this order to ensure that
	 * completed operations are safely on disk before we shut down, and
	 * that we don't have to issue any buffer IO after the shutdown flags
	 * are set to guarantee this.
	 */
	if (!logerror)
		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);

	/*
	 * Mark the filesystem and the log as being in a shutdown state and
	 * wake everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	/*
	 * Mark the log and the iclogs with IO error flags to prevent any
	 * further log IO from being issued or completed.
	 */
	log->l_flags |= XLOG_IO_ERROR;
	retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this. That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force. Wake the CIL push first
	 * as if the log writes were completed. The abort handling in the log
	 * item committed callback functions will do this again under lock to
	 * avoid races.
	 */
	wake_up_all(&log->l_cilp->xc_commit_wait);
	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);

#ifdef XFSERRORDEBUG
	{
		xlog_in_core_t	*iclog;

		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_callback == 0);
			iclog = iclog->ic_next;
		} while (iclog != log->l_iclog);
		spin_unlock(&log->l_icloglock);
	}
#endif
	/* return non-zero if log IOERROR transition had already happened */
	return retval;
}
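/*
 * Illustrative sketch, not part of the original source: once the log has
 * been shut down above, the rest of the log code refuses new work by
 * testing the IOERROR state, roughly like the hypothetical helper below
 * (xlog_example_shutdown_check is a made-up name showing the pattern).
 */
static inline int
xlog_example_shutdown_check(
	struct xlog	*log)
{
	/* bail out of new log work once a forced shutdown has happened */
	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;
	return 0;
}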
STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/* endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}

/*
 * Verify that an LSN stamped into a piece of metadata is valid. This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{
	struct xlog		*log = mp->m_log;
	bool			valid;

	/*
	 * norecovery mode skips mount-time log processing and unconditionally
	 * resets the in-core LSN. We can't validate in this mode, but
	 * modifications are not allowed anyway, so just return true.
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		return true;

	/*
	 * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is
	 * handled by recovery and thus safe to ignore here.
	 */
	if (lsn == NULLCOMMITLSN)
		return true;

	valid = xlog_valid_lsn(mp->m_log, lsn);

	/* warn the user about what's gone wrong before verifier failure */
	if (!valid) {
		spin_lock(&log->l_icloglock);
		xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
			 log->l_curr_cycle, log->l_curr_block);
		spin_unlock(&log->l_icloglock);
	}

	return valid;
}
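/*
 * Illustrative sketch, not part of the original source: a v5-aware read
 * verifier would typically feed the LSN it pulls out of the on-disk
 * structure to xfs_log_check_lsn() and fail the verify when it returns
 * false.  The helper below (xlog_example_check_metadata_lsn is a made-up
 * name) shows the usual shape of that call.
 */
static inline bool
xlog_example_check_metadata_lsn(
	struct xfs_mount	*mp,
	__be64			ondisk_lsn)
{
	/* only v5 (CRC enabled) filesystems stamp LSNs into metadata */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	return xfs_log_check_lsn(mp, be64_to_cpu(ondisk_lsn));
}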