/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_cksum.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"

kmem_zone_t	*xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void
xlog_state_do_callback(
	struct xlog		*log,
	int			aborted,
	struct xlog_in_core	*iclog);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size);
STATIC void
xlog_state_want_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);

STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);
STATIC void
xlog_ungrant_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);

#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	bool			syncing);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);
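
/*
 * The grant heads encode their position as a single 64-bit value so that
 * they can be sampled and updated atomically without a lock: the cycle
 * number lives in the upper 32 bits and the byte offset into the log in the
 * lower 32 bits (as packed and unpacked by xlog_assign_grant_head_val() and
 * xlog_crack_grant_head_val()). The two helpers below adjust a head with a
 * cmpxchg loop - crack the packed value, adjust the space (wrapping the
 * cycle when the offset passes either end of the log), then attempt to swap
 * the new value in, retrying if another CPU updated the head concurrently.
 */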

static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;

	list_for_each_entry(tic, &head->waiters, t_queue) {
		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes)
			return false;

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
	}

	return true;
}

STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}
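
/*
 * Note the asymmetry encoded in xlog_ticket_reservation() above: a waiter on
 * the write head only ever needs a single unit of space (for the current
 * transaction in a rolling chain), while a permanent ticket waiting on the
 * reserve head must be able to satisfy all of its remaining transaction
 * counts (t_unit_res * t_cnt) at once.
 */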

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters; if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}
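
/*
 * The per-ticket region accounting above is purely diagnostic: it records
 * what has been logged against the reservation so that xlog_print_tic_res()
 * can dump a useful breakdown if a reservation overrun is ever detected.
 */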

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation. By wasting space in each
 * reservation, we prevent over-allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	uint8_t			client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
				KM_SLEEP | KM_MAYFAIL);
	if (!tic)
		return -ENOMEM;

	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}
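
/*
 * Rough lifecycle of a reservation (a sketch, not a complete call graph):
 * the transaction code obtains a ticket through xfs_log_reserve(); each time
 * a permanent transaction rolls, the write grant is topped back up with
 * xfs_log_regrant(); and xfs_log_done() below either regrants (a permanent
 * ticket being rolled) or ungrants whatever space remains when the
 * transaction finally commits or is cancelled.
 */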

/*
 * NOTES:
 *
 *	1. currblock field gets updated at startup and after in-core logs
 *	are marked with WANT_SYNC.
 */

/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation.  If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket.  If the ticket was one with a permanent reservation, then
 * a few operations are done differently.  Permanent reservation tickets by
 * default don't release the reservation.  They just commit the current
 * transaction with the belief that the reservation is still needed.  A flag
 * must be passed in before permanent reservations are actually released.
 * When this type of ticket is not released, it needs to be set into the
 * inited state again.  By doing this, a start record will be written out
 * when the next write occurs.
 */
xfs_lsn_t
xfs_log_done(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	xfs_lsn_t		lsn = 0;

	if (XLOG_FORCED_SHUTDOWN(log) ||
	    /*
	     * If nothing was ever written, don't write out commit record.
	     * If we get an error, just continue and give back the log ticket.
	     */
	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
		lsn = (xfs_lsn_t) -1;
		regrant = false;
	}

	if (!regrant) {
		trace_xfs_log_done_nonperm(log, ticket);

		/*
		 * Release ticket if not permanent reservation or a specific
		 * request has been made to release a permanent reservation.
		 */
		xlog_ungrant_log_space(log, ticket);
	} else {
		trace_xfs_log_done_perm(log, ticket);

		xlog_regrant_reserve_log_space(log, ticket);
		/*
		 * If this ticket was a permanent reservation and we aren't
		 * trying to release it, reset the inited flags; so next time
		 * we write, a start record will be written out.
		 */
		ticket->t_flags |= XLOG_TIC_INITED;
	}

	xfs_log_ticket_put(ticket);
	return lsn;
}

/*
 * Attaches a new iclog I/O completion callback routine during
 * transaction commit.  If the log is in error state, a non-zero
 * return code is handed back and the caller is responsible for
 * executing the callback at an appropriate time.
 */
int
xfs_log_notify(
	struct xlog_in_core	*iclog,
	xfs_log_callback_t	*cb)
{
	int	abortflg;

	spin_lock(&iclog->ic_callback_lock);
	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
	if (!abortflg) {
		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
		cb->cb_next = NULL;
		*(iclog->ic_callback_tail) = cb;
		iclog->ic_callback_tail = &(cb->cb_next);
	}
	spin_unlock(&iclog->ic_callback_lock);
	return abortflg;
}

int
xfs_log_release_iclog(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog)
{
	if (xlog_state_release_iclog(mp->m_log, iclog)) {
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
		return -EIO;
	}

	return 0;
}

/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	bool		fatal = xfs_sb_version_hascrc(&mp->m_sb);
	int		error = 0;
	int		min_logfsbs;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small, as that can lead to unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails.  This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logsunit > 1 &&
		   mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
		xfs_warn(mp,
		"log stripe unit %u bytes must be a multiple of block size",
			 mp->m_sb.sb_logsunit);
		error = -EINVAL;
		fatal = true;
	}
	if (error) {
		/*
		 * Log check errors are always fatal on v5; or whenever bad
		 * metadata leads to a crash.
		 */
		if (fatal) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(mp->m_log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}

/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	int	error = 0;
	bool	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
	bool	recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	} else if (readonly) {
		/* Allow unlinked processing to proceed */
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
	}

	/*
	 * During the second phase of log recovery, we need iget and
	 * iput to behave like they do for an active filesystem.
	 * xfs_fs_drop_inode needs to be able to prevent the deletion
	 * of inodes before we're done replaying log items on those
	 * inodes.  Turn it off immediately after recovery finishes
	 * so that we don't leak the quota inodes if subsequent mount
	 * activities fail.
	 *
	 * We let all inodes involved in redo item processing end up on
	 * the LRU instead of being evicted immediately so that if we do
	 * something to an unlinked inode, the irele won't cause
	 * premature truncation and freeing of the inode, which results
	 * in log recovery failure.  We have to evict the unreferenced
	 * lru inodes after clearing SB_ACTIVE because we don't
	 * otherwise clean up the lru if there's a subsequent failure in
	 * xfs_mountfs, which leads to us leaking the inodes if nothing
	 * else (e.g. quotacheck) references the inodes before the
	 * mount failure occurs.
	 */
	mp->m_super->s_flags |= SB_ACTIVE;
	error = xlog_recover_finish(mp->m_log);
	if (!error)
		xfs_log_work_queue(mp);
	mp->m_super->s_flags &= ~SB_ACTIVE;
	evict_inodes(mp->m_super);

	/*
	 * Drain the buffer LRU after log recovery. This is required for v4
	 * filesystems to avoid leaving around buffers with NULL verifier ops,
	 * but we do it unconditionally to make sure we're always in a clean
	 * cache state after mount.
	 *
	 * Don't push in the error case because the AIL may have pending intents
	 * that aren't removed until recovery is cancelled.
	 */
	if (!error && recovered) {
		xfs_log_force(mp, XFS_LOG_SYNC);
		xfs_ail_push_all_sync(mp->m_ail);
	}
	xfs_wait_buftarg(mp->m_ddev_targp);

	if (readonly)
		mp->m_flags |= XFS_MOUNT_RDONLY;

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
int
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	int			error;

	error = xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);

	return error;
}

/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens.  Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */

static int
xfs_log_unmount_write(xfs_mount_t *mp)
{
	struct xlog	 *log = mp->m_log;
	xlog_in_core_t	 *iclog;
#ifdef DEBUG
	xlog_in_core_t	 *first_iclog;
#endif
	xlog_ticket_t	*tic = NULL;
	xfs_lsn_t	 lsn;
	int		 error;

	/*
	 * Don't write out an unmount record on norecovery mounts or ro
	 * devices, or if we are doing a forced umount (typically because of
	 * IO errors).
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
	    xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	}

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

#ifdef DEBUG
	first_iclog = iclog = log->l_iclog;
	do {
		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
			ASSERT(iclog->ic_offset == 0);
		}
		iclog = iclog->ic_next;
	} while (iclog != first_iclog);
#endif
	if (!(XLOG_FORCED_SHUTDOWN(log))) {
		error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
		if (!error) {
			/* the data section must be 32 bit size aligned */
			struct {
				uint16_t magic;
				uint16_t pad1;
				uint32_t pad2; /* may as well make it 64 bits */
			} magic = {
				.magic = XLOG_UNMOUNT_TYPE,
			};
			struct xfs_log_iovec reg = {
				.i_addr = &magic,
				.i_len = sizeof(magic),
				.i_type = XLOG_REG_TYPE_UNMOUNT,
			};
			struct xfs_log_vec vec = {
				.lv_niovecs = 1,
				.lv_iovecp = &reg,
			};

			/* remove inited flag, and account for space used */
			tic->t_flags = 0;
			tic->t_curr_res -= sizeof(magic);
			error = xlog_write(log, &vec, tic, &lsn,
					   NULL, XLOG_UNMOUNT_TRANS);
			/*
			 * At this point, we're umounting anyway,
			 * so there's no point in transitioning log state
			 * to IOERROR. Just continue...
			 */
		}

		if (error)
			xfs_alert(mp, "%s: unmount record failed", __func__);

		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);
		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);
		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
		      iclog->ic_state == XLOG_STATE_DIRTY)) {
			if (!XLOG_FORCED_SHUTDOWN(log)) {
				xlog_wait(&iclog->ic_force_wait,
					  &log->l_icloglock);
			} else {
				spin_unlock(&log->l_icloglock);
			}
		} else {
			spin_unlock(&log->l_icloglock);
		}
		if (tic) {
			trace_xfs_log_umount_write(log, tic);
			xlog_ungrant_log_space(log, tic);
			xfs_log_ticket_put(tic);
		}
	} else {
		/*
		 * We're already in forced_shutdown mode, couldn't
		 * even attempt to write out the unmount transaction.
		 *
		 * Go through the motions of sync'ing and releasing
		 * the iclog, even though no I/O will actually happen,
		 * we need to wait for other log I/Os that may already
		 * be in progress.  Do this as a separate section of
		 * code so we'll know if we ever get stuck here that
		 * we're in this odd situation of trying to unmount
		 * a file system that went into forced_shutdown as
		 * the result of an unmount.
		 */
		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);

		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);

		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
		      iclog->ic_state == XLOG_STATE_DIRTY ||
		      iclog->ic_state == XLOG_STATE_IOERROR)) {
			xlog_wait(&iclog->ic_force_wait,
				  &log->l_icloglock);
		} else {
			spin_unlock(&log->l_icloglock);
		}
	}

	return error;
}	/* xfs_log_unmount_write */

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_wait_buftarg() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_wait_buftarg(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

void
xfs_log_item_init(
	struct xfs_mount	*mp,
	struct xfs_log_item	*item,
	int			type,
	const struct xfs_item_ops *ops)
{
	item->li_mountp = mp;
	item->li_ailp = mp->m_ail;
	item->li_type = type;
	item->li_ops = ops;
	item->li_lv = NULL;

	INIT_LIST_HEAD(&item->li_ail);
	INIT_LIST_HEAD(&item->li_cil);
	INIT_LIST_HEAD(&item->li_bio_list);
}
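
/*
 * As an example of the above, each log item type embeds a struct
 * xfs_log_item and initialises it once at item creation time; the buffer
 * log item code does roughly:
 *
 *	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
 */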

/*
 * Wake up processes waiting for log space after we have moved the log tail.
 */
void
xfs_log_space_wake(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	int			free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (!list_empty_careful(&log->l_write_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_write_head.lock);
		free_bytes = xlog_space_left(log, &log->l_write_head.grant);
		xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
		spin_unlock(&log->l_write_head.lock);
	}

	if (!list_empty_careful(&log->l_reserve_head.waiters)) {
		ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

		spin_lock(&log->l_reserve_head.lock);
		free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
		xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
		spin_unlock(&log->l_reserve_head.lock);
	}
}

/*
 * Determine if we have a transaction that has gone to disk that needs to be
 * covered. To begin the transition to the idle state firstly the log needs to
 * be idle. That means the CIL, the AIL and the iclogs need to be empty before
 * we start attempting to cover the log.
 *
 * Only if we are then in a state where covering is needed, the caller is
 * informed that dummy transactions are required to move the log into the idle
 * state.
 *
 * If there are any items in the AIL or CIL, then we do not want to attempt to
 * cover the log as we may be in a situation where there isn't log space
 * available to run a dummy transaction and this can lead to deadlocks when the
 * tail of the log is pinned by an item that is modified in the CIL. Hence
 * there's no point in running a dummy transaction at this point because we
 * can't start trying to idle the log until both the CIL and AIL are empty.
 */
static int
xfs_log_need_covered(xfs_mount_t *mp)
{
	struct xlog	*log = mp->m_log;
	int		needed = 0;

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	if (!xlog_cil_empty(log))
		return 0;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		if (xfs_ail_min_lsn(log->l_ailp))
			break;
		if (!xlog_iclogs_empty(log))
			break;

		needed = 1;
		if (log->l_covered_state == XLOG_STATE_COVER_NEED)
			log->l_covered_state = XLOG_STATE_COVER_DONE;
		else
			log->l_covered_state = XLOG_STATE_COVER_DONE2;
		break;
	default:
		needed = 1;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}
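
/*
 * In outline, covering the log therefore takes two passes of the state
 * machine above: a first dummy transaction moves COVER_NEED to COVER_DONE,
 * and once that checkpoint is cleaned a second dummy transaction moves
 * COVER_NEED2 to COVER_DONE2, after which the log can return to COVER_IDLE.
 */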

/*
 * We may be holding the log iclog lock upon entering this routine.
 */
xfs_lsn_t
xlog_assign_tail_lsn_locked(
	struct xfs_mount	*mp)
{
	struct xlog		*log = mp->m_log;
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn;

	assert_spin_locked(&mp->m_ail->ail_lock);

	/*
	 * To make sure we always have a valid LSN for the log tail we keep
	 * track of the last LSN which was committed in log->l_last_sync_lsn,
	 * and use that when the AIL is empty.
	 */
	lip = xfs_ail_min(mp->m_ail);
	if (lip)
		tail_lsn = lip->li_lsn;
	else
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
	trace_xfs_log_assign_tail_lsn(log, tail_lsn);
	atomic64_set(&log->l_tail_lsn, tail_lsn);
	return tail_lsn;
}

xfs_lsn_t
xlog_assign_tail_lsn(
	struct xfs_mount	*mp)
{
	xfs_lsn_t		tail_lsn;

	spin_lock(&mp->m_ail->ail_lock);
	tail_lsn = xlog_assign_tail_lsn_locked(mp);
	spin_unlock(&mp->m_ail->ail_lock);

	return tail_lsn;
}

/*
 * Return the space in the log between the tail and the head.  The head
 * is passed in the cycle/bytes formal parms.  In the special case where
 * the reserve head has wrapped past the tail, this calculation is no
 * longer valid.  In this case, just return 0 which means there is no space
 * in the log.  This works for all places where this function is called
 * with the reserve head.  Of course, if the write head were to ever
 * wrap the tail, we should blow up.  Rather than catch this case here,
 * we depend on other ASSERTions in other parts of the code.   XXXmiken
 *
 * This code also handles the case where the reservation head is behind
 * the tail.  The details of this case are described below, but the end
 * result is that we return the size of the log as the amount of space left.
 */
STATIC int
xlog_space_left(
	struct xlog	*log,
	atomic64_t	*head)
{
	int		free_bytes;
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		return 0;
	else if (tail_cycle < head_cycle) {
		ASSERT(tail_cycle == (head_cycle - 1));
		free_bytes = tail_bytes - head_bytes;
	} else {
		/*
		 * The reservation head is behind the tail.
		 * In this case we just want to return the size of the
		 * log as the amount of space left.
		 */
		xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
		xfs_alert(log->l_mp,
			  "  tail_cycle = %d, tail_bytes = %d",
			  tail_cycle, tail_bytes);
		xfs_alert(log->l_mp,
			  "  GH   cycle = %d, GH   bytes = %d",
			  head_cycle, head_bytes);
		ASSERT(0);
		free_bytes = log->l_logsize;
	}
	return free_bytes;
}
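
/*
 * Worked example for the above: with a 32768-byte log, a tail at cycle 4
 * byte 20480 and a head at cycle 5 byte 8192, the head has wrapped exactly
 * one cycle past the tail, so the middle branch computes
 * tail_bytes - head_bytes = 20480 - 8192 = 12288 bytes free.
 */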

/*
 * Log function which is called when an io completes.
 *
 * The log manager needs its own routine, in order to control what
 * happens with the buffer after the write completes.
 */
static void
xlog_iodone(xfs_buf_t *bp)
{
	struct xlog_in_core	*iclog = bp->b_log_item;
	struct xlog		*l = iclog->ic_log;
	int			aborted = 0;

	/*
	 * Race to shutdown the filesystem if we see an error or the iclog is in
	 * IOABORT state. The IOABORT state is only set in DEBUG mode to inject
	 * CRC errors into log recovery.
	 */
	if (XFS_TEST_ERROR(bp->b_error, l->l_mp, XFS_ERRTAG_IODONE_IOERR) ||
	    iclog->ic_state & XLOG_STATE_IOABORT) {
		if (iclog->ic_state & XLOG_STATE_IOABORT)
			iclog->ic_state &= ~XLOG_STATE_IOABORT;

		xfs_buf_ioerror_alert(bp, __func__);
		xfs_buf_stale(bp);
		xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
		/*
		 * This flag will be propagated to the trans-committed
		 * callback routines to let them know that the log-commit
		 * didn't succeed.
		 */
		aborted = XFS_LI_ABORTED;
	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
		aborted = XFS_LI_ABORTED;
	}

	/* log I/O is always issued ASYNC */
	ASSERT(bp->b_flags & XBF_ASYNC);
	xlog_state_done_syncing(iclog, aborted);

	/*
	 * drop the buffer lock now that we are done. Nothing references
	 * the buffer after this, so an unmount waiting on this lock can now
	 * tear it down safely. As such, it is unsafe to reference the buffer
	 * (bp) after the unlock as we could race with it being freed.
	 */
	xfs_buf_unlock(bp);
}

/*
 * Return size of each in-core log record buffer.
 *
 * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
 *
 * If the filesystem blocksize is too large, we may need to choose a
 * larger size since the directory code currently logs entire blocks.
 */
STATIC void
xlog_get_iclog_buffer_size(
	struct xfs_mount	*mp,
	struct xlog		*log)
{
	int size;
	int xhdrs;

	if (mp->m_logbufs <= 0)
		log->l_iclog_bufs = XLOG_MAX_ICLOGS;
	else
		log->l_iclog_bufs = mp->m_logbufs;

	/*
	 * Buffer size passed in from mount system call.
	 */
	if (mp->m_logbsize > 0) {
		size = log->l_iclog_size = mp->m_logbsize;
		log->l_iclog_size_log = 0;
		while (size != 1) {
			log->l_iclog_size_log++;
			size >>= 1;
		}

		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
			/* # headers = size / 32k
			 * one header holds cycles from 32k of data
			 */

			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
				xhdrs++;
			log->l_iclog_hsize = xhdrs << BBSHIFT;
			log->l_iclog_heads = xhdrs;
		} else {
			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
			log->l_iclog_hsize = BBSIZE;
			log->l_iclog_heads = 1;
		}
		goto done;
	}

	/* All machines use 32kB buffers by default. */
	log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
	log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;

	/* the default log buffer size is 32k, which is one header sector */
	log->l_iclog_hsize = BBSIZE;
	log->l_iclog_heads = 1;

done:
	/* are we being asked to make the sizes selected above visible? */
	if (mp->m_logbufs == 0)
		mp->m_logbufs = log->l_iclog_bufs;
	if (mp->m_logbsize == 0)
		mp->m_logbsize = log->l_iclog_size;
}	/* xlog_get_iclog_buffer_size */

void
xfs_log_work_queue(
	struct xfs_mount	*mp)
{
	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
			   msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
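
/*
 * xfs_syncd_centisecs defaults to 3000 (i.e. 30 seconds), so unless it is
 * tuned via the fs.xfs.xfssyncd_centisecs sysctl the worker below runs
 * roughly twice a minute to push the AIL and, when everything is clean,
 * cover the log.
 */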

/*
 * Every sync period we need to unpin all items in the AIL and push them to
 * disk. If there is nothing dirty, then we might need to cover the log to
 * indicate that the filesystem is idle.
 */
static void
xfs_log_worker(
	struct work_struct	*work)
{
	struct xlog		*log = container_of(to_delayed_work(work),
						struct xlog, l_work);
	struct xfs_mount	*mp = log->l_mp;

	/* dgc: errors ignored - not fatal and nowhere to report them */
	if (xfs_log_need_covered(mp)) {
		/*
		 * Dump a transaction into the log that contains no real change.
		 * This is needed to stamp the current tail LSN into the log
		 * during the covering operation.
		 *
		 * We cannot use an inode here for this - that will push dirty
		 * state back up into the VFS and then periodic inode flushing
		 * will prevent log covering from making progress. Hence we
		 * synchronously log the superblock instead to ensure the
		 * superblock is immediately unpinned and can be written back.
		 */
		xfs_sync_sb(mp, true);
	} else
		xfs_log_force(mp, 0);

	/* start pushing all the metadata that is currently dirty */
	xfs_ail_push_all(mp->m_ail);

	/* queue us up again */
	xfs_log_work_queue(mp);
}

/*
 * This routine initializes some of the log structure for a given mount point.
 * Its primary purpose is to fill in enough, so recovery can occur.  However,
 * some other stuff may be filled in too.
 */
STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks)
{
	struct xlog		*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog = NULL;
	xfs_buf_t		*bp;
	int			i;
	int			error = -ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize     = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
	INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */

	xlog_grant_head_init(&log->l_reserve_head);
	xlog_grant_head_init(&log->l_write_head);

	error = -EFSCORRUPTED;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
		log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				 log2_size, BBSHIFT);
			goto out_free_log;
		}

		log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				 log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
		    !xfs_sb_version_haslogv2(&mp->m_sb)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				 log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	/*
	 * Use a NULL block for the extra log buffer used during splits so that
	 * it will trigger errors if we ever try to do IO on it without first
	 * having set it up properly.
	 */
	error = -ENOMEM;
	bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL,
			   BTOBB(log->l_iclog_size), XBF_NO_IOACCT);
	if (!bp)
		goto out_free_log;

	/*
	 * The iclogbuf buffer locks are held over IO but we are not going to do
	 * IO yet.  Hence unlock the buffer so that the log IO path can grab it
	 * when appropriate.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_unlock(bp);

	/* use high priority wq for log I/O completion */
	bp->b_ioend_wq = mp->m_log_workqueue;
	bp->b_iodone = xlog_iodone;
	log->l_xbuf = bp;

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined.  It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory.  See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i = 0; i < log->l_iclog_bufs; i++) {
		*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
		if (!*iclogp)
			goto out_free_iclog;

		iclog = *iclogp;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
					  BTOBB(log->l_iclog_size),
					  XBF_NO_IOACCT);
		if (!bp)
			goto out_free_iclog;

		ASSERT(xfs_buf_islocked(bp));
		xfs_buf_unlock(bp);

		/* use high priority wq for log I/O completion */
		bp->b_ioend_wq = mp->m_log_workqueue;
		bp->b_iodone = xlog_iodone;
		iclog->ic_bp = bp;
		iclog->ic_data = bp->b_addr;
#ifdef DEBUG
		log->l_iclog_bak[i] = &iclog->ic_header;
#endif
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		spin_lock_init(&iclog->ic_callback_lock);
		iclog->ic_callback_tail = &(iclog->ic_callback);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	error = xlog_cil_init(log);
	if (error)
		goto out_free_iclog;
	return log;

out_free_iclog:
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		if (iclog->ic_bp)
			xfs_buf_free(iclog->ic_bp);
		kmem_free(iclog);
	}
	spinlock_destroy(&log->l_icloglock);
	xfs_buf_free(log->l_xbuf);
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(error);
}	/* xlog_alloc_log */
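
/*
 * The allocation loop above links the iclogs into a circular ring: the final
 * *iclogp assignment points the last iclog's ic_next back at the head, so
 * the log state machine can simply rotate through the buffers as each one
 * fills and completes its IO.
 */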

/*
 * Write out the commit record of a transaction associated with the given
 * ticket.  Return the lsn of the commit record.
 */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp)
{
	struct xfs_mount *mp = log->l_mp;
	int	error;
	struct xfs_log_iovec reg = {
		.i_addr = NULL,
		.i_len = 0,
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};

	ASSERT_ALWAYS(iclog);
	error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
			   XLOG_COMMIT_TRANS);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

/*
 * Push on the buffer cache code if we ever use more than 75% of the on-disk
 * log space.  This code pushes on the lsn which would supposedly free up
 * the 25% which we want to leave free.  We may need to adopt a policy which
 * pushes on an lsn which is further along in the log once we reach the high
 * water mark.  In this manner, we would be creating a low water mark.
 */
STATIC void
xlog_grant_push_ail(
	struct xlog	*log,
	int		need_bytes)
{
	xfs_lsn_t	threshold_lsn = 0;
	xfs_lsn_t	last_sync_lsn;
	int		free_blocks;
	int		free_bytes;
	int		threshold_block;
	int		threshold_cycle;
	int		free_threshold;

	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);

	free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
	free_blocks = BTOBBT(free_bytes);

	/*
	 * Set the threshold for the minimum number of free blocks in the
	 * log to the maximum of what the caller needs, one quarter of the
	 * log, and 256 blocks.
	 */
	free_threshold = BTOBB(need_bytes);
	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
	free_threshold = MAX(free_threshold, 256);
	if (free_blocks >= free_threshold)
		return;

	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
			      &threshold_block);
	threshold_block += free_threshold;
	if (threshold_block >= log->l_logBBsize) {
		threshold_block -= log->l_logBBsize;
		threshold_cycle += 1;
	}
	threshold_lsn = xlog_assign_lsn(threshold_cycle,
					threshold_block);
	/*
	 * Don't pass in an lsn greater than the lsn of the last
	 * log record known to be on disk. Use a snapshot of the last sync lsn
	 * so that it doesn't change between the compare and the set.
	 */
	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
		threshold_lsn = last_sync_lsn;

	/*
	 * Get the transaction layer to kick the dirty buffers out to
	 * disk asynchronously. No point in trying to do this if
	 * the filesystem is shutting down.
	 */
	if (!XLOG_FORCED_SHUTDOWN(log))
		xfs_ail_push(log->l_ailp, threshold_lsn);
}
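
/*
 * Example of the threshold above: for a 2048-block log,
 * max(BTOBB(need_bytes), 2048 / 4, 256) is at least 512 blocks, so the AIL
 * is pushed whenever less than one quarter of the log remains free.
 */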

/*
 * Stamp cycle number in every block
 */
STATIC void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	char			*dp;

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size); i++) {
		if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE))
			break;
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = iclog->ic_data;

		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
	}
}

/*
 * Calculate the checksum for a log buffer.
 *
 * This is a little more complicated than it should be because the various
 * headers and the actual data are non-contiguous.
 */
__le32
xlog_cksum(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			size)
{
	uint32_t		crc;

	/* first generate the crc for the record header ... */
	crc = xfs_start_cksum_update((char *)rhead,
				     sizeof(struct xlog_rec_header),
				     offsetof(struct xlog_rec_header, h_crc));

	/* ... then for additional cycle data for v2 logs ... */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
		int		i;
		int		xheads;

		xheads = size / XLOG_HEADER_CYCLE_SIZE;
		if (size % XLOG_HEADER_CYCLE_SIZE)
			xheads++;

		for (i = 1; i < xheads; i++) {
			crc = crc32c(crc, &xhdr[i].hic_xheader,
				     sizeof(struct xlog_rec_ext_header));
		}
	}

	/* ... and finally for the payload */
	crc = crc32c(crc, dp, size);

	return xfs_end_cksum(crc);
}

/*
 * The bdstrat callback function for log bufs. This gives us a central
 * place to trap bufs in case we get hit by a log I/O error and need to
 * shutdown. Actually, in practice, even when we didn't get a log error,
 * we transition the iclogs to IOERROR state *after* flushing all existing
 * iclogs to disk. This is because we don't want any more new transactions
 * to be started or completed afterwards.
 *
 * We lock the iclogbufs here so that we can serialise against IO completion
 * during unmount. We might be processing a shutdown triggered during unmount,
 * and that can occur asynchronously to the unmount thread, and hence we need to
 * ensure that completes before tearing down the iclogbufs. Hence we need to
 * hold the buffer lock across the log IO to achieve that.
 */
STATIC int
xlog_bdstrat(
	struct xfs_buf		*bp)
{
	struct xlog_in_core	*iclog = bp->b_log_item;

	xfs_buf_lock(bp);
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		/*
		 * It would seem logical to return EIO here, but we rely on
		 * the log state machine to propagate I/O errors instead of
		 * doing it here. Similarly, IO completion will unlock the
		 * buffer, so we don't do it here.
		 */
		return 0;
	}

	xfs_buf_submit(bp);
	return 0;
}

/*
 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
 * fashion.  Previously, we should have moved the current iclog
 * ptr in the log to point to the next available iclog.  This allows further
 * write to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header.  We replace
 * it with the current cycle count.  Each BBSIZE block is tagged with the
 * cycle count because there is an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once.  In other words,
 * we can't have part of a 512 byte block written and part not written.  By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog.  No other thread can be in
 * this routine with the same iclog.  Changing contents of iclog can there-
 * fore be done without grabbing the state machine lock.  Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme.  Only
 * log_sync (and then only bwrite()) know about the fact that the log may
 * not start with block zero on a given device.  The log block start offset
 * is added immediately before calling bwrite().
 */

STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog)
{
	xfs_buf_t	*bp;
	int		i;
	uint		count;		/* byte count of bwrite */
	uint		count_init;	/* initial count before roundup */
	int		roundoff;	/* roundoff to BB or stripe */
	int		split = 0;	/* split write into two regions */
	int		error;
	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
	int		size;

	XFS_STATS_INC(log->l_mp, xs_log_writes);
	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

	/* Add for LR header */
	count_init = log->l_iclog_hsize + iclog->ic_offset;

	/* Round out the log write size */
	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
		/* we have a v2 stripe unit to use */
		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
	} else {
		count = BBTOB(BTOBB(count_init));
	}
	roundoff = count - count_init;
	ASSERT(roundoff >= 0);
	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
		roundoff < log->l_mp->m_sb.sb_logsunit)
		||
		(log->l_mp->m_sb.sb_logsunit <= 1 &&
		 roundoff < BBTOB(1)));

	/* move grant heads by roundoff in sync */
	xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
	xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);

	/* put cycle number in every block */
	xlog_pack_data(log, iclog, roundoff);

	/* real byte length */
	size = iclog->ic_offset;
	if (v2)
		size += roundoff;
	iclog->ic_header.h_len = cpu_to_be32(size);

	bp = iclog->ic_bp;
	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));

	XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));

	/* Do we need to split this write into 2 parts? */
	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
		char		*dptr;

		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
		iclog->ic_bwritecnt = 2;

		/*
		 * Bump the cycle numbers at the start of each block in the
		 * part of the iclog that ends up in the buffer that gets
		 * written to the start of the log.
		 *
		 * Watch out for the header magic number case, though.
		 */
		dptr = (char *)&iclog->ic_header + count;
		for (i = 0; i < split; i += BBSIZE) {
			uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
			if (++cycle == XLOG_HEADER_MAGIC_NUM)
				cycle++;
			*(__be32 *)dptr = cpu_to_be32(cycle);

			dptr += BBSIZE;
		}
	} else {
		iclog->ic_bwritecnt = 1;
	}

	/* calculate the checksum */
	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
					    iclog->ic_datap, size);
	/*
	 * Intentionally corrupt the log record CRC based on the error injection
	 * frequency, if defined. This facilitates testing log recovery in the
	 * event of torn writes. Hence, set the IOABORT state to abort the log
	 * write on I/O completion and shutdown the fs. The subsequent mount
	 * detects the bad CRC and attempts to recover.
	 */
	if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
		iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
		iclog->ic_state |= XLOG_STATE_IOABORT;
		xfs_warn(log->l_mp,
	"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
Shutdown imminent.", 1918 be64_to_cpu(iclog->ic_header.h_lsn)); 1919 } 1920 1921 bp->b_io_length = BTOBB(count); 1922 bp->b_log_item = iclog; 1923 bp->b_flags &= ~XBF_FLUSH; 1924 bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA); 1925 1926 /* 1927 * Flush the data device before flushing the log to make sure all meta 1928 * data written back from the AIL actually made it to disk before 1929 * stamping the new log tail LSN into the log buffer. For an external 1930 * log we need to issue the flush explicitly, and unfortunately 1931 * synchronously here; for an internal log we can simply use the block 1932 * layer state machine for preflushes. 1933 */ 1934 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) 1935 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); 1936 else 1937 bp->b_flags |= XBF_FLUSH; 1938 1939 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1940 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1941 1942 xlog_verify_iclog(log, iclog, count, true); 1943 1944 /* account for log which doesn't start at block #0 */ 1945 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); 1946 1947 /* 1948 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem 1949 * is shutting down. 1950 */ 1951 error = xlog_bdstrat(bp); 1952 if (error) { 1953 xfs_buf_ioerror_alert(bp, "xlog_sync"); 1954 return error; 1955 } 1956 if (split) { 1957 bp = iclog->ic_log->l_xbuf; 1958 XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ 1959 xfs_buf_associate_memory(bp, 1960 (char *)&iclog->ic_header + count, split); 1961 bp->b_log_item = iclog; 1962 bp->b_flags &= ~XBF_FLUSH; 1963 bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA); 1964 1965 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1966 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1967 1968 /* account for internal log which doesn't start at block #0 */ 1969 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); 1970 error = xlog_bdstrat(bp); 1971 if (error) { 1972 xfs_buf_ioerror_alert(bp, "xlog_sync (split)"); 1973 return error; 1974 } 1975 } 1976 return 0; 1977 } /* xlog_sync */ 1978 1979 /* 1980 * Deallocate a log structure 1981 */ 1982 STATIC void 1983 xlog_dealloc_log( 1984 struct xlog *log) 1985 { 1986 xlog_in_core_t *iclog, *next_iclog; 1987 int i; 1988 1989 xlog_cil_destroy(log); 1990 1991 /* 1992 * Cycle all the iclogbuf locks to make sure all log IO completion 1993 * is done before we tear down these buffers. 1994 */ 1995 iclog = log->l_iclog; 1996 for (i = 0; i < log->l_iclog_bufs; i++) { 1997 xfs_buf_lock(iclog->ic_bp); 1998 xfs_buf_unlock(iclog->ic_bp); 1999 iclog = iclog->ic_next; 2000 } 2001 2002 /* 2003 * Always need to ensure that the extra buffer does not point to memory 2004 * owned by another log buffer before we free it. Also, cycle the lock 2005 * first to ensure we've completed IO on it. 2006 */ 2007 xfs_buf_lock(log->l_xbuf); 2008 xfs_buf_unlock(log->l_xbuf); 2009 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); 2010 xfs_buf_free(log->l_xbuf); 2011 2012 iclog = log->l_iclog; 2013 for (i = 0; i < log->l_iclog_bufs; i++) { 2014 xfs_buf_free(iclog->ic_bp); 2015 next_iclog = iclog->ic_next; 2016 kmem_free(iclog); 2017 iclog = next_iclog; 2018 } 2019 spinlock_destroy(&log->l_icloglock); 2020 2021 log->l_mp->m_log = NULL; 2022 kmem_free(log); 2023 } /* xlog_dealloc_log */ 2024 2025 /* 2026 * Update counters atomically now that memcpy is done. 
2027 */ 2028 /* ARGSUSED */ 2029 static inline void 2030 xlog_state_finish_copy( 2031 struct xlog *log, 2032 struct xlog_in_core *iclog, 2033 int record_cnt, 2034 int copy_bytes) 2035 { 2036 spin_lock(&log->l_icloglock); 2037 2038 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); 2039 iclog->ic_offset += copy_bytes; 2040 2041 spin_unlock(&log->l_icloglock); 2042 } /* xlog_state_finish_copy */ 2043 2044 2045 2046 2047 /* 2048 * print out info relating to regions written which consume 2049 * the reservation 2050 */ 2051 void 2052 xlog_print_tic_res( 2053 struct xfs_mount *mp, 2054 struct xlog_ticket *ticket) 2055 { 2056 uint i; 2057 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); 2058 2059 /* match with XLOG_REG_TYPE_* in xfs_log.h */ 2060 #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str 2061 static char *res_type_str[XLOG_REG_TYPE_MAX + 1] = { 2062 REG_TYPE_STR(BFORMAT, "bformat"), 2063 REG_TYPE_STR(BCHUNK, "bchunk"), 2064 REG_TYPE_STR(EFI_FORMAT, "efi_format"), 2065 REG_TYPE_STR(EFD_FORMAT, "efd_format"), 2066 REG_TYPE_STR(IFORMAT, "iformat"), 2067 REG_TYPE_STR(ICORE, "icore"), 2068 REG_TYPE_STR(IEXT, "iext"), 2069 REG_TYPE_STR(IBROOT, "ibroot"), 2070 REG_TYPE_STR(ILOCAL, "ilocal"), 2071 REG_TYPE_STR(IATTR_EXT, "iattr_ext"), 2072 REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), 2073 REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), 2074 REG_TYPE_STR(QFORMAT, "qformat"), 2075 REG_TYPE_STR(DQUOT, "dquot"), 2076 REG_TYPE_STR(QUOTAOFF, "quotaoff"), 2077 REG_TYPE_STR(LRHEADER, "LR header"), 2078 REG_TYPE_STR(UNMOUNT, "unmount"), 2079 REG_TYPE_STR(COMMIT, "commit"), 2080 REG_TYPE_STR(TRANSHDR, "trans header"), 2081 REG_TYPE_STR(ICREATE, "inode create") 2082 }; 2083 #undef REG_TYPE_STR 2084 2085 xfs_warn(mp, "ticket reservation summary:"); 2086 xfs_warn(mp, " unit res = %d bytes", 2087 ticket->t_unit_res); 2088 xfs_warn(mp, " current res = %d bytes", 2089 ticket->t_curr_res); 2090 xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", 2091 ticket->t_res_arr_sum, ticket->t_res_o_flow); 2092 xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", 2093 ticket->t_res_num_ophdrs, ophdr_spc); 2094 xfs_warn(mp, " ophdr + reg = %u bytes", 2095 ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); 2096 xfs_warn(mp, " num regions = %u", 2097 ticket->t_res_num); 2098 2099 for (i = 0; i < ticket->t_res_num; i++) { 2100 uint r_type = ticket->t_res_arr[i].r_type; 2101 xfs_warn(mp, "region[%u]: %s - %u bytes", i, 2102 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 2103 "bad-rtype" : res_type_str[r_type]), 2104 ticket->t_res_arr[i].r_len); 2105 } 2106 } 2107 2108 /* 2109 * Print a summary of the transaction. 
2110  */
2111 void
2112 xlog_print_trans(
2113 	struct xfs_trans	*tp)
2114 {
2115 	struct xfs_mount	*mp = tp->t_mountp;
2116 	struct xfs_log_item_desc *lidp;
2117 
2118 	/* dump core transaction and ticket info */
2119 	xfs_warn(mp, "transaction summary:");
2120 	xfs_warn(mp, "  log res   = %d", tp->t_log_res);
2121 	xfs_warn(mp, "  log count = %d", tp->t_log_count);
2122 	xfs_warn(mp, "  flags     = 0x%x", tp->t_flags);
2123 
2124 	xlog_print_tic_res(mp, tp->t_ticket);
2125 
2126 	/* dump each log item */
2127 	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
2128 		struct xfs_log_item *lip = lidp->lid_item;
2129 		struct xfs_log_vec *lv = lip->li_lv;
2130 		struct xfs_log_iovec *vec;
2131 		int i;
2132 
2133 		xfs_warn(mp, "log item: ");
2134 		xfs_warn(mp, "  type	= 0x%x", lip->li_type);
2135 		xfs_warn(mp, "  flags	= 0x%x", lip->li_flags);
2136 		if (!lv)
2137 			continue;
2138 		xfs_warn(mp, "  niovecs	= %d", lv->lv_niovecs);
2139 		xfs_warn(mp, "  size	= %d", lv->lv_size);
2140 		xfs_warn(mp, "  bytes	= %d", lv->lv_bytes);
2141 		xfs_warn(mp, "  buf len	= %d", lv->lv_buf_len);
2142 
2143 		/* dump each iovec for the log item */
2144 		vec = lv->lv_iovecp;
2145 		for (i = 0; i < lv->lv_niovecs; i++) {
2146 			int dumplen = min(vec->i_len, 32);
2147 
2148 			xfs_warn(mp, "  iovec[%d]", i);
2149 			xfs_warn(mp, "    type	= 0x%x", vec->i_type);
2150 			xfs_warn(mp, "    len	= %d", vec->i_len);
2151 			xfs_warn(mp, "    first %d bytes of iovec[%d]:", dumplen, i);
2152 			xfs_hex_dump(vec->i_addr, dumplen);
2153 
2154 			vec++;
2155 		}
2156 	}
2157 }
2158 
2159 /*
2160  * Calculate the potential space needed by the log vector. Each region gets
2161  * its own xlog_op_header_t and may need to be double word aligned.
2162  */
2163 static int
2164 xlog_write_calc_vec_length(
2165 	struct xlog_ticket	*ticket,
2166 	struct xfs_log_vec	*log_vector)
2167 {
2168 	struct xfs_log_vec	*lv;
2169 	int			headers = 0;
2170 	int			len = 0;
2171 	int			i;
2172 
2173 	/* acct for start rec of xact */
2174 	if (ticket->t_flags & XLOG_TIC_INITED)
2175 		headers++;
2176 
2177 	for (lv = log_vector; lv; lv = lv->lv_next) {
2178 		/* we don't write ordered log vectors */
2179 		if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED)
2180 			continue;
2181 
2182 		headers += lv->lv_niovecs;
2183 
2184 		for (i = 0; i < lv->lv_niovecs; i++) {
2185 			struct xfs_log_iovec	*vecp = &lv->lv_iovecp[i];
2186 
2187 			len += vecp->i_len;
2188 			xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
2189 		}
2190 	}
2191 
2192 	ticket->t_res_num_ophdrs += headers;
2193 	len += headers * sizeof(struct xlog_op_header);
2194 
2195 	return len;
2196 }
2197 
2198 /*
2199  * If this is the first write for the transaction, insert a start record. We
2200  * can't be trying to commit, nor have any "partial_copy", if we are inited.
2201  */
2202 static int
2203 xlog_write_start_rec(
2204 	struct xlog_op_header	*ophdr,
2205 	struct xlog_ticket	*ticket)
2206 {
2207 	if (!(ticket->t_flags & XLOG_TIC_INITED))
2208 		return 0;
2209 
2210 	ophdr->oh_tid	= cpu_to_be32(ticket->t_tid);
2211 	ophdr->oh_clientid = ticket->t_clientid;
2212 	ophdr->oh_len = 0;
2213 	ophdr->oh_flags = XLOG_START_TRANS;
2214 	ophdr->oh_res2 = 0;
2215 
2216 	ticket->t_flags &= ~XLOG_TIC_INITED;
2217 
2218 	return sizeof(struct xlog_op_header);
2219 }
2220 
2221 static xlog_op_header_t *
2222 xlog_write_setup_ophdr(
2223 	struct xlog		*log,
2224 	struct xlog_op_header	*ophdr,
2225 	struct xlog_ticket	*ticket,
2226 	uint			flags)
2227 {
2228 	ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
2229 	ophdr->oh_clientid = ticket->t_clientid;
2230 	ophdr->oh_res2 = 0;
2231 
2232 	/* are we copying a commit or unmount record? */
2233 	ophdr->oh_flags = flags;
2234 
2235 	/*
2236 	 * We've seen logs corrupted with bad transaction client ids. This
2237 	 * check makes sure that XFS doesn't write any more of them. Turn a
2238 	 * bad clientid into an EIO and shut down the filesystem.
2239 	 */
2240 	switch (ophdr->oh_clientid)  {
2241 	case XFS_TRANSACTION:
2242 	case XFS_VOLUME:
2243 	case XFS_LOG:
2244 		break;
2245 	default:
2246 		xfs_warn(log->l_mp,
2247 			"Bad XFS transaction clientid 0x%x in ticket "PTR_FMT,
2248 			ophdr->oh_clientid, ticket);
2249 		return NULL;
2250 	}
2251 
2252 	return ophdr;
2253 }
2254 
2255 /*
2256  * Set up the parameters of the region copy into the log. This has
2257  * to handle region write split across multiple log buffers - this
2258  * state is kept external to this function so that this code can
2259  * be written in an obvious, self-documenting manner.
2260  */
2261 static int
2262 xlog_write_setup_copy(
2263 	struct xlog_ticket	*ticket,
2264 	struct xlog_op_header	*ophdr,
2265 	int			space_available,
2266 	int			space_required,
2267 	int			*copy_off,
2268 	int			*copy_len,
2269 	int			*last_was_partial_copy,
2270 	int			*bytes_consumed)
2271 {
2272 	int			still_to_copy;
2273 
2274 	still_to_copy = space_required - *bytes_consumed;
2275 	*copy_off = *bytes_consumed;
2276 
2277 	if (still_to_copy <= space_available) {
2278 		/* write of region completes here */
2279 		*copy_len = still_to_copy;
2280 		ophdr->oh_len = cpu_to_be32(*copy_len);
2281 		if (*last_was_partial_copy)
2282 			ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
2283 		*last_was_partial_copy = 0;
2284 		*bytes_consumed = 0;
2285 		return 0;
2286 	}
2287 
2288 	/* partial write of region, needs extra log op header reservation */
2289 	*copy_len = space_available;
2290 	ophdr->oh_len = cpu_to_be32(*copy_len);
2291 	ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
2292 	if (*last_was_partial_copy)
2293 		ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
2294 	*bytes_consumed += *copy_len;
2295 	(*last_was_partial_copy)++;
2296 
2297 	/* account for new log op header */
2298 	ticket->t_curr_res -= sizeof(struct xlog_op_header);
2299 	ticket->t_res_num_ophdrs++;
2300 
2301 	return sizeof(struct xlog_op_header);
2302 }
2303 
2304 static int
2305 xlog_write_copy_finish(
2306 	struct xlog		*log,
2307 	struct xlog_in_core	*iclog,
2308 	uint			flags,
2309 	int			*record_cnt,
2310 	int			*data_cnt,
2311 	int			*partial_copy,
2312 	int			*partial_copy_len,
2313 	int			log_offset,
2314 	struct xlog_in_core	**commit_iclog)
2315 {
2316 	if (*partial_copy) {
2317 		/*
2318 		 * This iclog has already been marked WANT_SYNC by
2319 		 * xlog_state_get_iclog_space.
2320 		 */
2321 		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2322 		*record_cnt = 0;
2323 		*data_cnt = 0;
2324 		return xlog_state_release_iclog(log, iclog);
2325 	}
2326 
2327 	*partial_copy = 0;
2328 	*partial_copy_len = 0;
2329 
2330 	if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
2331 		/* no more space in this iclog - push it. */
2332 		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2333 		*record_cnt = 0;
2334 		*data_cnt = 0;
2335 
2336 		spin_lock(&log->l_icloglock);
2337 		xlog_state_want_sync(log, iclog);
2338 		spin_unlock(&log->l_icloglock);
2339 
2340 		if (!commit_iclog)
2341 			return xlog_state_release_iclog(log, iclog);
2342 		ASSERT(flags & XLOG_COMMIT_TRANS);
2343 		*commit_iclog = iclog;
2344 	}
2345 
2346 	return 0;
2347 }
2348 
2349 /*
2350  * Write some region out to in-core log
2351  *
2352  * This will be called when writing externally provided regions or when
2353  * writing out a commit record for a given transaction.
2354  *
2355  * General algorithm:
2356  *	1. Find total length of this write. This may include adding to the
2357  *	   lengths passed in.
2358  *	2. Check whether we violate the ticket's reservation.
2359  *	3. While writing to this iclog
2360  *	    A. Reserve as much space in this iclog as we can get
2361  *	    B. If this is first write, save away start lsn
2362  *	    C. While writing this region:
2363  *		1. If first write of transaction, write start record
2364  *		2. Write log operation header (header per region)
2365  *		3. Find out if we can fit entire region into this iclog
2366  *		4. Potentially, verify destination memcpy ptr
2367  *		5. Memcpy (partial) region
2368  *		6. If partial copy, release iclog; otherwise, continue
2369  *			copying more regions into current iclog
2370  *	4. Mark want sync bit (in simulation mode)
2371  *	5. Release iclog for potential flush to on-disk log.
2372  *
2373  * ERRORS:
2374  * 1.	Panic if reservation is overrun. This should never happen since
2375  *	reservation amounts are generated internal to the filesystem.
2376  * NOTES:
2377  * 1. Tickets are single threaded data structures.
2378  * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
2379  *	syncing routine. When a single log_write region needs to span
2380  *	multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
2381  *	on all log operation writes which don't contain the end of the
2382  *	region. The XLOG_END_TRANS bit is used for the in-core log
2383  *	operation which contains the end of the continued log_write region.
2384  * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2385  *	we don't really know exactly how much space will be used. As a result,
2386  *	we don't update ic_offset until the end when we know exactly how many
2387  *	bytes have been written out.
2388  */
2389 int
2390 xlog_write(
2391 	struct xlog		*log,
2392 	struct xfs_log_vec	*log_vector,
2393 	struct xlog_ticket	*ticket,
2394 	xfs_lsn_t		*start_lsn,
2395 	struct xlog_in_core	**commit_iclog,
2396 	uint			flags)
2397 {
2398 	struct xlog_in_core	*iclog = NULL;
2399 	struct xfs_log_iovec	*vecp;
2400 	struct xfs_log_vec	*lv;
2401 	int			len;
2402 	int			index;
2403 	int			partial_copy = 0;
2404 	int			partial_copy_len = 0;
2405 	int			contwr = 0;
2406 	int			record_cnt = 0;
2407 	int			data_cnt = 0;
2408 	int			error;
2409 
2410 	*start_lsn = 0;
2411 
2412 	len = xlog_write_calc_vec_length(ticket, log_vector);
2413 
2414 	/*
2415 	 * Region headers and bytes are already accounted for.
2416 	 * We only need to take into account start records and
2417 	 * split regions in this function.
2418 	 */
2419 	if (ticket->t_flags & XLOG_TIC_INITED)
2420 		ticket->t_curr_res -= sizeof(xlog_op_header_t);
2421 
2422 	/*
2423 	 * Commit record headers need to be accounted for. These
2424 	 * come in as separate writes so are easy to detect.
2425 	 */
2426 	if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
2427 		ticket->t_curr_res -= sizeof(xlog_op_header_t);
2428 
2429 	if (ticket->t_curr_res < 0) {
2430 		xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2431 		     "ctx ticket reservation ran out. Need to up reservation");
2432 		xlog_print_tic_res(log->l_mp, ticket);
2433 		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
2434 	}
2435 
2436 	index = 0;
2437 	lv = log_vector;
2438 	vecp = lv->lv_iovecp;
2439 	while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2440 		void		*ptr;
2441 		int		log_offset;
2442 
2443 		error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2444 						   &contwr, &log_offset);
2445 		if (error)
2446 			return error;
2447 
2448 		ASSERT(log_offset <= iclog->ic_size - 1);
2449 		ptr = iclog->ic_datap + log_offset;
2450 
2451 		/* start_lsn is the first lsn written to. That's all we need. */
2452 		if (!*start_lsn)
2453 			*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2454 
2455 		/*
2456 		 * This loop writes out as many regions as can fit in the amount
2457 		 * of space which was allocated by xlog_state_get_iclog_space().
2458 		 */
2459 		while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) {
2460 			struct xfs_log_iovec	*reg;
2461 			struct xlog_op_header	*ophdr;
2462 			int			start_rec_copy;
2463 			int			copy_len;
2464 			int			copy_off;
2465 			bool			ordered = false;
2466 
2467 			/* ordered log vectors have no regions to write */
2468 			if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
2469 				ASSERT(lv->lv_niovecs == 0);
2470 				ordered = true;
2471 				goto next_lv;
2472 			}
2473 
2474 			reg = &vecp[index];
2475 			ASSERT(reg->i_len % sizeof(int32_t) == 0);
2476 			ASSERT((unsigned long)ptr % sizeof(int32_t) == 0);
2477 
2478 			start_rec_copy = xlog_write_start_rec(ptr, ticket);
2479 			if (start_rec_copy) {
2480 				record_cnt++;
2481 				xlog_write_adv_cnt(&ptr, &len, &log_offset,
2482 						   start_rec_copy);
2483 			}
2484 
2485 			ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2486 			if (!ophdr)
2487 				return -EIO;
2488 
2489 			xlog_write_adv_cnt(&ptr, &len, &log_offset,
2490 					   sizeof(struct xlog_op_header));
2491 
2492 			len += xlog_write_setup_copy(ticket, ophdr,
2493 						     iclog->ic_size-log_offset,
2494 						     reg->i_len,
2495 						     &copy_off, &copy_len,
2496 						     &partial_copy,
2497 						     &partial_copy_len);
2498 			xlog_verify_dest_ptr(log, ptr);
2499 
2500 			/*
2501 			 * Copy region.
2502 			 *
2503 			 * Unmount records just log an opheader, so can have
2504 			 * empty payloads with no data region to copy. Hence we
2505 			 * only copy the payload if the vector says it has data
2506 			 * to copy.
2507 			 */
2508 			ASSERT(copy_len >= 0);
2509 			if (copy_len > 0) {
2510 				memcpy(ptr, reg->i_addr + copy_off, copy_len);
2511 				xlog_write_adv_cnt(&ptr, &len, &log_offset,
2512 						   copy_len);
2513 			}
2514 			copy_len += start_rec_copy + sizeof(xlog_op_header_t);
2515 			record_cnt++;
2516 			data_cnt += contwr ? copy_len : 0;
2517 
2518 			error = xlog_write_copy_finish(log, iclog, flags,
2519 						       &record_cnt, &data_cnt,
2520 						       &partial_copy,
2521 						       &partial_copy_len,
2522 						       log_offset,
2523 						       commit_iclog);
2524 			if (error)
2525 				return error;
2526 
2527 			/*
2528 			 * if we had a partial copy, we need to get more iclog
2529 			 * space but we don't want to increment the region
2530 			 * index because there is still more in this region to
2531 			 * write.
2532 			 *
2533 			 * If we completed writing this region, and we flushed
2534 			 * the iclog (indicated by resetting of the record
2535 			 * count), then we also need to get more log space. If
2536 			 * this was the last record, though, we are done and
2537 			 * can just return.
2538 			 */
2539 			if (partial_copy)
2540 				break;
2541 
2542 			if (++index == lv->lv_niovecs) {
2543 next_lv:
2544 				lv = lv->lv_next;
2545 				index = 0;
2546 				if (lv)
2547 					vecp = lv->lv_iovecp;
2548 			}
2549 			if (record_cnt == 0 && !ordered) {
2550 				if (!lv)
2551 					return 0;
2552 				break;
2553 			}
2554 		}
2555 	}
2556 
2557 	ASSERT(len == 0);
2558 
2559 	xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2560 	if (!commit_iclog)
2561 		return xlog_state_release_iclog(log, iclog);
2562 
2563 	ASSERT(flags & XLOG_COMMIT_TRANS);
2564 	*commit_iclog = iclog;
2565 	return 0;
2566 }
2567 
2568 
2569 /*****************************************************************************
2570  *
2571  *		State Machine functions
2572  *
2573  *****************************************************************************
2574  */
2575 
2576 /* Clean iclogs starting from the head. This ordering must be
2577  * maintained, so an iclog doesn't become ACTIVE beyond one that
2578  * is SYNCING. This is also required to maintain the notion that we use
2579  * an ordered wait queue to hold off would-be writers to the log when every
2580  * iclog is trying to sync to disk.
2581  *
2582  * State Change: DIRTY -> ACTIVE
2583  */
2584 STATIC void
2585 xlog_state_clean_log(
2586 	struct xlog *log)
2587 {
2588 	xlog_in_core_t	*iclog;
2589 	int changed = 0;
2590 
2591 	iclog = log->l_iclog;
2592 	do {
2593 		if (iclog->ic_state == XLOG_STATE_DIRTY) {
2594 			iclog->ic_state	= XLOG_STATE_ACTIVE;
2595 			iclog->ic_offset       = 0;
2596 			ASSERT(iclog->ic_callback == NULL);
2597 			/*
2598 			 * If the number of ops in this iclog indicates it just
2599 			 * contains the dummy transaction, we can
2600 			 * change state into IDLE (the second time around).
2601 			 * Otherwise we should change the state into
2602 			 * NEED a dummy.
2603 			 * We don't need to cover the dummy.
2604 			 */
2605 			if (!changed &&
2606 			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
2607 					XLOG_COVER_OPS)) {
2608 				changed = 1;
2609 			} else {
2610 				/*
2611 				 * We have two dirty iclogs so start over.
2612 				 * This could also mean the num of ops
2613 				 * indicates this is not the dummy going out.
2614 				 */
2615 				changed = 2;
2616 			}
2617 			iclog->ic_header.h_num_logops = 0;
2618 			memset(iclog->ic_header.h_cycle_data, 0,
2619 			      sizeof(iclog->ic_header.h_cycle_data));
2620 			iclog->ic_header.h_lsn = 0;
2621 		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2622 			/* do nothing */;
2623 		else
2624 			break;	/* stop cleaning */
2625 		iclog = iclog->ic_next;
2626 	} while (iclog != log->l_iclog);
2627 
2628 	/* log is locked when we are called */
2629 	/*
2630 	 * Change state for the dummy log recording.
2631 	 * We usually go to NEED. But we go to NEED2 if the value of
2632 	 * 'changed' indicates we are done writing the dummy record.
2633 	 * If we are done with the second dummy record (DONE2), then
2634 	 * we go to IDLE.
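	 *
	 * As a compact, illustrative summary of the switch below (covered
	 * states abbreviated by dropping the XLOG_STATE_COVER_ prefix):
	 *
	 *	current state		changed == 1	changed == 2
	 *	IDLE / NEED / NEED2	NEED		NEED
	 *	DONE			NEED2		NEED
	 *	DONE2			IDLE		NEED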
2635 */ 2636 if (changed) { 2637 switch (log->l_covered_state) { 2638 case XLOG_STATE_COVER_IDLE: 2639 case XLOG_STATE_COVER_NEED: 2640 case XLOG_STATE_COVER_NEED2: 2641 log->l_covered_state = XLOG_STATE_COVER_NEED; 2642 break; 2643 2644 case XLOG_STATE_COVER_DONE: 2645 if (changed == 1) 2646 log->l_covered_state = XLOG_STATE_COVER_NEED2; 2647 else 2648 log->l_covered_state = XLOG_STATE_COVER_NEED; 2649 break; 2650 2651 case XLOG_STATE_COVER_DONE2: 2652 if (changed == 1) 2653 log->l_covered_state = XLOG_STATE_COVER_IDLE; 2654 else 2655 log->l_covered_state = XLOG_STATE_COVER_NEED; 2656 break; 2657 2658 default: 2659 ASSERT(0); 2660 } 2661 } 2662 } /* xlog_state_clean_log */ 2663 2664 STATIC xfs_lsn_t 2665 xlog_get_lowest_lsn( 2666 struct xlog *log) 2667 { 2668 xlog_in_core_t *lsn_log; 2669 xfs_lsn_t lowest_lsn, lsn; 2670 2671 lsn_log = log->l_iclog; 2672 lowest_lsn = 0; 2673 do { 2674 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { 2675 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); 2676 if ((lsn && !lowest_lsn) || 2677 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { 2678 lowest_lsn = lsn; 2679 } 2680 } 2681 lsn_log = lsn_log->ic_next; 2682 } while (lsn_log != log->l_iclog); 2683 return lowest_lsn; 2684 } 2685 2686 2687 STATIC void 2688 xlog_state_do_callback( 2689 struct xlog *log, 2690 int aborted, 2691 struct xlog_in_core *ciclog) 2692 { 2693 xlog_in_core_t *iclog; 2694 xlog_in_core_t *first_iclog; /* used to know when we've 2695 * processed all iclogs once */ 2696 xfs_log_callback_t *cb, *cb_next; 2697 int flushcnt = 0; 2698 xfs_lsn_t lowest_lsn; 2699 int ioerrors; /* counter: iclogs with errors */ 2700 int loopdidcallbacks; /* flag: inner loop did callbacks*/ 2701 int funcdidcallbacks; /* flag: function did callbacks */ 2702 int repeats; /* for issuing console warnings if 2703 * looping too many times */ 2704 int wake = 0; 2705 2706 spin_lock(&log->l_icloglock); 2707 first_iclog = iclog = log->l_iclog; 2708 ioerrors = 0; 2709 funcdidcallbacks = 0; 2710 repeats = 0; 2711 2712 do { 2713 /* 2714 * Scan all iclogs starting with the one pointed to by the 2715 * log. Reset this starting point each time the log is 2716 * unlocked (during callbacks). 2717 * 2718 * Keep looping through iclogs until one full pass is made 2719 * without running any callbacks. 2720 */ 2721 first_iclog = log->l_iclog; 2722 iclog = log->l_iclog; 2723 loopdidcallbacks = 0; 2724 repeats++; 2725 2726 do { 2727 2728 /* skip all iclogs in the ACTIVE & DIRTY states */ 2729 if (iclog->ic_state & 2730 (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) { 2731 iclog = iclog->ic_next; 2732 continue; 2733 } 2734 2735 /* 2736 * Between marking a filesystem SHUTDOWN and stopping 2737 * the log, we do flush all iclogs to disk (if there 2738 * wasn't a log I/O error). So, we do want things to 2739 * go smoothly in case of just a SHUTDOWN w/o a 2740 * LOG_IO_ERROR. 2741 */ 2742 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { 2743 /* 2744 * Can only perform callbacks in order. Since 2745 * this iclog is not in the DONE_SYNC/ 2746 * DO_CALLBACK state, we skip the rest and 2747 * just try to clean up. If we set our iclog 2748 * to DO_CALLBACK, we will not process it when 2749 * we retry since a previous iclog is in the 2750 * CALLBACK and the state cannot change since 2751 * we are holding the l_icloglock. 
2752 				 */
2753 				if (!(iclog->ic_state &
2754 					(XLOG_STATE_DONE_SYNC |
2755 						XLOG_STATE_DO_CALLBACK))) {
2756 					if (ciclog && (ciclog->ic_state ==
2757 							XLOG_STATE_DONE_SYNC)) {
2758 						ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2759 					}
2760 					break;
2761 				}
2762 				/*
2763 				 * We now have an iclog that is in either the
2764 				 * DO_CALLBACK or DONE_SYNC states. The other
2765 				 * states (WANT_SYNC, SYNCING, or CALLBACK)
2766 				 * were caught by the above if and are going
2767 				 * to be cleaned by other threads, i.e. we
2768 				 * aren't doing their callbacks.
2769 				 */
2770 
2771 				/*
2772 				 * We will do one more check here to see if we
2773 				 * have chased our tail around.
2774 				 */
2775 
2776 				lowest_lsn = xlog_get_lowest_lsn(log);
2777 				if (lowest_lsn &&
2778 				    XFS_LSN_CMP(lowest_lsn,
2779 						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
2780 					iclog = iclog->ic_next;
2781 					continue; /* Leave this iclog for
2782 						   * another thread */
2783 				}
2784 
2785 				iclog->ic_state = XLOG_STATE_CALLBACK;
2786 
2787 
2788 				/*
2789 				 * Completion of a iclog IO does not imply that
2790 				 * a transaction has completed, as transactions
2791 				 * can be large enough to span many iclogs. We
2792 				 * cannot change the tail of the log half way
2793 				 * through a transaction as this may be the only
2794 				 * transaction in the log and moving the tail to
2795 				 * point to the middle of it will prevent
2796 				 * recovery from finding the start of the
2797 				 * transaction. Hence we should only update the
2798 				 * last_sync_lsn if this iclog contains
2799 				 * transaction completion callbacks on it.
2800 				 *
2801 				 * We have to do this before we drop the
2802 				 * icloglock to ensure we are the only one that
2803 				 * can update it.
2804 				 */
2805 				ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2806 					be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
2807 				if (iclog->ic_callback)
2808 					atomic64_set(&log->l_last_sync_lsn,
2809 						be64_to_cpu(iclog->ic_header.h_lsn));
2810 
2811 			} else
2812 				ioerrors++;
2813 
2814 			spin_unlock(&log->l_icloglock);
2815 
2816 			/*
2817 			 * Keep processing entries in the callback list until
2818 			 * we come around and it is empty.  We need to
2819 			 * atomically see that the list is empty and change the
2820 			 * state to DIRTY so that we don't miss any more
2821 			 * callbacks being added.
2822 			 */
2823 			spin_lock(&iclog->ic_callback_lock);
2824 			cb = iclog->ic_callback;
2825 			while (cb) {
2826 				iclog->ic_callback_tail = &(iclog->ic_callback);
2827 				iclog->ic_callback = NULL;
2828 				spin_unlock(&iclog->ic_callback_lock);
2829 
2830 				/* perform callbacks in the order given */
2831 				for (; cb; cb = cb_next) {
2832 					cb_next = cb->cb_next;
2833 					cb->cb_func(cb->cb_arg, aborted);
2834 				}
2835 				spin_lock(&iclog->ic_callback_lock);
2836 				cb = iclog->ic_callback;
2837 			}
2838 
2839 			loopdidcallbacks++;
2840 			funcdidcallbacks++;
2841 
2842 			spin_lock(&log->l_icloglock);
2843 			ASSERT(iclog->ic_callback == NULL);
2844 			spin_unlock(&iclog->ic_callback_lock);
2845 			if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2846 				iclog->ic_state = XLOG_STATE_DIRTY;
2847 
2848 			/*
2849 			 * Transition from DIRTY to ACTIVE if applicable.
2850 			 * NOP if STATE_IOERROR.
2851 */ 2852 xlog_state_clean_log(log); 2853 2854 /* wake up threads waiting in xfs_log_force() */ 2855 wake_up_all(&iclog->ic_force_wait); 2856 2857 iclog = iclog->ic_next; 2858 } while (first_iclog != iclog); 2859 2860 if (repeats > 5000) { 2861 flushcnt += repeats; 2862 repeats = 0; 2863 xfs_warn(log->l_mp, 2864 "%s: possible infinite loop (%d iterations)", 2865 __func__, flushcnt); 2866 } 2867 } while (!ioerrors && loopdidcallbacks); 2868 2869 #ifdef DEBUG 2870 /* 2871 * Make one last gasp attempt to see if iclogs are being left in limbo. 2872 * If the above loop finds an iclog earlier than the current iclog and 2873 * in one of the syncing states, the current iclog is put into 2874 * DO_CALLBACK and the callbacks are deferred to the completion of the 2875 * earlier iclog. Walk the iclogs in order and make sure that no iclog 2876 * is in DO_CALLBACK unless an earlier iclog is in one of the syncing 2877 * states. 2878 * 2879 * Note that SYNCING|IOABORT is a valid state so we cannot just check 2880 * for ic_state == SYNCING. 2881 */ 2882 if (funcdidcallbacks) { 2883 first_iclog = iclog = log->l_iclog; 2884 do { 2885 ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK); 2886 /* 2887 * Terminate the loop if iclogs are found in states 2888 * which will cause other threads to clean up iclogs. 2889 * 2890 * SYNCING - i/o completion will go through logs 2891 * DONE_SYNC - interrupt thread should be waiting for 2892 * l_icloglock 2893 * IOERROR - give up hope all ye who enter here 2894 */ 2895 if (iclog->ic_state == XLOG_STATE_WANT_SYNC || 2896 iclog->ic_state & XLOG_STATE_SYNCING || 2897 iclog->ic_state == XLOG_STATE_DONE_SYNC || 2898 iclog->ic_state == XLOG_STATE_IOERROR ) 2899 break; 2900 iclog = iclog->ic_next; 2901 } while (first_iclog != iclog); 2902 } 2903 #endif 2904 2905 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) 2906 wake = 1; 2907 spin_unlock(&log->l_icloglock); 2908 2909 if (wake) 2910 wake_up_all(&log->l_flush_wait); 2911 } 2912 2913 2914 /* 2915 * Finish transitioning this iclog to the dirty state. 2916 * 2917 * Make sure that we completely execute this routine only when this is 2918 * the last call to the iclog. There is a good chance that iclog flushes, 2919 * when we reach the end of the physical log, get turned into 2 separate 2920 * calls to bwrite. Hence, one iclog flush could generate two calls to this 2921 * routine. By using the reference count bwritecnt, we guarantee that only 2922 * the second completion goes through. 2923 * 2924 * Callbacks could take time, so they are done outside the scope of the 2925 * global state machine log lock. 2926 */ 2927 STATIC void 2928 xlog_state_done_syncing( 2929 xlog_in_core_t *iclog, 2930 int aborted) 2931 { 2932 struct xlog *log = iclog->ic_log; 2933 2934 spin_lock(&log->l_icloglock); 2935 2936 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || 2937 iclog->ic_state == XLOG_STATE_IOERROR); 2938 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); 2939 ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); 2940 2941 2942 /* 2943 * If we got an error, either on the first buffer, or in the case of 2944 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR, 2945 * and none should ever be attempted to be written to disk 2946 * again. 
2947 */ 2948 if (iclog->ic_state != XLOG_STATE_IOERROR) { 2949 if (--iclog->ic_bwritecnt == 1) { 2950 spin_unlock(&log->l_icloglock); 2951 return; 2952 } 2953 iclog->ic_state = XLOG_STATE_DONE_SYNC; 2954 } 2955 2956 /* 2957 * Someone could be sleeping prior to writing out the next 2958 * iclog buffer, we wake them all, one will get to do the 2959 * I/O, the others get to wait for the result. 2960 */ 2961 wake_up_all(&iclog->ic_write_wait); 2962 spin_unlock(&log->l_icloglock); 2963 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ 2964 } /* xlog_state_done_syncing */ 2965 2966 2967 /* 2968 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must 2969 * sleep. We wait on the flush queue on the head iclog as that should be 2970 * the first iclog to complete flushing. Hence if all iclogs are syncing, 2971 * we will wait here and all new writes will sleep until a sync completes. 2972 * 2973 * The in-core logs are used in a circular fashion. They are not used 2974 * out-of-order even when an iclog past the head is free. 2975 * 2976 * return: 2977 * * log_offset where xlog_write() can start writing into the in-core 2978 * log's data space. 2979 * * in-core log pointer to which xlog_write() should write. 2980 * * boolean indicating this is a continued write to an in-core log. 2981 * If this is the last write, then the in-core log's offset field 2982 * needs to be incremented, depending on the amount of data which 2983 * is copied. 2984 */ 2985 STATIC int 2986 xlog_state_get_iclog_space( 2987 struct xlog *log, 2988 int len, 2989 struct xlog_in_core **iclogp, 2990 struct xlog_ticket *ticket, 2991 int *continued_write, 2992 int *logoffsetp) 2993 { 2994 int log_offset; 2995 xlog_rec_header_t *head; 2996 xlog_in_core_t *iclog; 2997 int error; 2998 2999 restart: 3000 spin_lock(&log->l_icloglock); 3001 if (XLOG_FORCED_SHUTDOWN(log)) { 3002 spin_unlock(&log->l_icloglock); 3003 return -EIO; 3004 } 3005 3006 iclog = log->l_iclog; 3007 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 3008 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); 3009 3010 /* Wait for log writes to have flushed */ 3011 xlog_wait(&log->l_flush_wait, &log->l_icloglock); 3012 goto restart; 3013 } 3014 3015 head = &iclog->ic_header; 3016 3017 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ 3018 log_offset = iclog->ic_offset; 3019 3020 /* On the 1st write to an iclog, figure out lsn. This works 3021 * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are 3022 * committing to. If the offset is set, that's how many blocks 3023 * must be written. 3024 */ 3025 if (log_offset == 0) { 3026 ticket->t_curr_res -= log->l_iclog_hsize; 3027 xlog_tic_add_region(ticket, 3028 log->l_iclog_hsize, 3029 XLOG_REG_TYPE_LRHEADER); 3030 head->h_cycle = cpu_to_be32(log->l_curr_cycle); 3031 head->h_lsn = cpu_to_be64( 3032 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 3033 ASSERT(log->l_curr_block >= 0); 3034 } 3035 3036 /* If there is enough room to write everything, then do it. Otherwise, 3037 * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC 3038 * bit is on, so this will get flushed out. Don't update ic_offset 3039 * until you know exactly how many bytes get copied. Therefore, wait 3040 * until later to update ic_offset. 3041 * 3042 * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's 3043 * can fit into remaining data section. 
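 *
 * As a worked illustration (the 12 byte size comes from the on-disk
 * layout of xlog_op_header_t in xfs_log_format.h): with fewer than
 * 2 * 12 = 24 bytes left, e.g. ic_size - ic_offset == 20, not even two
 * bare op headers would fit, so the check below switches this iclog
 * out and restarts on the next one.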
3044  */
3045 	if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
3046 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3047 
3048 		/*
3049 		 * If I'm the only one writing to this iclog, sync it to disk.
3050 		 * We need to do an atomic compare and decrement here to avoid
3051 		 * racing with concurrent atomic_dec_and_lock() calls in
3052 		 * xlog_state_release_iclog() when there is more than one
3053 		 * reference to the iclog.
3054 		 */
3055 		if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
3056 			/* we are the only one */
3057 			spin_unlock(&log->l_icloglock);
3058 			error = xlog_state_release_iclog(log, iclog);
3059 			if (error)
3060 				return error;
3061 		} else {
3062 			spin_unlock(&log->l_icloglock);
3063 		}
3064 		goto restart;
3065 	}
3066 
3067 	/* Do we have enough room to write the full amount in the remainder
3068 	 * of this iclog?  Or must we continue a write on the next iclog and
3069 	 * mark this iclog as completely taken?  In the case where we switch
3070 	 * iclogs (to mark it taken), this particular iclog will release/sync
3071 	 * to disk in xlog_write().
3072 	 */
3073 	if (len <= iclog->ic_size - iclog->ic_offset) {
3074 		*continued_write = 0;
3075 		iclog->ic_offset += len;
3076 	} else {
3077 		*continued_write = 1;
3078 		xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
3079 	}
3080 	*iclogp = iclog;
3081 
3082 	ASSERT(iclog->ic_offset <= iclog->ic_size);
3083 	spin_unlock(&log->l_icloglock);
3084 
3085 	*logoffsetp = log_offset;
3086 	return 0;
3087 }	/* xlog_state_get_iclog_space */
3088 
3089 /* The first cnt-1 times through here we don't need to
3090  * move the grant write head because the permanent
3091  * reservation has reserved cnt times the unit amount.
3092  * Release part of current permanent unit reservation and
3093  * reset current reservation to be one unit's worth.  Also
3094  * move grant reservation head forward.
3095  */
3096 STATIC void
3097 xlog_regrant_reserve_log_space(
3098 	struct xlog		*log,
3099 	struct xlog_ticket	*ticket)
3100 {
3101 	trace_xfs_log_regrant_reserve_enter(log, ticket);
3102 
3103 	if (ticket->t_cnt > 0)
3104 		ticket->t_cnt--;
3105 
3106 	xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3107 					ticket->t_curr_res);
3108 	xlog_grant_sub_space(log, &log->l_write_head.grant,
3109 					ticket->t_curr_res);
3110 	ticket->t_curr_res = ticket->t_unit_res;
3111 	xlog_tic_reset_res(ticket);
3112 
3113 	trace_xfs_log_regrant_reserve_sub(log, ticket);
3114 
3115 	/* just return if we still have some of the pre-reserved space */
3116 	if (ticket->t_cnt > 0)
3117 		return;
3118 
3119 	xlog_grant_add_space(log, &log->l_reserve_head.grant,
3120 					ticket->t_unit_res);
3121 
3122 	trace_xfs_log_regrant_reserve_exit(log, ticket);
3123 
3124 	ticket->t_curr_res = ticket->t_unit_res;
3125 	xlog_tic_reset_res(ticket);
3126 }	/* xlog_regrant_reserve_log_space */
3127 
3128 
3129 /*
3130  * Give back the space left from a reservation.
3131  *
3132  * All the information we need to make a correct determination of space left
3133  * is present.  For non-permanent reservations, things are quite easy.  The
3134  * count should have been decremented to zero.  We only need to deal with the
3135  * space remaining in the current reservation part of the ticket.  If the
3136  * ticket contains a permanent reservation, there may be left over space which
3137  * needs to be released.  A count of N means that N-1 refills of the current
3138  * reservation can be done before we need to ask for more space.  The first
3139  * one goes to fill up the first current reservation.
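 *
 * Worked example (illustrative numbers only): a permanent ticket with
 * t_unit_res = 64k that still has t_cnt = 2 and t_curr_res = 54k when
 * it is ungranted gives back, after the decrement below,
 * bytes = 54k + 1 * 64k = 118k to both grant heads.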
3140  * Once we run out of space, the count will stay at zero and the only
3141  * space remaining will be in the current reservation field.
3142  */
3143 STATIC void
3144 xlog_ungrant_log_space(
3145 	struct xlog		*log,
3146 	struct xlog_ticket	*ticket)
3147 {
3148 	int	bytes;
3149 
3150 	if (ticket->t_cnt > 0)
3151 		ticket->t_cnt--;
3152 
3153 	trace_xfs_log_ungrant_enter(log, ticket);
3154 	trace_xfs_log_ungrant_sub(log, ticket);
3155 
3156 	/*
3157 	 * If this is a permanent reservation ticket, we may be able to free
3158 	 * up more space based on the remaining count.
3159 	 */
3160 	bytes = ticket->t_curr_res;
3161 	if (ticket->t_cnt > 0) {
3162 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
3163 		bytes += ticket->t_unit_res*ticket->t_cnt;
3164 	}
3165 
3166 	xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3167 	xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3168 
3169 	trace_xfs_log_ungrant_exit(log, ticket);
3170 
3171 	xfs_log_space_wake(log->l_mp);
3172 }
3173 
3174 /*
3175  * Flush iclog to disk if this is the last reference to the given iclog and
3176  * the WANT_SYNC bit is set.
3177  *
3178  * When this function is entered, the iclog is not necessarily in the
3179  * WANT_SYNC state.  It may be sitting around waiting to get filled.
3180  *
3181  *
3182  */
3183 STATIC int
3184 xlog_state_release_iclog(
3185 	struct xlog		*log,
3186 	struct xlog_in_core	*iclog)
3187 {
3188 	int		sync = 0;	/* do we sync? */
3189 
3190 	if (iclog->ic_state & XLOG_STATE_IOERROR)
3191 		return -EIO;
3192 
3193 	ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
3194 	if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
3195 		return 0;
3196 
3197 	if (iclog->ic_state & XLOG_STATE_IOERROR) {
3198 		spin_unlock(&log->l_icloglock);
3199 		return -EIO;
3200 	}
3201 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
3202 	       iclog->ic_state == XLOG_STATE_WANT_SYNC);
3203 
3204 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
3205 		/* update tail before writing to iclog */
3206 		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
3207 		sync++;
3208 		iclog->ic_state = XLOG_STATE_SYNCING;
3209 		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
3210 		xlog_verify_tail_lsn(log, iclog, tail_lsn);
3211 		/* cycle incremented when incrementing curr_block */
3212 	}
3213 	spin_unlock(&log->l_icloglock);
3214 
3215 	/*
3216 	 * We let the log lock go, so it's possible that we hit a log I/O
3217 	 * error or some other SHUTDOWN condition that marks the iclog
3218 	 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
3219 	 * this iclog has consistent data, so we ignore IOERROR
3220 	 * flags after this point.
3221 	 */
3222 	if (sync)
3223 		return xlog_sync(log, iclog);
3224 	return 0;
3225 }	/* xlog_state_release_iclog */
3226 
3227 
3228 /*
3229  * This routine will mark the current iclog in the ring as WANT_SYNC
3230  * and move the current iclog pointer to the next iclog in the ring.
3231  * When this routine is called from xlog_state_get_iclog_space(), the
3232  * exact size of the iclog has not yet been determined. All we know is
3233  * that we have run out of space in the current log record.
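 *
 * As an illustrative example of the stripe unit roundup performed
 * below (values made up): with sb_logsunit = 32k, sunit_bb =
 * BTOBB(32768) = 64 basic blocks, so an l_curr_block of 100 is
 * rounded up to 128.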
3234  */
3235 STATIC void
3236 xlog_state_switch_iclogs(
3237 	struct xlog		*log,
3238 	struct xlog_in_core	*iclog,
3239 	int			eventual_size)
3240 {
3241 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3242 	if (!eventual_size)
3243 		eventual_size = iclog->ic_offset;
3244 	iclog->ic_state = XLOG_STATE_WANT_SYNC;
3245 	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3246 	log->l_prev_block = log->l_curr_block;
3247 	log->l_prev_cycle = log->l_curr_cycle;
3248 
3249 	/* roll log?: ic_offset changed later */
3250 	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3251 
3252 	/* Round up to next log-sunit */
3253 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3254 	    log->l_mp->m_sb.sb_logsunit > 1) {
3255 		uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
3256 		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3257 	}
3258 
3259 	if (log->l_curr_block >= log->l_logBBsize) {
3260 		/*
3261 		 * Rewind the current block before the cycle is bumped to make
3262 		 * sure that the combined LSN never transiently moves forward
3263 		 * when the log wraps to the next cycle. This is to support the
3264 		 * unlocked sample of these fields from xlog_valid_lsn(). Most
3265 		 * other cases should acquire l_icloglock.
3266 		 */
3267 		log->l_curr_block -= log->l_logBBsize;
3268 		ASSERT(log->l_curr_block >= 0);
3269 		smp_wmb();
3270 		log->l_curr_cycle++;
3271 		if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3272 			log->l_curr_cycle++;
3273 	}
3274 	ASSERT(iclog == log->l_iclog);
3275 	log->l_iclog = iclog->ic_next;
3276 }	/* xlog_state_switch_iclogs */
3277 
3278 /*
3279  * Write out all data in the in-core log as of this exact moment in time.
3280  *
3281  * Data may be written to the in-core log during this call.  However,
3282  * we don't guarantee this data will be written out.  A change from past
3283  * implementation means this routine will *not* write out zero length LRs.
3284  *
3285  * Basically, we try and perform an intelligent scan of the in-core logs.
3286  * If we determine there is no flushable data, we just return.  There is no
3287  * flushable data if:
3288  *
3289  *	1. the current iclog is active and has no data; the previous iclog
3290  *		is in the active or dirty state.
3291  *	2. the current iclog is dirty, and the previous iclog is in the
3292  *		active or dirty state.
3293  *
3294  * We may sleep if:
3295  *
3296  *	1. the current iclog is not in the active nor dirty state.
3297  *	2. the current iclog is dirty, and the previous iclog is not in the
3298  *		active nor dirty state.
3299  *	3. the current iclog is active, and there is another thread writing
3300  *		to this particular iclog.
3301  *	4. a) the current iclog is active and has no other writers
3302  *	   b) when we return from flushing out this iclog, it is still
3303  *		not in the active nor dirty state.
3304  */
3305 int
3306 xfs_log_force(
3307 	struct xfs_mount	*mp,
3308 	uint			flags)
3309 {
3310 	struct xlog		*log = mp->m_log;
3311 	struct xlog_in_core	*iclog;
3312 	xfs_lsn_t		lsn;
3313 
3314 	XFS_STATS_INC(mp, xs_log_force);
3315 	trace_xfs_log_force(mp, 0, _RET_IP_);
3316 
3317 	xlog_cil_force(log);
3318 
3319 	spin_lock(&log->l_icloglock);
3320 	iclog = log->l_iclog;
3321 	if (iclog->ic_state & XLOG_STATE_IOERROR)
3322 		goto out_error;
3323 
3324 	if (iclog->ic_state == XLOG_STATE_DIRTY ||
3325 	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
3326 	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3327 		/*
3328 		 * If the head is dirty or (active and empty), then we need to
3329 		 * look at the previous iclog.
3330 * 3331 * If the previous iclog is active or dirty we are done. There 3332 * is nothing to sync out. Otherwise, we attach ourselves to the 3333 * previous iclog and go to sleep. 3334 */ 3335 iclog = iclog->ic_prev; 3336 if (iclog->ic_state == XLOG_STATE_ACTIVE || 3337 iclog->ic_state == XLOG_STATE_DIRTY) 3338 goto out_unlock; 3339 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3340 if (atomic_read(&iclog->ic_refcnt) == 0) { 3341 /* 3342 * We are the only one with access to this iclog. 3343 * 3344 * Flush it out now. There should be a roundoff of zero 3345 * to show that someone has already taken care of the 3346 * roundoff from the previous sync. 3347 */ 3348 atomic_inc(&iclog->ic_refcnt); 3349 lsn = be64_to_cpu(iclog->ic_header.h_lsn); 3350 xlog_state_switch_iclogs(log, iclog, 0); 3351 spin_unlock(&log->l_icloglock); 3352 3353 if (xlog_state_release_iclog(log, iclog)) 3354 return -EIO; 3355 3356 spin_lock(&log->l_icloglock); 3357 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn || 3358 iclog->ic_state == XLOG_STATE_DIRTY) 3359 goto out_unlock; 3360 } else { 3361 /* 3362 * Someone else is writing to this iclog. 3363 * 3364 * Use its call to flush out the data. However, the 3365 * other thread may not force out this LR, so we mark 3366 * it WANT_SYNC. 3367 */ 3368 xlog_state_switch_iclogs(log, iclog, 0); 3369 } 3370 } else { 3371 /* 3372 * If the head iclog is not active nor dirty, we just attach 3373 * ourselves to the head and go to sleep if necessary. 3374 */ 3375 ; 3376 } 3377 3378 if (!(flags & XFS_LOG_SYNC)) 3379 goto out_unlock; 3380 3381 if (iclog->ic_state & XLOG_STATE_IOERROR) 3382 goto out_error; 3383 XFS_STATS_INC(mp, xs_log_force_sleep); 3384 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3385 if (iclog->ic_state & XLOG_STATE_IOERROR) 3386 return -EIO; 3387 return 0; 3388 3389 out_unlock: 3390 spin_unlock(&log->l_icloglock); 3391 return 0; 3392 out_error: 3393 spin_unlock(&log->l_icloglock); 3394 return -EIO; 3395 } 3396 3397 static int 3398 __xfs_log_force_lsn( 3399 struct xfs_mount *mp, 3400 xfs_lsn_t lsn, 3401 uint flags, 3402 int *log_flushed, 3403 bool already_slept) 3404 { 3405 struct xlog *log = mp->m_log; 3406 struct xlog_in_core *iclog; 3407 3408 spin_lock(&log->l_icloglock); 3409 iclog = log->l_iclog; 3410 if (iclog->ic_state & XLOG_STATE_IOERROR) 3411 goto out_error; 3412 3413 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3414 iclog = iclog->ic_next; 3415 if (iclog == log->l_iclog) 3416 goto out_unlock; 3417 } 3418 3419 if (iclog->ic_state == XLOG_STATE_DIRTY) 3420 goto out_unlock; 3421 3422 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3423 /* 3424 * We sleep here if we haven't already slept (e.g. this is the 3425 * first time we've looked at the correct iclog buf) and the 3426 * buffer before us is going to be sync'ed. The reason for this 3427 * is that if we are doing sync transactions here, by waiting 3428 * for the previous I/O to complete, we can allow a few more 3429 * transactions into this iclog before we close it down. 3430 * 3431 * Otherwise, we mark the buffer WANT_SYNC, and bump up the 3432 * refcnt so we can release the log (which drops the ref count). 3433 * The state switch keeps new transaction commits from using 3434 * this buffer. When the current commits finish writing into 3435 * the buffer, the refcount will drop to zero and the buffer 3436 * will go out then. 
3437 */ 3438 if (!already_slept && 3439 (iclog->ic_prev->ic_state & 3440 (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) { 3441 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); 3442 3443 XFS_STATS_INC(mp, xs_log_force_sleep); 3444 3445 xlog_wait(&iclog->ic_prev->ic_write_wait, 3446 &log->l_icloglock); 3447 return -EAGAIN; 3448 } 3449 atomic_inc(&iclog->ic_refcnt); 3450 xlog_state_switch_iclogs(log, iclog, 0); 3451 spin_unlock(&log->l_icloglock); 3452 if (xlog_state_release_iclog(log, iclog)) 3453 return -EIO; 3454 if (log_flushed) 3455 *log_flushed = 1; 3456 spin_lock(&log->l_icloglock); 3457 } 3458 3459 if (!(flags & XFS_LOG_SYNC) || 3460 (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) 3461 goto out_unlock; 3462 3463 if (iclog->ic_state & XLOG_STATE_IOERROR) 3464 goto out_error; 3465 3466 XFS_STATS_INC(mp, xs_log_force_sleep); 3467 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); 3468 if (iclog->ic_state & XLOG_STATE_IOERROR) 3469 return -EIO; 3470 return 0; 3471 3472 out_unlock: 3473 spin_unlock(&log->l_icloglock); 3474 return 0; 3475 out_error: 3476 spin_unlock(&log->l_icloglock); 3477 return -EIO; 3478 } 3479 3480 /* 3481 * Force the in-core log to disk for a specific LSN. 3482 * 3483 * Find in-core log with lsn. 3484 * If it is in the DIRTY state, just return. 3485 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC 3486 * state and go to sleep or return. 3487 * If it is in any other state, go to sleep or return. 3488 * 3489 * Synchronous forces are implemented with a wait queue. All callers trying 3490 * to force a given lsn to disk must wait on the queue attached to the 3491 * specific in-core log. When given in-core log finally completes its write 3492 * to disk, that thread will wake up all threads waiting on the queue. 3493 */ 3494 int 3495 xfs_log_force_lsn( 3496 struct xfs_mount *mp, 3497 xfs_lsn_t lsn, 3498 uint flags, 3499 int *log_flushed) 3500 { 3501 int ret; 3502 ASSERT(lsn != 0); 3503 3504 XFS_STATS_INC(mp, xs_log_force); 3505 trace_xfs_log_force(mp, lsn, _RET_IP_); 3506 3507 lsn = xlog_cil_force_lsn(mp->m_log, lsn); 3508 if (lsn == NULLCOMMITLSN) 3509 return 0; 3510 3511 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false); 3512 if (ret == -EAGAIN) 3513 ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true); 3514 return ret; 3515 } 3516 3517 /* 3518 * Called when we want to mark the current iclog as being ready to sync to 3519 * disk. 3520 */ 3521 STATIC void 3522 xlog_state_want_sync( 3523 struct xlog *log, 3524 struct xlog_in_core *iclog) 3525 { 3526 assert_spin_locked(&log->l_icloglock); 3527 3528 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3529 xlog_state_switch_iclogs(log, iclog, 0); 3530 } else { 3531 ASSERT(iclog->ic_state & 3532 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); 3533 } 3534 } 3535 3536 3537 /***************************************************************************** 3538 * 3539 * TICKET functions 3540 * 3541 ***************************************************************************** 3542 */ 3543 3544 /* 3545 * Free a used ticket when its refcount falls to zero. 
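 *
 * Usage sketch (hypothetical caller): each reference taken with
 * xfs_log_ticket_get() must be balanced by an xfs_log_ticket_put(),
 * and the zone memory is returned when the last reference drops:
 *
 *	tic = xfs_log_ticket_get(tic);	- take an extra reference
 *	... hand the ticket to another context ...
 *	xfs_log_ticket_put(tic);	- drop it; freed at refcount zero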
3546 */ 3547 void 3548 xfs_log_ticket_put( 3549 xlog_ticket_t *ticket) 3550 { 3551 ASSERT(atomic_read(&ticket->t_ref) > 0); 3552 if (atomic_dec_and_test(&ticket->t_ref)) 3553 kmem_zone_free(xfs_log_ticket_zone, ticket); 3554 } 3555 3556 xlog_ticket_t * 3557 xfs_log_ticket_get( 3558 xlog_ticket_t *ticket) 3559 { 3560 ASSERT(atomic_read(&ticket->t_ref) > 0); 3561 atomic_inc(&ticket->t_ref); 3562 return ticket; 3563 } 3564 3565 /* 3566 * Figure out the total log space unit (in bytes) that would be 3567 * required for a log ticket. 3568 */ 3569 int 3570 xfs_log_calc_unit_res( 3571 struct xfs_mount *mp, 3572 int unit_bytes) 3573 { 3574 struct xlog *log = mp->m_log; 3575 int iclog_space; 3576 uint num_headers; 3577 3578 /* 3579 * Permanent reservations have up to 'cnt'-1 active log operations 3580 * in the log. A unit in this case is the amount of space for one 3581 * of these log operations. Normal reservations have a cnt of 1 3582 * and their unit amount is the total amount of space required. 3583 * 3584 * The following lines of code account for non-transaction data 3585 * which occupy space in the on-disk log. 3586 * 3587 * Normal form of a transaction is: 3588 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> 3589 * and then there are LR hdrs, split-recs and roundoff at end of syncs. 3590 * 3591 * We need to account for all the leadup data and trailer data 3592 * around the transaction data. 3593 * And then we need to account for the worst case in terms of using 3594 * more space. 3595 * The worst case will happen if: 3596 * - the placement of the transaction happens to be such that the 3597 * roundoff is at its maximum 3598 * - the transaction data is synced before the commit record is synced 3599 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> 3600 * Therefore the commit record is in its own Log Record. 3601 * This can happen as the commit record is called with its 3602 * own region to xlog_write(). 3603 * This then means that in the worst case, roundoff can happen for 3604 * the commit-rec as well. 3605 * The commit-rec is smaller than padding in this scenario and so it is 3606 * not added separately. 3607 */ 3608 3609 /* for trans header */ 3610 unit_bytes += sizeof(xlog_op_header_t); 3611 unit_bytes += sizeof(xfs_trans_header_t); 3612 3613 /* for start-rec */ 3614 unit_bytes += sizeof(xlog_op_header_t); 3615 3616 /* 3617 * for LR headers - the space for data in an iclog is the size minus 3618 * the space used for the headers. If we use the iclog size, then we 3619 * undercalculate the number of headers required. 3620 * 3621 * Furthermore - the addition of op headers for split-recs might 3622 * increase the space required enough to require more log and op 3623 * headers, so take that into account too. 3624 * 3625 * IMPORTANT: This reservation makes the assumption that if this 3626 * transaction is the first in an iclog and hence has the LR headers 3627 * accounted to it, then the remaining space in the iclog is 3628 * exclusively for this transaction. i.e. if the transaction is larger 3629 * than the iclog, it will be the only thing in that iclog. 3630 * Fundamentally, this means we must pass the entire log vector to 3631 * xlog_write to guarantee this. 
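 *
 * Worked example with made-up numbers: for unit_bytes = 100k of payload
 * in a log with 32k iclogs and a 512 byte LR header, iclog_space =
 * 32768 - 512 = 32256 bytes and num_headers = howmany(102400, 32256)
 * = 4, so the split-rec accounting below adds four op headers and the
 * header accounting adds four 512 byte LR headers to the reservation.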
3632  */
3633 	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3634 	num_headers = howmany(unit_bytes, iclog_space);
3635 
3636 	/* for split-recs - ophdrs added when data split over LRs */
3637 	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3638 
3639 	/* add extra header reservations if we overrun */
3640 	while (!num_headers ||
3641 	       howmany(unit_bytes, iclog_space) > num_headers) {
3642 		unit_bytes += sizeof(xlog_op_header_t);
3643 		num_headers++;
3644 	}
3645 	unit_bytes += log->l_iclog_hsize * num_headers;
3646 
3647 	/* for commit-rec LR header - note: padding will subsume the ophdr */
3648 	unit_bytes += log->l_iclog_hsize;
3649 
3650 	/* for roundoff padding for transaction data and one for commit record */
3651 	if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
3652 		/* log su roundoff */
3653 		unit_bytes += 2 * mp->m_sb.sb_logsunit;
3654 	} else {
3655 		/* BB roundoff */
3656 		unit_bytes += 2 * BBSIZE;
3657 	}
3658 
3659 	return unit_bytes;
3660 }
3661 
3662 /*
3663  * Allocate and initialise a new log ticket.
3664  */
3665 struct xlog_ticket *
3666 xlog_ticket_alloc(
3667 	struct xlog		*log,
3668 	int			unit_bytes,
3669 	int			cnt,
3670 	char			client,
3671 	bool			permanent,
3672 	xfs_km_flags_t		alloc_flags)
3673 {
3674 	struct xlog_ticket	*tic;
3675 	int			unit_res;
3676 
3677 	tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
3678 	if (!tic)
3679 		return NULL;
3680 
3681 	unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
3682 
3683 	atomic_set(&tic->t_ref, 1);
3684 	tic->t_task		= current;
3685 	INIT_LIST_HEAD(&tic->t_queue);
3686 	tic->t_unit_res		= unit_res;
3687 	tic->t_curr_res		= unit_res;
3688 	tic->t_cnt		= cnt;
3689 	tic->t_ocnt		= cnt;
3690 	tic->t_tid		= prandom_u32();
3691 	tic->t_clientid		= client;
3692 	tic->t_flags		= XLOG_TIC_INITED;
3693 	if (permanent)
3694 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
3695 
3696 	xlog_tic_reset_res(tic);
3697 
3698 	return tic;
3699 }
3700 
3701 
3702 /******************************************************************************
3703  *
3704  *		Log debug routines
3705  *
3706  ******************************************************************************
3707  */
3708 #if defined(DEBUG)
3709 /*
3710  * Make sure that the destination ptr is within the valid data region of
3711  * one of the iclogs.  This uses backup pointers stored in a different
3712  * part of the log in case we trash the log structure.
3713  */
3714 STATIC void
3715 xlog_verify_dest_ptr(
3716 	struct xlog	*log,
3717 	void		*ptr)
3718 {
3719 	int	i;
3720 	int	good_ptr = 0;
3721 
3722 	for (i = 0; i < log->l_iclog_bufs; i++) {
3723 		if (ptr >= log->l_iclog_bak[i] &&
3724 		    ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3725 			good_ptr++;
3726 	}
3727 
3728 	if (!good_ptr)
3729 		xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3730 }
3731 
3732 /*
3733  * Check to make sure the grant write head didn't just overlap the tail.  If
3734  * the cycles are the same, we can't be overlapping.  Otherwise, make sure that
3735  * the cycles differ by exactly one and check the byte count.
3736  *
3737  * This check is run unlocked, so can give false positives. Rather than assert
3738  * on failures, use a warn-once flag and a panic tag to allow the admin to
3739  * determine if they want to panic the machine when such an error occurs. For
3740  * debug kernels this will have the same effect as using an assert but, unlike
3741  * an assert, it can be turned off at runtime.
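 *
 * Sketch with made-up values: with the tail LSN cracked to cycle 8,
 * block 100 and the write grant head cracked to cycle 9, byte offset X,
 * the cycles differ by exactly one, and the heads have not overlapped
 * as long as X <= BBTOB(100); a larger X means the head passed the tail.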
 */
STATIC void
xlog_verify_grant_tail(
	struct xlog	*log)
{
	int	tail_cycle, tail_blocks;
	int	cycle, space;

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
	if (tail_cycle != cycle) {
		if (cycle - 1 != tail_cycle &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: cycle - 1 != tail_cycle", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}

		if (space > BBTOB(tail_blocks) &&
		    !(log->l_flags & XLOG_TAIL_WARN)) {
			xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
				"%s: space > BBTOB(tail_blocks)", __func__);
			log->l_flags |= XLOG_TAIL_WARN;
		}
	}
}

/* check that the iclog about to be written will fit in the remaining log space */
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn)
{
	int	blocks;

	if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
		blocks =
		    log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
		if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	} else {
		ASSERT(CYCLE_LSN(tail_lsn) + 1 == log->l_prev_cycle);

		if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
			xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);

		blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
		if (blocks < BTOBB(iclog->ic_offset) + 1)
			xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
	}
} /* xlog_verify_tail_lsn */

/*
 * Perform a number of checks on the iclog before writing to disk.
 *
 * 1. Make sure the iclogs are still circular
 * 2. Make sure we have a good magic number
 * 3. Make sure we don't have magic numbers in the data
 * 4. Check fields of each log operation header for:
 *	A. Valid client identifier
 *	B. tid ptr value falls in valid ptr space (user space code)
 *	C. Length in log record header is correct according to the
 *	   individual operation headers within record.
 * 5. When a bwrite will occur within 5 blocks of the front of the physical
 *    log, check the preceding blocks of the physical log to make sure all
 *    the cycle numbers agree with the current cycle number.
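 *
 * Note on the "syncing" cases below: once an iclog is being synced, the
 * first four bytes of every 512 byte block in it have been replaced by
 * the cycle number and the displaced words saved in h_cycle_data (or in
 * the extended headers for larger records).  That is why the clientid
 * and length checks read from those saved copies whenever a field's
 * offset lands on a 512 byte boundary while syncing.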
 */
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	bool			syncing)
{
	xlog_op_header_t	*ophead;
	xlog_in_core_t		*icptr;
	xlog_in_core_2_t	*xhdr;
	void			*base_ptr, *ptr, *p;
	ptrdiff_t		field_offset;
	uint8_t			clientid;
	int			len, i, j, k, op_len;
	int			idx;

	/* check validity of iclog pointers */
	spin_lock(&log->l_icloglock);
	icptr = log->l_iclog;
	for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
		ASSERT(icptr);

	if (icptr != log->l_iclog)
		xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
	spin_unlock(&log->l_icloglock);

	/* check log magic numbers */
	if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
		xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);

	base_ptr = ptr = &iclog->ic_header;
	p = &iclog->ic_header;
	for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) {
		if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			xfs_emerg(log->l_mp, "%s: unexpected magic num",
				__func__);
	}

	/* check fields */
	len = be32_to_cpu(iclog->ic_header.h_num_logops);
	base_ptr = ptr = iclog->ic_datap;
	ophead = ptr;
	xhdr = iclog->ic_data;
	for (i = 0; i < len; i++) {
		ophead = ptr;

		/* clientid is only 1 byte */
		p = &ophead->oh_clientid;
		field_offset = p - base_ptr;
		if (!syncing || (field_offset & 0x1ff)) {
			clientid = ophead->oh_clientid;
		} else {
			idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				clientid = xlog_get_client_id(
					xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				clientid = xlog_get_client_id(
					iclog->ic_header.h_cycle_data[idx]);
			}
		}
		if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
			xfs_warn(log->l_mp,
				"%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
				__func__, clientid, ophead,
				(unsigned long)field_offset);

		/* check length */
		p = &ophead->oh_len;
		field_offset = p - base_ptr;
		if (!syncing || (field_offset & 0x1ff)) {
			op_len = be32_to_cpu(ophead->oh_len);
		} else {
			idx = BTOBBT((uintptr_t)&ophead->oh_len -
				    (uintptr_t)iclog->ic_datap);
			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
			} else {
				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
			}
		}
		ptr += sizeof(xlog_op_header_t) + op_len;
	}
} /* xlog_verify_iclog */
#endif

/*
 * Mark all iclogs IOERROR.  l_icloglock is held by the caller.
 */
STATIC int
xlog_state_ioerror(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog, *ic;

	iclog = log->l_iclog;
	if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
		/*
		 * Mark all the incore logs IOERROR.
		 * From now on, no log flushes will result.
		 */
		ic = iclog;
		do {
			ic->ic_state = XLOG_STATE_IOERROR;
			ic = ic->ic_next;
		} while (ic != iclog);
		return 0;
	}
	/*
	 * Return non-zero if the state transition has already happened.
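	 * The caller (xfs_log_force_umount) passes this back so a racing
	 * shutdown can tell that the transition was already done.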
	 */
	return 1;
}

/*
 * This is called from xfs_force_shutdown, when we're forcibly
 * shutting down the filesystem, typically because of an IO error.
 * Our main objectives here are to make sure that:
 *	a. if !logerror, flush the logs to disk.  Anything modified
 *	   after this is ignored.
 *	b. the filesystem gets marked 'SHUTDOWN' for all interested
 *	   parties to find out, 'atomically'.
 *	c. those who are sleeping on log reservations, pinned objects and
 *	   other resources get woken up and told the bad news.
 *	d. nothing new gets queued up after (b) and (c) are done.
 *
 * Note: for the !logerror case we need to flush the regions held in memory out
 * to disk first.  This needs to be done before the log is marked as shutdown,
 * otherwise the iclog writes will fail.
 */
int
xfs_log_force_umount(
	struct xfs_mount	*mp,
	int			logerror)
{
	struct xlog	*log;
	int		retval;

	log = mp->m_log;

	/*
	 * If this happens during log recovery, don't worry about
	 * locking; the log isn't open for business yet.
	 */
	if (!log ||
	    log->l_flags & XLOG_ACTIVE_RECOVERY) {
		mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
		if (mp->m_sb_bp)
			mp->m_sb_bp->b_flags |= XBF_DONE;
		return 0;
	}

	/*
	 * Somebody could've already done the hard work for us.
	 * No need to get locks for this.
	 */
	if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
		ASSERT(XLOG_FORCED_SHUTDOWN(log));
		return 1;
	}

	/*
	 * Flush all the completed transactions to disk before marking the log
	 * as shut down.  We need to do it in this order to ensure that
	 * completed operations are safely on disk before we shut down, and
	 * that we don't have to issue any buffer IO after the shutdown flags
	 * are set.
	 */
	if (!logerror)
		xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Mark the filesystem and the log as in a shutdown state and wake
	 * everybody up to tell them the bad news.
	 */
	spin_lock(&log->l_icloglock);
	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	/*
	 * Mark the log and the iclogs with IO error flags to prevent any
	 * further log IO from being issued or completed.
	 */
	log->l_flags |= XLOG_IO_ERROR;
	retval = xlog_state_ioerror(log);
	spin_unlock(&log->l_icloglock);

	/*
	 * We don't want anybody waiting for log reservations after this.  That
	 * means we have to wake up everybody queued up on reserveq as well as
	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
	 * action is protected by the grant locks.
	 */
	xlog_grant_head_wake_all(&log->l_reserve_head);
	xlog_grant_head_wake_all(&log->l_write_head);

	/*
	 * Wake up everybody waiting on xfs_log_force.  Wake the CIL push first
	 * as if the log writes were completed.  The abort handling in the log
	 * item committed callback functions will do this again under lock to
	 * avoid races.
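	 * The xlog_state_do_callback() call below passes XFS_LI_ABORTED and
	 * no iclog so the attached completion callbacks run and report the
	 * abort to anyone still waiting.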
	 */
	wake_up_all(&log->l_cilp->xc_commit_wait);
	xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);

#ifdef XFSERRORDEBUG
	{
		xlog_in_core_t	*iclog;

		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		do {
			ASSERT(iclog->ic_callback == 0);
			iclog = iclog->ic_next;
		} while (iclog != log->l_iclog);
		spin_unlock(&log->l_icloglock);
	}
#endif
	/* return non-zero if log IOERROR transition had already happened */
	return retval;
}

STATIC int
xlog_iclogs_empty(
	struct xlog	*log)
{
	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/* endianness does not matter here, zero is zero in
		 * any language.
		 */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;
}

/*
 * Verify that an LSN stamped into a piece of metadata is valid.  This is
 * intended for use in read verifiers on v5 superblocks.
 */
bool
xfs_log_check_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		lsn)
{
	struct xlog		*log = mp->m_log;
	bool			valid;

	/*
	 * norecovery mode skips mount-time log processing and unconditionally
	 * resets the in-core LSN.  We can't validate in this mode, but
	 * modifications are not allowed anyway, so just return true.
	 */
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		return true;

	/*
	 * Some metadata LSNs are initialized to NULL (e.g., the agfl).  This
	 * is handled by recovery and thus safe to ignore here.
	 */
	if (lsn == NULLCOMMITLSN)
		return true;

	valid = xlog_valid_lsn(mp->m_log, lsn);

	/* warn the user about what's gone wrong before verifier failure */
	if (!valid) {
		spin_lock(&log->l_icloglock);
		xfs_warn(mp,
"Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). "
"Please unmount and run xfs_repair (>= v4.3) to resolve.",
			 CYCLE_LSN(lsn), BLOCK_LSN(lsn),
			 log->l_curr_cycle, log->l_curr_block);
		spin_unlock(&log->l_icloglock);
	}

	return valid;
}
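
/*
 * Illustrative caller sketch, not part of this file: a v5 read verifier
 * would typically feed an object's stamped LSN to xfs_log_check_lsn() and
 * fail the buffer if the LSN lies ahead of the current log.  Roughly, with
 * "xfs_foo" and "foo_lsn" as made-up names:
 *
 *	static void
 *	xfs_foo_read_verify(
 *		struct xfs_buf		*bp)
 *	{
 *		struct xfs_mount	*mp = bp->b_target->bt_mount;
 *		struct xfs_foo		*foo = bp->b_addr;
 *
 *		if (xfs_sb_version_hascrc(&mp->m_sb) &&
 *		    !xfs_log_check_lsn(mp, be64_to_cpu(foo->foo_lsn)))
 *			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
 *	}
 */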