// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/checkpoint.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1999 Red Hat Software --- All Rights Reserved
 *
 * Checkpoint routines for the generic filesystem journaling code.
 * Part of the ext2fs journaling system.
 *
 * Checkpointing is the process of ensuring that a section of the log is
 * committed fully to disk, so that that portion of the log can be
 * reused.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>

/*
 * Unlink a buffer from a transaction checkpoint list.
 *
 * Called with j_list_lock held.
 */
static inline void __buffer_unlink(struct journal_head *jh)
{
        transaction_t *transaction = jh->b_cp_transaction;

        jh->b_cpnext->b_cpprev = jh->b_cpprev;
        jh->b_cpprev->b_cpnext = jh->b_cpnext;
        if (transaction->t_checkpoint_list == jh) {
                transaction->t_checkpoint_list = jh->b_cpnext;
                if (transaction->t_checkpoint_list == jh)
                        transaction->t_checkpoint_list = NULL;
        }
}

/*
 * __jbd2_log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*.  It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __jbd2_log_wait_for_space(journal_t *journal)
__acquires(&journal->j_state_lock)
__releases(&journal->j_state_lock)
{
        int nblocks, space_left;
        /* assert_spin_locked(&journal->j_state_lock); */

        nblocks = journal->j_max_transaction_buffers;
        while (jbd2_log_space_left(journal) < nblocks) {
                write_unlock(&journal->j_state_lock);
                mutex_lock_io(&journal->j_checkpoint_mutex);

                /*
                 * Test again, another process may have checkpointed while we
                 * were waiting for the checkpoint lock. If there are no
                 * transactions ready to be checkpointed, try to recover
                 * journal space by calling cleanup_journal_tail(), and if
                 * that doesn't work, by waiting for the currently committing
                 * transaction to complete.  If there is absolutely no way
                 * to make progress, this is either a BUG or corrupted
                 * filesystem, so abort the journal and leave a stack
                 * trace for forensic evidence.
                 */
                write_lock(&journal->j_state_lock);
                if (journal->j_flags & JBD2_ABORT) {
                        mutex_unlock(&journal->j_checkpoint_mutex);
                        return;
                }
                spin_lock(&journal->j_list_lock);
                space_left = jbd2_log_space_left(journal);
                if (space_left < nblocks) {
                        int chkpt = journal->j_checkpoint_transactions != NULL;
                        tid_t tid = 0;
                        bool has_transaction = false;

                        if (journal->j_committing_transaction) {
                                tid = journal->j_committing_transaction->t_tid;
                                has_transaction = true;
                        }
                        spin_unlock(&journal->j_list_lock);
                        write_unlock(&journal->j_state_lock);
                        if (chkpt) {
                                jbd2_log_do_checkpoint(journal);
                        } else if (jbd2_cleanup_journal_tail(journal) <= 0) {
                                /*
                                 * We were able to recover space or the
                                 * journal was aborted due to an error.
                                 */
                                ;
                        } else if (has_transaction) {
                                /*
                                 * jbd2_journal_commit_transaction() may want
                                 * to take the checkpoint_mutex if JBD2_FLUSHED
                                 * is set.  So we need to temporarily drop it.
                                 */
                                mutex_unlock(&journal->j_checkpoint_mutex);
                                jbd2_log_wait_commit(journal, tid);
                                write_lock(&journal->j_state_lock);
                                continue;
                        } else {
                                printk(KERN_ERR "%s: needed %d blocks and "
                                       "only had %d space available\n",
                                       __func__, nblocks, space_left);
                                printk(KERN_ERR "%s: no way to get more "
                                       "journal space in %s\n", __func__,
                                       journal->j_devname);
                                WARN_ON(1);
                                jbd2_journal_abort(journal, -EIO);
                        }
                        write_lock(&journal->j_state_lock);
                } else {
                        spin_unlock(&journal->j_list_lock);
                }
                mutex_unlock(&journal->j_checkpoint_mutex);
        }
}
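
/*
 * Illustrative, standalone userspace sketch (not part of this kernel file;
 * compile it separately with "cc -pthread"): the "drop the lock, take the
 * sleeping mutex, re-take the lock and re-test" shape that
 * __jbd2_log_wait_for_space() follows above, reduced to pthreads.  Every
 * name below (fake_journal, fake_wait_for_space, recover_some_space, ...)
 * is a hypothetical stand-in, not a jbd2 API.
 */
#include <pthread.h>

struct fake_journal {
        pthread_rwlock_t state_lock;    /* plays the role of j_state_lock */
        pthread_mutex_t chkpt_mutex;    /* plays the role of j_checkpoint_mutex */
        int space_left;
        int needed;
};

static void recover_some_space(struct fake_journal *j)
{
        /* Stand-in for checkpointing / journal tail cleanup work. */
        j->space_left += j->needed;
}

/* Called with state_lock write-held; returns with it write-held. */
static void fake_wait_for_space(struct fake_journal *j)
{
        while (j->space_left < j->needed) {
                /* Never sleep on the mutex while holding the hot lock. */
                pthread_rwlock_unlock(&j->state_lock);
                pthread_mutex_lock(&j->chkpt_mutex);

                /*
                 * Re-take the lock and re-test: another thread may have
                 * made space while we were blocked on the mutex.
                 */
                pthread_rwlock_wrlock(&j->state_lock);
                if (j->space_left < j->needed)
                        recover_some_space(j);
                pthread_mutex_unlock(&j->chkpt_mutex);
        }
}

static struct fake_journal fake_j = {
        .state_lock = PTHREAD_RWLOCK_INITIALIZER,
        .chkpt_mutex = PTHREAD_MUTEX_INITIALIZER,
        .needed = 64,
};

int main(void)
{
        pthread_rwlock_wrlock(&fake_j.state_lock);
        fake_wait_for_space(&fake_j);   /* returns once space_left >= needed */
        pthread_rwlock_unlock(&fake_j.state_lock);
        return 0;
}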

static void
__flush_batch(journal_t *journal, int *batch_count)
{
        int i;
        struct blk_plug plug;

        blk_start_plug(&plug);
        for (i = 0; i < *batch_count; i++)
                write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
        blk_finish_plug(&plug);

        for (i = 0; i < *batch_count; i++) {
                struct buffer_head *bh = journal->j_chkpt_bhs[i];
                BUFFER_TRACE(bh, "brelse");
                __brelse(bh);
                journal->j_chkpt_bhs[i] = NULL;
        }
        *batch_count = 0;
}

/*
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. We submit larger chunks of data at once.
 *
 * The journal should be locked before calling this function.
 * Called with j_checkpoint_mutex held.
 */
int jbd2_log_do_checkpoint(journal_t *journal)
{
        struct journal_head     *jh;
        struct buffer_head      *bh;
        transaction_t           *transaction;
        tid_t                   this_tid;
        int                     result, batch_count = 0;

        jbd2_debug(1, "Start checkpoint\n");

        /*
         * First thing: if there are any transactions in the log which
         * don't need checkpointing, just eliminate them from the
         * journal straight away.
         */
        result = jbd2_cleanup_journal_tail(journal);
        trace_jbd2_checkpoint(journal, result);
        jbd2_debug(1, "cleanup_journal_tail returned %d\n", result);
        if (result <= 0)
                return result;

        /*
         * OK, we need to start writing disk blocks.  Take one transaction
         * and write it.
         */
        spin_lock(&journal->j_list_lock);
        if (!journal->j_checkpoint_transactions)
                goto out;
        transaction = journal->j_checkpoint_transactions;
        if (transaction->t_chp_stats.cs_chp_time == 0)
                transaction->t_chp_stats.cs_chp_time = jiffies;
        this_tid = transaction->t_tid;
restart:
        /*
         * If someone cleaned up this transaction while we slept, we're
         * done (maybe it's a new transaction, but it fell at the same
         * address).
         */
        if (journal->j_checkpoint_transactions != transaction ||
            transaction->t_tid != this_tid)
                goto out;

        /* checkpoint all of the transaction's buffers */
        while (transaction->t_checkpoint_list) {
                jh = transaction->t_checkpoint_list;
                bh = jh2bh(jh);

                if (jh->b_transaction != NULL) {
                        transaction_t *t = jh->b_transaction;
                        tid_t tid = t->t_tid;

                        transaction->t_chp_stats.cs_forced_to_close++;
                        spin_unlock(&journal->j_list_lock);
                        if (unlikely(journal->j_flags & JBD2_UNMOUNT))
                                /*
                                 * The journal thread is dead; so
                                 * starting and waiting for a commit
                                 * to finish will cause us to wait for
                                 * a _very_ long time.
                                 */
                                printk(KERN_ERR
                                       "JBD2: %s: Waiting for Godot: block %llu\n",
                                       journal->j_devname,
                                       (unsigned long long) bh->b_blocknr);

                        if (batch_count)
                                __flush_batch(journal, &batch_count);
                        jbd2_log_start_commit(journal, tid);
                        /*
                         * jbd2_journal_commit_transaction() may want
                         * to take the checkpoint_mutex if JBD2_FLUSHED
                         * is set, jbd2_update_log_tail() called by
                         * jbd2_journal_commit_transaction() may also take
                         * checkpoint_mutex.  So we need to temporarily
                         * drop it.
                         */
                        mutex_unlock(&journal->j_checkpoint_mutex);
                        jbd2_log_wait_commit(journal, tid);
                        mutex_lock_io(&journal->j_checkpoint_mutex);
                        spin_lock(&journal->j_list_lock);
                        goto restart;
                }
                if (!trylock_buffer(bh)) {
                        /*
                         * The buffer is locked: it may be under writeback,
                         * may have been flushed out in the last couple of
                         * cycles, or may be being re-added to a new
                         * transaction.  Check it again once it is unlocked.
                         */
                        get_bh(bh);
                        spin_unlock(&journal->j_list_lock);
                        wait_on_buffer(bh);
                        /* the journal_head may have gone by now */
                        BUFFER_TRACE(bh, "brelse");
                        __brelse(bh);
                        goto retry;
                } else if (!buffer_dirty(bh)) {
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "remove from checkpoint");
                        /*
                         * If the transaction was released or the checkpoint
                         * list was empty, we're done.
                         */
                        if (__jbd2_journal_remove_checkpoint(jh) ||
                            !transaction->t_checkpoint_list)
                                goto out;
                } else {
                        unlock_buffer(bh);
                        /*
                         * We are about to write the buffer.  Once we release
                         * j_list_lock it could be raced by transaction
                         * shrinking or buffer re-logging, so leave it on the
                         * checkpoint list and check its status again later to
                         * make sure it is still clean.
                         */
                        BUFFER_TRACE(bh, "queue");
                        get_bh(bh);
                        J_ASSERT_BH(bh, !buffer_jwrite(bh));
                        journal->j_chkpt_bhs[batch_count++] = bh;
                        transaction->t_chp_stats.cs_written++;
                        transaction->t_checkpoint_list = jh->b_cpnext;
                }

                if ((batch_count == JBD2_NR_BATCH) ||
                    need_resched() || spin_needbreak(&journal->j_list_lock) ||
                    jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
                        goto unlock_and_flush;
        }

        if (batch_count) {
                unlock_and_flush:
                        spin_unlock(&journal->j_list_lock);
                retry:
                        if (batch_count)
                                __flush_batch(journal, &batch_count);
                        spin_lock(&journal->j_list_lock);
                        goto restart;
        }

out:
        spin_unlock(&journal->j_list_lock);
        result = jbd2_cleanup_journal_tail(journal);

        return (result < 0) ? result : 0;
}

/*
 * Check the list of checkpoint transactions for the journal to see if
 * we have already got rid of any since the last update of the log tail
 * in the journal superblock.  If so, we can instantly roll the
 * superblock forward to remove those transactions from the log.
 *
 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
 *
 * Called with the journal lock held.
 *
 * This is the only part of the journaling code which really needs to be
 * aware of transaction aborts.  Checkpointing involves writing to the
 * main filesystem area rather than to the journal, so it can proceed
 * even in abort state, but we must not update the super block if
 * checkpointing may have failed.  Otherwise, we would lose some metadata
 * buffers which should be written-back to the filesystem.
 */

int jbd2_cleanup_journal_tail(journal_t *journal)
{
        tid_t           first_tid;
        unsigned long   blocknr;

        if (is_journal_aborted(journal))
                return -EIO;

        if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
                return 1;
        J_ASSERT(blocknr != 0);

        /*
         * We need to make sure that any blocks that were recently written out
         * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before
         * we drop the transactions from the journal.  It's unlikely this will
         * be necessary, especially with an appropriately sized journal, but we
         * need this to guarantee correctness.  Fortunately
         * jbd2_cleanup_journal_tail() doesn't get called all that often.
         */
        if (journal->j_flags & JBD2_BARRIER)
                blkdev_issue_flush(journal->j_fs_dev);

        return __jbd2_update_log_tail(journal, first_tid, blocknr);
}


/* Checkpoint list management */

enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};

/*
 * journal_shrink_one_cp_list
 *
 * Find all the written-back checkpoint buffers in the given list
 * and try to release them. If the whole transaction is released, set
 * the 'released' parameter. Return the number of released checkpointed
 * buffers.
 *
 * Called with j_list_lock held.
 */
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
                                                enum shrink_type type,
                                                bool *released)
{
        struct journal_head *last_jh;
        struct journal_head *next_jh = jh;
        unsigned long nr_freed = 0;
        int ret;

        *released = false;
        if (!jh)
                return 0;

        last_jh = jh->b_cpprev;
        do {
                jh = next_jh;
                next_jh = jh->b_cpnext;

                if (type == SHRINK_DESTROY) {
                        ret = __jbd2_journal_remove_checkpoint(jh);
                } else {
                        ret = jbd2_journal_try_remove_checkpoint(jh);
                        if (ret < 0) {
                                if (type == SHRINK_BUSY_SKIP)
                                        continue;
                                break;
                        }
                }

                nr_freed++;
                if (ret) {
                        *released = true;
                        break;
                }

                if (need_resched())
                        break;
        } while (jh != last_jh);

        return nr_freed;
}

/*
 * jbd2_journal_shrink_checkpoint_list
 *
 * Find 'nr_to_scan' written-back checkpoint buffers in the journal
 * and try to release them. Return the number of released checkpointed
 * buffers.
 *
 * Called without j_list_lock held; the lock is taken and dropped
 * internally.
 */
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
                                                  unsigned long *nr_to_scan)
{
        transaction_t *transaction, *last_transaction, *next_transaction;
        bool __maybe_unused released;
        tid_t first_tid = 0, last_tid = 0, next_tid = 0;
        tid_t tid = 0;
        unsigned long nr_freed = 0;
        unsigned long freed;
        bool first_set = false;

again:
        spin_lock(&journal->j_list_lock);
        if (!journal->j_checkpoint_transactions) {
                spin_unlock(&journal->j_list_lock);
                goto out;
        }

        /*
         * Get the next transaction to shrink: resume the previous scan or
         * start over again.  If someone else checkpoints and drops
         * transactions from the checkpoint list in the meantime, we ignore
         * the saved j_shrink_transaction and start over unconditionally.
         */
        if (journal->j_shrink_transaction)
                transaction = journal->j_shrink_transaction;
        else
                transaction = journal->j_checkpoint_transactions;

        if (!first_set) {
                first_tid = transaction->t_tid;
                first_set = true;
        }
        last_transaction = journal->j_checkpoint_transactions->t_cpprev;
        next_transaction = transaction;
        last_tid = last_transaction->t_tid;
        do {
                transaction = next_transaction;
                next_transaction = transaction->t_cpnext;
                tid = transaction->t_tid;

                freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
                                                   SHRINK_BUSY_SKIP, &released);
                nr_freed += freed;
                (*nr_to_scan) -= min(*nr_to_scan, freed);
                if (*nr_to_scan == 0)
                        break;
                if (need_resched() || spin_needbreak(&journal->j_list_lock))
                        break;
        } while (transaction != last_transaction);

        if (transaction != last_transaction) {
                journal->j_shrink_transaction = next_transaction;
                next_tid = next_transaction->t_tid;
        } else {
                journal->j_shrink_transaction = NULL;
                next_tid = 0;
        }

        spin_unlock(&journal->j_list_lock);
        cond_resched();

        if (*nr_to_scan && journal->j_shrink_transaction)
                goto again;
out:
        trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
                                          nr_freed, next_tid);

        return nr_freed;
}

/*
 * journal_clean_checkpoint_list
 *
 * Find all the written-back checkpoint buffers in the journal and release them.
 * If 'destroy' is set, release all buffers unconditionally.
 *
 * Called with j_list_lock held.
 */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
{
        transaction_t *transaction, *last_transaction, *next_transaction;
        enum shrink_type type;
        bool released;

        transaction = journal->j_checkpoint_transactions;
        if (!transaction)
                return;

        type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP;
        last_transaction = transaction->t_cpprev;
        next_transaction = transaction;
        do {
                transaction = next_transaction;
                next_transaction = transaction->t_cpnext;
                journal_shrink_one_cp_list(transaction->t_checkpoint_list,
                                           type, &released);
                /*
                 * This function only frees up some memory if possible, so we
                 * don't have an obligation to finish processing.  Bail out
                 * if preemption is requested:
                 */
                if (need_resched())
                        return;
                /*
                 * Stop scanning if we couldn't free the transaction.  This
                 * avoids pointless scanning of transactions which still
                 * weren't checkpointed.
                 */
                if (!released)
                        return;
        } while (transaction != last_transaction);
}

/*
 * Remove buffers from all checkpoint lists as the journal is aborted and
 * we just need to free memory.
 */
void jbd2_journal_destroy_checkpoint(journal_t *journal)
{
        /*
         * We loop because __jbd2_journal_clean_checkpoint_list() may abort
         * early due to a need to reschedule.
         */
        while (1) {
                spin_lock(&journal->j_list_lock);
                if (!journal->j_checkpoint_transactions) {
                        spin_unlock(&journal->j_list_lock);
                        break;
                }
                __jbd2_journal_clean_checkpoint_list(journal, true);
                spin_unlock(&journal->j_list_lock);
                cond_resched();
        }
}

/*
 * journal_remove_checkpoint: called after a buffer has been committed
 * to disk (either by being write-back flushed to disk, or being
 * committed to the log).
 *
 * We cannot safely clean a transaction out of the log until all of the
 * buffer updates committed in that transaction have safely been stored
 * elsewhere on disk.  To achieve this, all of the buffers in a
 * transaction need to be maintained on the transaction's checkpoint
 * lists until they have been rewritten, at which point this function is
 * called to remove the buffer from the existing transaction's
 * checkpoint lists.
 *
 * The function returns 1 if it frees the transaction, 0 otherwise.
 * The function can free jh and bh.
 *
 * This function is called with j_list_lock held.
 */
int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
{
        struct transaction_chp_stats_s *stats;
        transaction_t *transaction;
        journal_t *journal;
        struct buffer_head *bh = jh2bh(jh);

        JBUFFER_TRACE(jh, "entry");

        transaction = jh->b_cp_transaction;
        if (!transaction) {
                JBUFFER_TRACE(jh, "not on transaction");
                return 0;
        }
        journal = transaction->t_journal;

        JBUFFER_TRACE(jh, "removing from transaction");

        /*
         * If we have failed to write the buffer out to disk, the filesystem
         * may become inconsistent.  We cannot abort the journal here since
         * we hold j_list_lock and we have to be careful about races with
         * jbd2_journal_destroy().  So mark the writeback IO error in the
         * journal here and we abort the journal later from a better context.
         */
        if (buffer_write_io_error(bh))
                set_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags);

        __buffer_unlink(jh);
        jh->b_cp_transaction = NULL;
        percpu_counter_dec(&journal->j_checkpoint_jh_count);
        jbd2_journal_put_journal_head(jh);

        /* Is this transaction empty? */
        if (transaction->t_checkpoint_list)
                return 0;

        /*
         * There is one special case to worry about: if we have just pulled the
         * buffer off a running or committing transaction's checkpoint list,
         * then even if the checkpoint list is empty, the transaction obviously
         * cannot be dropped!
         *
         * The locking here around t_state is a bit sleazy.
         * See the comment at the end of jbd2_journal_commit_transaction().
         */
        if (transaction->t_state != T_FINISHED)
                return 0;

        /*
         * OK, that was the last buffer for the transaction, we can now
         * safely remove this transaction from the log.
         */
        stats = &transaction->t_chp_stats;
        if (stats->cs_chp_time)
                stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,
                                                    jiffies);
        trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev,
                                    transaction->t_tid, stats);

        __jbd2_journal_drop_transaction(journal, transaction);
        jbd2_journal_free_transaction(transaction);
        return 1;
}

/*
 * Check the checkpoint buffer and try to remove it from the checkpoint
 * list if it's clean.  Returns -EBUSY if it is not clean, returns 1 if
 * it frees the transaction, 0 otherwise.
 *
 * This function is called with j_list_lock held.
 */
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        if (jh->b_transaction)
                return -EBUSY;
        if (!trylock_buffer(bh))
                return -EBUSY;
        if (buffer_dirty(bh)) {
                unlock_buffer(bh);
                return -EBUSY;
        }
        unlock_buffer(bh);

        /*
         * Buffer is clean and the IO has finished (we held the buffer
         * lock) so the checkpoint is done.  We can safely remove the
         * buffer from this transaction.
         */
        JBUFFER_TRACE(jh, "remove from checkpoint list");
        return __jbd2_journal_remove_checkpoint(jh);
}

/*
 * journal_insert_checkpoint: put a committed buffer onto a checkpoint
 * list so that we know when it is safe to clean the transaction out of
 * the log.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
                                      transaction_t *transaction)
{
        JBUFFER_TRACE(jh, "entry");
        J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
        J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);

        /* Get reference for checkpointing transaction */
        jbd2_journal_grab_journal_head(jh2bh(jh));
        jh->b_cp_transaction = transaction;

        if (!transaction->t_checkpoint_list) {
                jh->b_cpnext = jh->b_cpprev = jh;
        } else {
                jh->b_cpnext = transaction->t_checkpoint_list;
                jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
                jh->b_cpprev->b_cpnext = jh;
                jh->b_cpnext->b_cpprev = jh;
        }
        transaction->t_checkpoint_list = jh;
        percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count);
}

/*
 * We've finished with this transaction structure: adios...
 *
 * The transaction must have no links except for the checkpoint by this
 * point.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */

void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
        assert_spin_locked(&journal->j_list_lock);

        journal->j_shrink_transaction = NULL;
        if (transaction->t_cpnext) {
                transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
                transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
                if (journal->j_checkpoint_transactions == transaction)
                        journal->j_checkpoint_transactions =
                                transaction->t_cpnext;
                if (journal->j_checkpoint_transactions == transaction)
                        journal->j_checkpoint_transactions = NULL;
        }

        J_ASSERT(transaction->t_state == T_FINISHED);
        J_ASSERT(transaction->t_buffers == NULL);
        J_ASSERT(transaction->t_forget == NULL);
        J_ASSERT(transaction->t_shadow_list == NULL);
        J_ASSERT(transaction->t_checkpoint_list == NULL);
        J_ASSERT(atomic_read(&transaction->t_updates) == 0);
        J_ASSERT(journal->j_committing_transaction != transaction);
        J_ASSERT(journal->j_running_transaction != transaction);

        trace_jbd2_drop_transaction(journal, transaction);

        jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
}
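
/*
 * Illustrative, standalone userspace sketch (not part of this kernel file;
 * compile and run it separately): the circular doubly-linked list
 * discipline used by __jbd2_journal_insert_checkpoint() and
 * __buffer_unlink() above, reduced to a plain struct.  All names here
 * (cp_node, cp_insert, cp_unlink) are hypothetical; the point is only the
 * pointer manipulation and how the head is cleared once the list empties.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct cp_node {
        struct cp_node *cpnext;
        struct cp_node *cpprev;
        int id;
};

/*
 * Link a node into the circular list anchored at *head; head ends up
 * pointing at the most recently inserted node, as t_checkpoint_list does.
 */
static void cp_insert(struct cp_node **head, struct cp_node *n)
{
        if (!*head) {
                n->cpnext = n->cpprev = n;
        } else {
                n->cpnext = *head;
                n->cpprev = (*head)->cpprev;
                n->cpprev->cpnext = n;
                n->cpnext->cpprev = n;
        }
        *head = n;
}

/*
 * Unlink a node; clear *head if the list becomes empty, exactly like
 * __buffer_unlink() clears t_checkpoint_list.
 */
static void cp_unlink(struct cp_node **head, struct cp_node *n)
{
        n->cpnext->cpprev = n->cpprev;
        n->cpprev->cpnext = n->cpnext;
        if (*head == n) {
                *head = n->cpnext;
                if (*head == n)
                        *head = NULL;
        }
}

int main(void)
{
        struct cp_node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
        struct cp_node *list = NULL;

        cp_insert(&list, &a);
        cp_insert(&list, &b);
        cp_insert(&list, &c);

        cp_unlink(&list, &b);
        cp_unlink(&list, &c);
        cp_unlink(&list, &a);
        assert(list == NULL);

        printf("circular checkpoint-list sketch OK\n");
        return 0;
}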