// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
	J_ASSERT(!transaction_cache);
	transaction_cache = kmem_cache_create("jbd2_transaction_s",
					      sizeof(transaction_t),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
					      NULL);
	if (!transaction_cache) {
		pr_emerg("JBD2: failed to create transaction cache\n");
		return -ENOMEM;
	}
	return 0;
}

void jbd2_journal_destroy_transaction_cache(void)
{
	kmem_cache_destroy(transaction_cache);
	transaction_cache = NULL;
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
	if (unlikely(ZERO_OR_NULL_PTR(transaction)))
		return;
	kmem_cache_free(transaction_cache, transaction);
}

/*
 * Base amount of descriptor blocks we reserve for each transaction.
 */
static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
{
	int tag_space = journal->j_blocksize - sizeof(journal_header_t);
	int tags_per_block;

	/* Subtract UUID */
	tag_space -= 16;
	if (jbd2_journal_has_csum_v2or3(journal))
		tag_space -= sizeof(struct jbd2_journal_block_tail);
	/* Commit code leaves a slack space of 16 bytes at the end of block */
	tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
	/*
	 * Revoke descriptors are accounted separately so we need to reserve
	 * space for commit block and normal transaction descriptor blocks.
	 */
	return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
				tags_per_block);
}
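
/*
 * A rough, illustrative example of the reservation above (all numbers are
 * assumptions, not taken from this file): with a 4KiB journal block and
 * 16-byte block tags, the usable tag space per descriptor block is about
 * 4048 bytes once the journal header, UUID, checksum tail and 16-byte
 * slack are subtracted, i.e. roughly 253 tags per block. A journal with
 * j_max_transaction_buffers == 8192 would then reserve
 * 1 + DIV_ROUND_UP(8192, 253) == 34 blocks for the commit block plus the
 * descriptor blocks of one transaction. The exact figures depend on the
 * block size and feature flags (csum v2/v3, 64-bit block numbers).
 */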

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply initialise a new transaction. Initialize it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked. We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 */

static void jbd2_get_transaction(journal_t *journal,
				 transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	atomic_set(&transaction->t_updates, 0);
	atomic_set(&transaction->t_outstanding_credits,
		   jbd2_descriptor_blocks_per_trans(journal) +
		   atomic_read(&journal->j_reserved_credits));
	atomic_set(&transaction->t_outstanding_revokes, 0);
	atomic_set(&transaction->t_handle_count, 0);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;
	transaction->t_requested = 0;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * t_max_wait is carefully updated here with use of atomic compare exchange.
 * Note that there could be multiple threads trying to do this simultaneously
 * hence using cmpxchg to avoid any use of locks in this case.
 * With this t_max_wait can be updated w/o enabling jbd2_journal_enable_debug.
 */
static inline void update_t_max_wait(transaction_t *transaction,
				     unsigned long ts)
{
	unsigned long oldts, newts;

	if (time_after(transaction->t_start, ts)) {
		newts = jbd2_time_diff(ts, transaction->t_start);
		oldts = READ_ONCE(transaction->t_max_wait);
		while (oldts < newts)
			oldts = cmpxchg(&transaction->t_max_wait, oldts, newts);
	}
}

/*
 * Wait until running transaction passes to T_FLUSH state and new transaction
 * can thus be started. Also starts the commit if needed. The function expects
 * running transaction to exist and releases j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);
	int need_to_start;
	tid_t tid = journal->j_running_transaction->t_tid;

	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	jbd2_might_wait_for_commit(journal);
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

/*
 * Wait until running transaction transitions from T_SWITCH to T_FLUSH
 * state and new transaction can thus be started. The function releases
 * j_state_lock.
 */
static void wait_transaction_switching(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);

	if (WARN_ON(!journal->j_running_transaction ||
		    journal->j_running_transaction->t_state != T_SWITCH)) {
		read_unlock(&journal->j_state_lock);
		return;
	}
	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	read_unlock(&journal->j_state_lock);
	/*
	 * We don't call jbd2_might_wait_for_commit() here as there's no
	 * waiting for outstanding handles happening anymore in T_SWITCH state
	 * and handling of reserved handles actually relies on that for
	 * correctness.
	 */
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

static void sub_reserved_credits(journal_t *journal, int blocks)
{
	atomic_sub(blocks, &journal->j_reserved_credits);
	wake_up(&journal->j_wait_reserved);
}

/*
 * Wait until we can add credits for handle to the running transaction. Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
 * caller must retry.
 *
 * Note: because j_state_lock may be dropped depending on the return
 * value, we need to fake out sparse so it doesn't complain about a
 * locking imbalance. Callers of add_transaction_credits will need to
 * make a similar accommodation.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
				   int rsv_blocks)
	__must_hold(&journal->j_state_lock)
{
	transaction_t *t = journal->j_running_transaction;
	int needed;
	int total = blocks + rsv_blocks;

	/*
	 * If the current transaction is locked down for commit, wait
	 * for the lock to be released.
	 */
	if (t->t_state != T_RUNNING) {
		WARN_ON_ONCE(t->t_state >= T_FLUSH);
		wait_transaction_locked(journal);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/*
	 * If there is not enough space left in the log to write all
	 * potential buffers requested by this operation, we need to
	 * stall pending a log checkpoint to free some more log space.
	 */
	needed = atomic_add_return(total, &t->t_outstanding_credits);
	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large,
		 * then start to commit it: we can then go back and
		 * attach this handle to a new transaction.
		 */
		atomic_sub(total, &t->t_outstanding_credits);

		/*
		 * Is the number of reserved credits in the current transaction
		 * too big to fit this handle? Wait until reserved credits are
		 * freed.
		 */
		if (atomic_read(&journal->j_reserved_credits) + total >
		    journal->j_max_transaction_buffers) {
			read_unlock(&journal->j_state_lock);
			jbd2_might_wait_for_commit(journal);
			wait_event(journal->j_wait_reserved,
				   atomic_read(&journal->j_reserved_credits) + total <=
				   journal->j_max_transaction_buffers);
			__acquire(&journal->j_state_lock); /* fake out sparse */
			return 1;
		}

		wait_transaction_locked(journal);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.
	 * This is *critical* for correctness: a checkpoint of a buffer which
	 * is also associated with a committing transaction creates a
	 * deadlock, so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 */
	if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		write_lock(&journal->j_state_lock);
		if (jbd2_log_space_left(journal) <
					journal->j_max_transaction_buffers)
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/* No reservation? We are done... */
	if (!rsv_blocks)
		return 0;

	needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
	/* We allow at most half of a transaction to be reserved */
	if (needed > journal->j_max_transaction_buffers / 2) {
		sub_reserved_credits(journal, rsv_blocks);
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) + rsv_blocks
			   <= journal->j_max_transaction_buffers / 2);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}
	return 0;
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin. Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
			     gfp_t gfp_mask)
{
	transaction_t *transaction, *new_transaction = NULL;
	int blocks = handle->h_total_credits;
	int rsv_blocks = 0;
	unsigned long ts = jiffies;

	if (handle->h_rsv_handle)
		rsv_blocks = handle->h_rsv_handle->h_total_credits;

	/*
	 * Limit the number of reserved credits to 1/2 of maximum transaction
	 * size and limit the number of total credits to not exceed maximum
	 * transaction size per operation.
	 */
	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
		printk(KERN_ERR "JBD2: %s wants too many credits "
		       "credits:%d rsv_credits:%d max:%d\n",
		       current->comm, blocks, rsv_blocks,
		       journal->j_max_transaction_buffers);
		WARN_ON(1);
		return -ENOSPC;
	}

alloc_transaction:
	/*
	 * This check is racy but it is just an optimization of allocating new
	 * transaction early if there are high chances we'll need it. If we
	 * guess wrong, we'll retry or free the unused transaction.
	 */
	if (!data_race(journal->j_running_transaction)) {
		/*
		 * If __GFP_FS is not present, then we may be being called from
		 * inside the fs writeback layer, so we MUST NOT fail.
		 */
		if ((gfp_mask & __GFP_FS) == 0)
			gfp_mask |= __GFP_NOFAIL;
		new_transaction = kmem_cache_zalloc(transaction_cache,
						    gfp_mask);
		if (!new_transaction)
			return -ENOMEM;
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		jbd2_journal_free_transaction(new_transaction);
		return -EROFS;
	}

	/*
	 * Wait on the journal's transaction barrier if necessary. Specifically
	 * we allow reserved handles to proceed because otherwise commit could
	 * deadlock on page writeback not being able to complete.
	 */
	if (!handle->h_reserved && journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
			   journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction &&
		    (handle->h_reserved || !journal->j_barrier_count)) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	if (!handle->h_reserved) {
		/* We may have dropped j_state_lock - restart in that case */
		if (add_transaction_credits(journal, blocks, rsv_blocks)) {
			/*
			 * add_transaction_credits releases
			 * j_state_lock on a non-zero return
			 */
			__release(&journal->j_state_lock);
			goto repeat;
		}
	} else {
		/*
		 * We have handle reserved so we are allowed to join T_LOCKED
		 * transaction and we don't have to check for transaction size
		 * and journal space. But we still have to wait while running
		 * transaction is being switched to a committing one as it
		 * won't wait for any handles anymore.
		 */
		if (transaction->t_state == T_SWITCH) {
			wait_transaction_switching(journal);
			goto repeat;
		}
		sub_reserved_credits(journal, blocks);
		handle->h_reserved = 0;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction.
	 */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	handle->h_requested_credits = blocks;
	handle->h_revoke_credits_requested = handle->h_revoke_credits;
	handle->h_start_jiffies = jiffies;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
		  handle, blocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);
	current->journal_info = handle;

	rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
	jbd2_journal_free_transaction(new_transaction);
	/*
	 * Ensure that no allocations done while the transaction is open are
	 * going to recurse back to the fs layer.
	 */
	handle->saved_alloc_context = memalloc_nofs_save();
	return 0;
}

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	handle->h_total_credits = nblocks;
	handle->h_ref = 1;

	return handle;
}

handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
			      int revoke_records, gfp_t gfp_mask,
			      unsigned int type, unsigned int line_no)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	nblocks += DIV_ROUND_UP(revoke_records,
				journal->j_revoke_records_per_block);
	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	if (rsv_blocks) {
		handle_t *rsv_handle;

		rsv_handle = new_handle(rsv_blocks);
		if (!rsv_handle) {
			jbd2_free_handle(handle);
			return ERR_PTR(-ENOMEM);
		}
		rsv_handle->h_reserved = 1;
		rsv_handle->h_journal = journal;
		handle->h_rsv_handle = rsv_handle;
	}
	handle->h_revoke_credits = revoke_records;

	err = start_this_handle(journal, handle, gfp_mask);
	if (err < 0) {
		if (handle->h_rsv_handle)
			jbd2_free_handle(handle->h_rsv_handle);
		jbd2_free_handle(handle);
		return ERR_PTR(err);
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, nblocks);

	return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


/**
 * jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log. We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0, we also create another
 * handle with rsv_blocks reserved blocks in the journal. This handle is
 * stored in h_rsv_handle. It is not attached to any particular transaction
 * and thus doesn't block transaction commit. If the caller uses this reserved
 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
 * on the parent handle will dispose the reserved one. The reserved handle has
 * to be converted to a normal handle using jbd2_journal_start_reserved()
 * before it can be used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
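
/*
 * Illustrative sketch (not part of this file's API surface): a typical
 * metadata update by a jbd2 client looks roughly like the following,
 * where "journal", "bh" and "credits" are whatever the filesystem has
 * at hand:
 *
 *	handle_t *handle = jbd2_journal_start(journal, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	err2 = jbd2_journal_stop(handle);
 *
 * The handle pins the running transaction until jbd2_journal_stop(), so
 * it should only be held for the duration of one atomic update.
 */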

static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
	journal_t *journal = handle->h_journal;

	WARN_ON(!handle->h_reserved);
	sub_reserved_credits(journal, handle->h_total_credits);
	if (t)
		atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}

void jbd2_journal_free_reserved(handle_t *handle)
{
	journal_t *journal = handle->h_journal;

	/* Get j_state_lock to pin running transaction if it exists */
	read_lock(&journal->j_state_lock);
	__jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
	read_unlock(&journal->j_state_lock);
	jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);

/**
 * jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start a handle that has been previously reserved by jbd2__journal_start()
 * with rsv_blocks > 0. This attaches @handle to the running transaction (or
 * creates one if there's no transaction running). Unlike jbd2_journal_start()
 * this function cannot block on journal commit, checkpointing, or similar
 * stuff. It can block on memory allocation or frozen journal though.
 *
 * Return 0 on success, non-zero on error - handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
				unsigned int line_no)
{
	journal_t *journal = handle->h_journal;
	int ret = -EIO;

	if (WARN_ON(!handle->h_reserved)) {
		/* Someone passed in normal handle? Just stop it. */
		jbd2_journal_stop(handle);
		return ret;
	}
	/*
	 * Usefulness of mixing of reserved and unreserved handles is
	 * questionable. So far nobody seems to need it so just error out.
	 */
	if (WARN_ON(current->journal_info)) {
		jbd2_journal_free_reserved(handle);
		return ret;
	}

	handle->h_journal = NULL;
	/*
	 * GFP_NOFS is here because callers are likely from writeback or
	 * similarly constrained call sites
	 */
	ret = start_this_handle(journal, handle, GFP_NOFS);
	if (ret < 0) {
		handle->h_journal = journal;
		jbd2_journal_free_reserved(handle);
		return ret;
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, handle->h_total_credits);
	return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
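
/*
 * Illustrative sketch (an assumed caller, not a recipe from this file):
 * a reserved handle is obtained up front via jbd2__journal_start() with
 * rsv_blocks > 0 and later activated with jbd2_journal_start_reserved():
 *
 *	handle = jbd2__journal_start(journal, nblocks, rsv_blocks, 0,
 *				     GFP_NOFS, 0, 0);
 *	rsv = handle->h_rsv_handle;
 *	handle->h_rsv_handle = NULL;	(keep rsv across jbd2_journal_stop)
 *	jbd2_journal_stop(handle);
 *	...
 *	err = jbd2_journal_start_reserved(rsv, 0, 0);
 *	if (!err) {
 *		... use rsv like a normal handle ...
 *		err = jbd2_journal_stop(rsv);
 *	}
 */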

/**
 * jbd2_journal_extend() - extend buffer credits.
 * @handle: handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 * @revoke_records: number of revoke records to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages. The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that the allocation - this is a best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int result;
	int wanted;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	result = 1;

	read_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	nblocks += DIV_ROUND_UP(
			handle->h_revoke_credits_requested + revoke_records,
			journal->j_revoke_records_per_block) -
		   DIV_ROUND_UP(
			handle->h_revoke_credits_requested,
			journal->j_revoke_records_per_block);
	wanted = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		goto error_out;
	}

	trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
				 transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_total_credits,
				 nblocks);

	handle->h_total_credits += nblocks;
	handle->h_requested_credits += nblocks;
	handle->h_revoke_credits += revoke_records;
	handle->h_revoke_credits_requested += revoke_records;
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
error_out:
	read_unlock(&journal->j_state_lock);
	return result;
}
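
/*
 * Illustrative sketch (hypothetical caller, not from this file): a client
 * that runs short of credits mid-operation typically tries to extend the
 * handle and falls back to restarting it on a fresh transaction:
 *
 *	err = jbd2_journal_extend(handle, extra_credits, 0);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, total_credits);
 *	if (err)
 *		... bail out, the handle could not be resized ...
 *
 * After a restart the caller must re-obtain write access to any buffers it
 * still intends to modify, since they may belong to the now-committing
 * transaction.
 */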

static void stop_this_handle(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int revokes;

	J_ASSERT(journal_current_handle() == handle);
	J_ASSERT(atomic_read(&transaction->t_updates) > 0);
	current->journal_info = NULL;
	/*
	 * Subtract necessary revoke descriptor blocks from handle credits. We
	 * take care to account only for revoke descriptor blocks the
	 * transaction will really need as large sequences of transactions with
	 * small numbers of revokes are relatively common.
	 */
	revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
	if (revokes) {
		int t_revokes, revoke_descriptors;
		int rr_per_blk = journal->j_revoke_records_per_block;

		WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
				> handle->h_total_credits);
		t_revokes = atomic_add_return(revokes,
				&transaction->t_outstanding_revokes);
		revoke_descriptors =
			DIV_ROUND_UP(t_revokes, rr_per_blk) -
			DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
		handle->h_total_credits -= revoke_descriptors;
	}
	atomic_sub(handle->h_total_credits,
		   &transaction->t_outstanding_credits);
	if (handle->h_rsv_handle)
		__jbd2_journal_unreserve_handle(handle->h_rsv_handle,
						transaction);
	if (atomic_dec_and_test(&transaction->t_updates))
		wake_up(&journal->j_wait_updates);

	rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
	/*
	 * Scope of the GFP_NOFS context is over here and so we can restore the
	 * original alloc context.
	 */
	memalloc_nofs_restore(handle->saved_alloc_context);
}

/**
 * jbd2__journal_restart() - restart a handle.
 * @handle: handle to restart
 * @nblocks: nr credits requested
 * @revoke_records: number of revoke record credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve the reserved handle if one is attached to the
 * passed-in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
			  gfp_t gfp_mask)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	tid_t tid;
	int need_to_start;
	int ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;
	journal = transaction->t_journal;
	tid = transaction->t_tid;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	jbd_debug(2, "restarting handle %p\n", handle);
	stop_this_handle(handle);
	handle->h_transaction = NULL;

	/*
	 * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
	 * get rid of pointless j_state_lock traffic like this.
	 */
	read_lock(&journal->j_state_lock);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	handle->h_total_credits = nblocks +
		DIV_ROUND_UP(revoke_records,
			     journal->j_revoke_records_per_block);
	handle->h_revoke_credits = revoke_records;
	ret = start_this_handle(journal, handle, gfp_mask);
	trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
				  ret ? 0 : handle->h_transaction->t_tid,
				  handle->h_type, handle->h_line_no,
				  handle->h_total_credits);
	return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);

/*
 * Waits for any outstanding t_updates to finish.
 * This is called with j_state_lock held for writing.
 */
void jbd2_journal_wait_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	while (1) {
		/*
		 * Note that the running transaction can get freed under us if
		 * this transaction is getting committed in
		 * jbd2_journal_commit_transaction() ->
		 * jbd2_journal_free_transaction(). This can only happen when we
		 * release j_state_lock -> schedule() -> acquire j_state_lock.
		 * Hence we should re-read the j_running_transaction value every
		 * time (after the j_state_lock release/acquire cycle), else it
		 * may lead to a use-after-free of the old freed transaction.
		 */
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&transaction->t_updates)) {
			finish_wait(&journal->j_wait_updates, &wait);
			break;
		}
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		write_lock(&journal->j_state_lock);
	}
}

/**
 * jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal: Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	jbd2_might_wait_for_commit(journal);

	write_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no reserved handles */
	if (atomic_read(&journal->j_reserved_credits)) {
		write_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) == 0);
		write_lock(&journal->j_state_lock);
	}

	/* Wait until there are no running t_updates */
	jbd2_journal_wait_updates(journal);

	write_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * jbd2_journal_unlock_updates () - release barrier
 * @journal: Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	write_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
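
/*
 * Illustrative sketch (assumed caller, not from this file): the barrier
 * pair above is used to quiesce the journal around operations that must
 * not race with running handles, e.g.:
 *
 *	jbd2_journal_lock_updates(journal);
 *	... no new handles can start, all existing updates have drained ...
 *	jbd2_journal_unlock_updates(journal);
 *
 * Holding the barrier stalls every task that tries to start a handle, so
 * the quiesced section should be kept short.
 */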

static void warn_dirty_buffer(struct buffer_head *bh)
{
	printk(KERN_WARNING
	       "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bh->b_bdev, (unsigned long long)bh->b_blocknr);
}

/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
	struct page *page;
	int offset;
	char *source;
	struct buffer_head *bh = jh2bh(jh);

	J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
	page = bh->b_page;
	offset = offset_in_page(bh->b_data);
	source = kmap_atomic(page);
	/* Fire data frozen trigger just before we copy the data */
	jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
	memcpy(jh->b_frozen_data, source + offset, bh->b_size);
	kunmap_atomic(source);

	/*
	 * Now that the frozen data is saved off, we need to store any matching
	 * triggers.
	 */
	jh->b_frozen_triggers = jh->b_triggers;
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do. If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk. We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
		    int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	unsigned long start_lock, time_lock;

	journal = transaction->t_journal;

	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */

	start_lock = jiffies;
	lock_buffer(bh);
	spin_lock(&jh->b_state_lock);

	/* If it takes too long to lock the buffer, trace it */
	time_lock = jbd2_time_diff(start_lock, jiffies);
	if (time_lock > HZ/10)
		trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
			jiffies_to_msecs(time_lock));

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		spin_unlock(&jh->b_state_lock);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If the buffer is not journaled right now, we need to make sure it
	 * doesn't get written to disk before the caller actually commits the
	 * new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		/*
		 * Make sure all stores to jh (b_modified, b_frozen_data) are
		 * visible before attaching it to the running transaction.
		 * Paired with barrier in jbd2_write_access_granted()
		 */
		smp_wmb();
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
		goto done;
	}
	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		goto attach_next;
	}

	JBUFFER_TRACE(jh, "owned by older transaction");
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

	/*
	 * There is one case we have to be very careful about.  If the
	 * committing transaction is currently writing this buffer out to disk
	 * and has NOT made a copy-out, then we cannot modify the buffer
	 * contents at all right now.  The essence of copy-out is that it is
	 * the extra copy, not the primary copy, which gets journaled.  If the
	 * primary copy is already going to disk then we cannot do copy-out
	 * here.
	 */
	if (buffer_shadow(bh)) {
		JBUFFER_TRACE(jh, "on shadow: sleep");
		spin_unlock(&jh->b_state_lock);
		wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
		goto repeat;
	}

	/*
	 * Only do the copy if the currently-owning transaction still needs it.
	 * If buffer isn't on BJ_Metadata list, the committing transaction is
	 * past that stage (here we use the fact that BH_Shadow is set under
	 * bh_state lock together with refiling to BJ_Shadow list and at this
	 * point we know the buffer doesn't have BH_Shadow set).
	 *
	 * Subtle point, though: if this is a get_undo_access, then we will be
	 * relying on the frozen_data to contain the new value of the
	 * committed_data record after the transaction, so we HAVE to force the
	 * frozen_data copy in that case.
	 */
	if (jh->b_jlist == BJ_Metadata || force_copy) {
		JBUFFER_TRACE(jh, "generate frozen data");
		if (!frozen_buffer) {
			JBUFFER_TRACE(jh, "allocate memory for buffer");
			spin_unlock(&jh->b_state_lock);
			frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			goto repeat;
		}
		jh->b_frozen_data = frozen_buffer;
		frozen_buffer = NULL;
		jbd2_freeze_jh_data(jh);
	}
attach_next:
	/*
	 * Make sure all stores to jh (b_modified, b_frozen_data) are visible
	 * before attaching it to the running transaction. Paired with barrier
	 * in jbd2_write_access_granted()
	 */
	smp_wmb();
	jh->b_next_transaction = transaction;

done:
	spin_unlock(&jh->b_state_lock);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
				      bool undo)
{
	struct journal_head *jh;
	bool ret = false;

	/* Dirty buffers require special handling... */
	if (buffer_dirty(bh))
		return false;

	/*
	 * RCU protects us from dereferencing freed pages. So the checks we do
	 * are guaranteed not to oops. However the jh slab object can get freed
	 * & reallocated while we work with it. So we have to be careful. When
	 * we see jh attached to the running transaction, we know it must stay
	 * so until the transaction is committed. Thus jh won't be freed and
	 * will be attached to the same bh while we run. However it can
	 * happen jh gets freed, reallocated, and attached to the transaction
	 * just after we get pointer to it from bh. So we have to be careful
	 * and recheck jh still belongs to our bh before we return success.
	 */
	rcu_read_lock();
	if (!buffer_jbd(bh))
		goto out;
	/* This should be bh2jh() but that doesn't work with inline functions */
	jh = READ_ONCE(bh->b_private);
	if (!jh)
		goto out;
	/* For undo access buffer must have data copied */
	if (undo && !jh->b_committed_data)
		goto out;
	if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
	    READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
		goto out;
	/*
	 * There are two reasons for the barrier here:
	 * 1) Make sure to fetch b_bh after we did previous checks so that we
	 * detect when jh went through free, realloc, attach to transaction
	 * while we were checking. Paired with implicit barrier in that path.
	 * 2) So that access to bh done after jbd2_write_access_granted()
	 * doesn't get reordered and see inconsistent state of concurrent
	 * do_get_write_access().
	 */
	smp_mb();
	if (unlikely(jh->b_bh != bh))
		goto out;
	ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/**
 * jbd2_journal_get_write_access() - notify intent to modify a buffer
 *				     for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh: bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh;
	int rc;

	if (is_handle_aborted(handle))
		return -EROFS;

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	journal = transaction->t_journal;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	spin_lock(&jh->b_state_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous jbd2_journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		spin_lock(&journal->j_list_lock);
		jh->b_next_transaction = transaction;
		spin_unlock(&journal->j_list_lock);
	}
	spin_unlock(&jh->b_state_lock);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and then reallocating it as data - this would cause a second revoke,
	 * which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
out:
	jbd2_journal_put_journal_head(jh);
	return err;
}

/**
 * jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space; we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh;
	char *committed_data = NULL;

	if (is_handle_aborted(handle))
		return -EROFS;

	if (jbd2_write_access_granted(handle, bh, true))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data)
		committed_data = jbd2_alloc(jh2bh(jh)->b_size,
					    GFP_NOFS|__GFP_NOFAIL);

	spin_lock(&jh->b_state_lock);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			spin_unlock(&jh->b_state_lock);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	spin_unlock(&jh->b_state_lock);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}
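
/*
 * Illustrative sketch (hypothetical bitmap_bh, not from this file): undo
 * access is typically taken on a block bitmap before clearing bits for
 * blocks being freed, so that a copy of the bitmap as last committed is
 * preserved and the freed space is not reused before the deallocation
 * commits:
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (!err) {
 *		... clear the freed blocks' bits in bitmap_bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 *	}
 */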

/**
 * jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

	if (WARN_ON_ONCE(!jh))
		return;
	jh->b_triggers = type;
	jbd2_journal_put_journal_head(jh);
}

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
				struct jbd2_buffer_trigger_type *triggers)
{
	struct buffer_head *bh = jh2bh(jh);

	if (!triggers || !triggers->t_frozen)
		return;

	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
			       struct jbd2_buffer_trigger_type *triggers)
{
	if (!triggers || !triggers->t_abort)
		return;

	triggers->t_abort(triggers, jh2bh(jh));
}

/**
 * jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int ret = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	if (!buffer_jbd(bh))
		return -EUCLEAN;

	/*
	 * We don't grab jh reference here since the buffer must be part
	 * of the running transaction.
	 */
	jh = bh2jh(bh);
	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * This and the following assertions are unreliable since we may see jh
	 * in inconsistent state unless we grab bh_state lock. But this is
	 * crucial to catch bugs so let's do a reliable check until the
	 * lockless handling is fully proven.
	 */
	if (data_race(jh->b_transaction != transaction &&
	    jh->b_next_transaction != transaction)) {
		spin_lock(&jh->b_state_lock);
		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_next_transaction == transaction);
		spin_unlock(&jh->b_state_lock);
	}
	if (jh->b_modified == 1) {
		/* If it's in our transaction it must be in BJ_Metadata list. */
		if (data_race(jh->b_transaction == transaction &&
		    jh->b_jlist != BJ_Metadata)) {
			spin_lock(&jh->b_state_lock);
			if (jh->b_transaction == transaction &&
			    jh->b_jlist != BJ_Metadata)
				pr_err("JBD2: assertion failure: h_type=%u "
				       "h_line_no=%u block_no=%llu jlist=%u\n",
				       handle->h_type, handle->h_line_no,
				       (unsigned long long) bh->b_blocknr,
				       jh->b_jlist);
			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
					jh->b_jlist == BJ_Metadata);
			spin_unlock(&jh->b_state_lock);
		}
		goto out;
	}

	journal = transaction->t_journal;
	spin_lock(&jh->b_state_lock);

	if (jh->b_modified == 0) {
		/*
		 * This buffer is getting modified and becoming part
		 * of the transaction. This needs to be done
		 * once per transaction -bzzz
		 */
		if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
			ret = -ENOSPC;
			goto out_unlock_bh;
		}
		jh->b_modified = 1;
		handle->h_total_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		if (unlikely(jh->b_transaction !=
			     journal->j_running_transaction)) {
			printk(KERN_ERR "JBD2: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_running_transaction (%p, %u)\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction,
			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
			       journal->j_running_transaction,
			       journal->j_running_transaction ?
			       journal->j_running_transaction->t_tid : 0);
			ret = -EINVAL;
		}
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		if (unlikely(((jh->b_transaction !=
			       journal->j_committing_transaction)) ||
			     (jh->b_next_transaction != transaction))) {
			printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
			       "bad jh for block %llu: "
			       "transaction (%p, %u), "
			       "jh->b_transaction (%p, %u), "
			       "jh->b_next_transaction (%p, %u), jlist %u\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       transaction, transaction->t_tid,
			       jh->b_transaction,
			       jh->b_transaction ?
			       jh->b_transaction->t_tid : 0,
			       jh->b_next_transaction,
			       jh->b_next_transaction ?
			       jh->b_next_transaction->t_tid : 0,
			       jh->b_jlist);
			WARN_ON(1);
			ret = -EINVAL;
		}
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	spin_unlock(&jh->b_state_lock);
out:
	JBUFFER_TRACE(jh, "exit");
	return ret;
}

/**
 * jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh: bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	BUFFER_TRACE(bh, "entry");

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh) {
		__bforget(bh);
		return 0;
	}

	spin_lock(&jh->b_state_lock);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto drop;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer's going from the transaction, we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
1712 */ 1713 1714 spin_lock(&journal->j_list_lock); 1715 if (jh->b_cp_transaction) { 1716 __jbd2_journal_temp_unlink_buffer(jh); 1717 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1718 } else { 1719 __jbd2_journal_unfile_buffer(jh); 1720 jbd2_journal_put_journal_head(jh); 1721 } 1722 spin_unlock(&journal->j_list_lock); 1723 } else if (jh->b_transaction) { 1724 J_ASSERT_JH(jh, (jh->b_transaction == 1725 journal->j_committing_transaction)); 1726 /* However, if the buffer is still owned by a prior 1727 * (committing) transaction, we can't drop it yet... */ 1728 JBUFFER_TRACE(jh, "belongs to older transaction"); 1729 /* ... but we CAN drop it from the new transaction through 1730 * marking the buffer as freed and set j_next_transaction to 1731 * the new transaction, so that not only the commit code 1732 * knows it should clear dirty bits when it is done with the 1733 * buffer, but also the buffer can be checkpointed only 1734 * after the new transaction commits. */ 1735 1736 set_buffer_freed(bh); 1737 1738 if (!jh->b_next_transaction) { 1739 spin_lock(&journal->j_list_lock); 1740 jh->b_next_transaction = transaction; 1741 spin_unlock(&journal->j_list_lock); 1742 } else { 1743 J_ASSERT(jh->b_next_transaction == transaction); 1744 1745 /* 1746 * only drop a reference if this transaction modified 1747 * the buffer 1748 */ 1749 if (was_modified) 1750 drop_reserve = 1; 1751 } 1752 } else { 1753 /* 1754 * Finally, if the buffer is not belongs to any 1755 * transaction, we can just drop it now if it has no 1756 * checkpoint. 1757 */ 1758 spin_lock(&journal->j_list_lock); 1759 if (!jh->b_cp_transaction) { 1760 JBUFFER_TRACE(jh, "belongs to none transaction"); 1761 spin_unlock(&journal->j_list_lock); 1762 goto drop; 1763 } 1764 1765 /* 1766 * Otherwise, if the buffer has been written to disk, 1767 * it is safe to remove the checkpoint and drop it. 1768 */ 1769 if (!buffer_dirty(bh)) { 1770 __jbd2_journal_remove_checkpoint(jh); 1771 spin_unlock(&journal->j_list_lock); 1772 goto drop; 1773 } 1774 1775 /* 1776 * The buffer is still not written to disk, we should 1777 * attach this buffer to current transaction so that the 1778 * buffer can be checkpointed only after the current 1779 * transaction commits. 1780 */ 1781 clear_buffer_dirty(bh); 1782 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1783 spin_unlock(&journal->j_list_lock); 1784 } 1785 drop: 1786 __brelse(bh); 1787 spin_unlock(&jh->b_state_lock); 1788 jbd2_journal_put_journal_head(jh); 1789 if (drop_reserve) { 1790 /* no need to reserve log space for this block -bzzz */ 1791 handle->h_total_credits++; 1792 } 1793 return err; 1794 } 1795 1796 /** 1797 * jbd2_journal_stop() - complete a transaction 1798 * @handle: transaction to complete. 1799 * 1800 * All done for a particular handle. 1801 * 1802 * There is not much action needed here. We just return any remaining 1803 * buffer credits to the transaction and remove the handle. The only 1804 * complication is that we need to start a commit operation if the 1805 * filesystem is marked for synchronous update. 1806 * 1807 * jbd2_journal_stop itself will not usually return an error, but it may 1808 * do so in unusual circumstances. In particular, expect it to 1809 * return -EIO if a jbd2_journal_abort has been executed since the 1810 * transaction began. 
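 *
 * Illustrative sketch (not taken from this file; the filesystem helper
 * name is hypothetical): callers normally pair jbd2_journal_start() with
 * jbd2_journal_stop() and fold the stop error into their own result:
 *
 *	handle = jbd2_journal_start(journal, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = my_fs_update_metadata(handle);	// hypothetical helper
 *	err2 = jbd2_journal_stop(handle);
 *	if (!err)
 *		err = err2;
 *	return err;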
1811 */ 1812 int jbd2_journal_stop(handle_t *handle) 1813 { 1814 transaction_t *transaction = handle->h_transaction; 1815 journal_t *journal; 1816 int err = 0, wait_for_commit = 0; 1817 tid_t tid; 1818 pid_t pid; 1819 1820 if (--handle->h_ref > 0) { 1821 jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, 1822 handle->h_ref); 1823 if (is_handle_aborted(handle)) 1824 return -EIO; 1825 return 0; 1826 } 1827 if (!transaction) { 1828 /* 1829 * Handle is already detached from the transaction so there is 1830 * nothing to do other than free the handle. 1831 */ 1832 memalloc_nofs_restore(handle->saved_alloc_context); 1833 goto free_and_exit; 1834 } 1835 journal = transaction->t_journal; 1836 tid = transaction->t_tid; 1837 1838 if (is_handle_aborted(handle)) 1839 err = -EIO; 1840 1841 jbd_debug(4, "Handle %p going down\n", handle); 1842 trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev, 1843 tid, handle->h_type, handle->h_line_no, 1844 jiffies - handle->h_start_jiffies, 1845 handle->h_sync, handle->h_requested_credits, 1846 (handle->h_requested_credits - 1847 handle->h_total_credits)); 1848 1849 /* 1850 * Implement synchronous transaction batching. If the handle 1851 * was synchronous, don't force a commit immediately. Let's 1852 * yield and let another thread piggyback onto this 1853 * transaction. Keep doing that while new threads continue to 1854 * arrive. It doesn't cost much - we're about to run a commit 1855 * and sleep on IO anyway. Speeds up many-threaded, many-dir 1856 * operations by 30x or more... 1857 * 1858 * We try and optimize the sleep time against what the 1859 * underlying disk can do, instead of having a static sleep 1860 * time. This is useful for the case where our storage is so 1861 * fast that it is more optimal to go ahead and force a flush 1862 * and wait for the transaction to be committed than it is to 1863 * wait for an arbitrary amount of time for new writers to 1864 * join the transaction. We achieve this by measuring how 1865 * long it takes to commit a transaction, and compare it with 1866 * how long this transaction has been running, and if run time 1867 * < commit time then we sleep for the delta and commit. This 1868 * greatly helps super fast disks that would see slowdowns as 1869 * more threads started doing fsyncs. 1870 * 1871 * But don't do this if this process was the most recent one 1872 * to perform a synchronous write. We do this to detect the 1873 * case where a single process is doing a stream of sync 1874 * writes. No point in waiting for joiners in that case. 1875 * 1876 * Setting max_batch_time to 0 disables this completely. 
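 *
 * Worked example (numbers purely illustrative): with an average commit
 * time of 2ms and j_min_batch_time/j_max_batch_time of 0 and 15000us,
 * a sync handle whose transaction has only run for 0.5ms sleeps roughly
 * 2ms here (note the us -> ns conversion by the factor of 1000 below)
 * so that other handles get a chance to join before the commit is
 * requested.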
1877 */ 1878 pid = current->pid; 1879 if (handle->h_sync && journal->j_last_sync_writer != pid && 1880 journal->j_max_batch_time) { 1881 u64 commit_time, trans_time; 1882 1883 journal->j_last_sync_writer = pid; 1884 1885 read_lock(&journal->j_state_lock); 1886 commit_time = journal->j_average_commit_time; 1887 read_unlock(&journal->j_state_lock); 1888 1889 trans_time = ktime_to_ns(ktime_sub(ktime_get(), 1890 transaction->t_start_time)); 1891 1892 commit_time = max_t(u64, commit_time, 1893 1000*journal->j_min_batch_time); 1894 commit_time = min_t(u64, commit_time, 1895 1000*journal->j_max_batch_time); 1896 1897 if (trans_time < commit_time) { 1898 ktime_t expires = ktime_add_ns(ktime_get(), 1899 commit_time); 1900 set_current_state(TASK_UNINTERRUPTIBLE); 1901 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); 1902 } 1903 } 1904 1905 if (handle->h_sync) 1906 transaction->t_synchronous_commit = 1; 1907 1908 /* 1909 * If the handle is marked SYNC, we need to set another commit 1910 * going! We also want to force a commit if the transaction is too 1911 * old now. 1912 */ 1913 if (handle->h_sync || 1914 time_after_eq(jiffies, transaction->t_expires)) { 1915 /* Do this even for aborted journals: an abort still 1916 * completes the commit thread, it just doesn't write 1917 * anything to disk. */ 1918 1919 jbd_debug(2, "transaction too old, requesting commit for " 1920 "handle %p\n", handle); 1921 /* This is non-blocking */ 1922 jbd2_log_start_commit(journal, tid); 1923 1924 /* 1925 * Special case: JBD2_SYNC synchronous updates require us 1926 * to wait for the commit to complete. 1927 */ 1928 if (handle->h_sync && !(current->flags & PF_MEMALLOC)) 1929 wait_for_commit = 1; 1930 } 1931 1932 /* 1933 * Once stop_this_handle() drops t_updates, the transaction could start 1934 * committing on us and eventually disappear. So we must not 1935 * dereference transaction pointer again after calling 1936 * stop_this_handle(). 1937 */ 1938 stop_this_handle(handle); 1939 1940 if (wait_for_commit) 1941 err = jbd2_log_wait_commit(journal, tid); 1942 1943 free_and_exit: 1944 if (handle->h_rsv_handle) 1945 jbd2_free_handle(handle->h_rsv_handle); 1946 jbd2_free_handle(handle); 1947 return err; 1948 } 1949 1950 /* 1951 * 1952 * List management code snippets: various functions for manipulating the 1953 * transaction buffer lists. 1954 * 1955 */ 1956 1957 /* 1958 * Append a buffer to a transaction list, given the transaction's list head 1959 * pointer. 1960 * 1961 * j_list_lock is held. 1962 * 1963 * jh->b_state_lock is held. 1964 */ 1965 1966 static inline void 1967 __blist_add_buffer(struct journal_head **list, struct journal_head *jh) 1968 { 1969 if (!*list) { 1970 jh->b_tnext = jh->b_tprev = jh; 1971 *list = jh; 1972 } else { 1973 /* Insert at the tail of the list to preserve order */ 1974 struct journal_head *first = *list, *last = first->b_tprev; 1975 jh->b_tprev = last; 1976 jh->b_tnext = first; 1977 last->b_tnext = first->b_tprev = jh; 1978 } 1979 } 1980 1981 /* 1982 * Remove a buffer from a transaction list, given the transaction's list 1983 * head pointer. 1984 * 1985 * Called with j_list_lock held, and the journal may not be locked. 1986 * 1987 * jh->b_state_lock is held. 
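 *
 * (These per-transaction lists are circular, doubly linked rings threaded
 * through b_tnext/b_tprev, with *list pointing at the head element and an
 * empty list represented as *list == NULL. Removing the current head
 * advances *list to the next element, or clears it when the ring held a
 * single entry.)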
1988 */ 1989 1990 static inline void 1991 __blist_del_buffer(struct journal_head **list, struct journal_head *jh) 1992 { 1993 if (*list == jh) { 1994 *list = jh->b_tnext; 1995 if (*list == jh) 1996 *list = NULL; 1997 } 1998 jh->b_tprev->b_tnext = jh->b_tnext; 1999 jh->b_tnext->b_tprev = jh->b_tprev; 2000 } 2001 2002 /* 2003 * Remove a buffer from the appropriate transaction list. 2004 * 2005 * Note that this function can *change* the value of 2006 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or 2007 * t_reserved_list. If the caller is holding onto a copy of one of these 2008 * pointers, it could go bad. Generally the caller needs to re-read the 2009 * pointer from the transaction_t. 2010 * 2011 * Called under j_list_lock. 2012 */ 2013 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) 2014 { 2015 struct journal_head **list = NULL; 2016 transaction_t *transaction; 2017 struct buffer_head *bh = jh2bh(jh); 2018 2019 lockdep_assert_held(&jh->b_state_lock); 2020 transaction = jh->b_transaction; 2021 if (transaction) 2022 assert_spin_locked(&transaction->t_journal->j_list_lock); 2023 2024 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 2025 if (jh->b_jlist != BJ_None) 2026 J_ASSERT_JH(jh, transaction != NULL); 2027 2028 switch (jh->b_jlist) { 2029 case BJ_None: 2030 return; 2031 case BJ_Metadata: 2032 transaction->t_nr_buffers--; 2033 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0); 2034 list = &transaction->t_buffers; 2035 break; 2036 case BJ_Forget: 2037 list = &transaction->t_forget; 2038 break; 2039 case BJ_Shadow: 2040 list = &transaction->t_shadow_list; 2041 break; 2042 case BJ_Reserved: 2043 list = &transaction->t_reserved_list; 2044 break; 2045 } 2046 2047 __blist_del_buffer(list, jh); 2048 jh->b_jlist = BJ_None; 2049 if (transaction && is_journal_aborted(transaction->t_journal)) 2050 clear_buffer_jbddirty(bh); 2051 else if (test_clear_buffer_jbddirty(bh)) 2052 mark_buffer_dirty(bh); /* Expose it to the VM */ 2053 } 2054 2055 /* 2056 * Remove buffer from all transactions. The caller is responsible for dropping 2057 * the jh reference that belonged to the transaction. 2058 * 2059 * Called with bh_state lock and j_list_lock 2060 */ 2061 static void __jbd2_journal_unfile_buffer(struct journal_head *jh) 2062 { 2063 J_ASSERT_JH(jh, jh->b_transaction != NULL); 2064 J_ASSERT_JH(jh, jh->b_next_transaction == NULL); 2065 2066 __jbd2_journal_temp_unlink_buffer(jh); 2067 jh->b_transaction = NULL; 2068 } 2069 2070 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) 2071 { 2072 struct buffer_head *bh = jh2bh(jh); 2073 2074 /* Get reference so that buffer cannot be freed before we unlock it */ 2075 get_bh(bh); 2076 spin_lock(&jh->b_state_lock); 2077 spin_lock(&journal->j_list_lock); 2078 __jbd2_journal_unfile_buffer(jh); 2079 spin_unlock(&journal->j_list_lock); 2080 spin_unlock(&jh->b_state_lock); 2081 jbd2_journal_put_journal_head(jh); 2082 __brelse(bh); 2083 } 2084 2085 /* 2086 * Called from jbd2_journal_try_to_free_buffers(). 
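 * It releases a written-back, checkpointed metadata buffer: when the
 * buffer is clean, unlocked and not attached to any transaction, its
 * remaining checkpoint entry (if any) is removed so that
 * try_to_free_buffers() can reap the buffer.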
2087 * 2088 * Called under jh->b_state_lock 2089 */ 2090 static void 2091 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) 2092 { 2093 struct journal_head *jh; 2094 2095 jh = bh2jh(bh); 2096 2097 if (buffer_locked(bh) || buffer_dirty(bh)) 2098 goto out; 2099 2100 if (jh->b_next_transaction != NULL || jh->b_transaction != NULL) 2101 goto out; 2102 2103 spin_lock(&journal->j_list_lock); 2104 if (jh->b_cp_transaction != NULL) { 2105 /* written-back checkpointed metadata buffer */ 2106 JBUFFER_TRACE(jh, "remove from checkpoint list"); 2107 __jbd2_journal_remove_checkpoint(jh); 2108 } 2109 spin_unlock(&journal->j_list_lock); 2110 out: 2111 return; 2112 } 2113 2114 /** 2115 * jbd2_journal_try_to_free_buffers() - try to free page buffers. 2116 * @journal: journal for operation 2117 * @folio: Folio to detach data from. 2118 * 2119 * For all the buffers on this page, 2120 * if they are fully written out ordered data, move them onto BUF_CLEAN 2121 * so try_to_free_buffers() can reap them. 2122 * 2123 * This function returns non-zero if we wish try_to_free_buffers() 2124 * to be called. We do this if the page is releasable by try_to_free_buffers(). 2125 * We also do it if the page has locked or dirty buffers and the caller wants 2126 * us to perform sync or async writeout. 2127 * 2128 * This complicates JBD locking somewhat. We aren't protected by the 2129 * BKL here. We wish to remove the buffer from its committing or 2130 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer. 2131 * 2132 * This may *change* the value of transaction_t->t_datalist, so anyone 2133 * who looks at t_datalist needs to lock against this function. 2134 * 2135 * Even worse, someone may be doing a jbd2_journal_dirty_data on this 2136 * buffer. So we need to lock against that. jbd2_journal_dirty_data() 2137 * will come out of the lock with the buffer dirty, which makes it 2138 * ineligible for release here. 2139 * 2140 * Who else is affected by this? hmm... Really the only contender 2141 * is do_get_write_access() - it could be looking at the buffer while 2142 * journal_try_to_free_buffer() is changing its state. But that 2143 * cannot happen because we never reallocate freed data as metadata 2144 * while the data is part of a transaction. Yes? 2145 * 2146 * Return false on failure, true on success 2147 */ 2148 bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio) 2149 { 2150 struct buffer_head *head; 2151 struct buffer_head *bh; 2152 bool ret = false; 2153 2154 J_ASSERT(folio_test_locked(folio)); 2155 2156 head = folio_buffers(folio); 2157 bh = head; 2158 do { 2159 struct journal_head *jh; 2160 2161 /* 2162 * We take our own ref against the journal_head here to avoid 2163 * having to add tons of locking around each instance of 2164 * jbd2_journal_put_journal_head(). 2165 */ 2166 jh = jbd2_journal_grab_journal_head(bh); 2167 if (!jh) 2168 continue; 2169 2170 spin_lock(&jh->b_state_lock); 2171 __journal_try_to_free_buffer(journal, bh); 2172 spin_unlock(&jh->b_state_lock); 2173 jbd2_journal_put_journal_head(jh); 2174 if (buffer_jbd(bh)) 2175 goto busy; 2176 } while ((bh = bh->b_this_page) != head); 2177 2178 ret = try_to_free_buffers(folio); 2179 busy: 2180 return ret; 2181 } 2182 2183 /* 2184 * This buffer is no longer needed. If it is on an older transaction's 2185 * checkpoint list we need to record it on this transaction's forget list 2186 * to pin this buffer (and hence its checkpointing transaction) down until 2187 * this transaction commits. 
If the buffer isn't on a checkpoint list, we 2188 * release it. 2189 * Returns non-zero if JBD no longer has an interest in the buffer. 2190 * 2191 * Called under j_list_lock. 2192 * 2193 * Called under jh->b_state_lock. 2194 */ 2195 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) 2196 { 2197 int may_free = 1; 2198 struct buffer_head *bh = jh2bh(jh); 2199 2200 if (jh->b_cp_transaction) { 2201 JBUFFER_TRACE(jh, "on running+cp transaction"); 2202 __jbd2_journal_temp_unlink_buffer(jh); 2203 /* 2204 * We don't want to write the buffer anymore, clear the 2205 * bit so that we don't confuse checks in 2206 * __journal_file_buffer 2207 */ 2208 clear_buffer_dirty(bh); 2209 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 2210 may_free = 0; 2211 } else { 2212 JBUFFER_TRACE(jh, "on running transaction"); 2213 __jbd2_journal_unfile_buffer(jh); 2214 jbd2_journal_put_journal_head(jh); 2215 } 2216 return may_free; 2217 } 2218 2219 /* 2220 * jbd2_journal_invalidate_folio 2221 * 2222 * This code is tricky. It has a number of cases to deal with. 2223 * 2224 * There are two invariants which this code relies on: 2225 * 2226 * i_size must be updated on disk before we start calling invalidate_folio 2227 * on the data. 2228 * 2229 * This is done in ext3 by defining an ext3_setattr method which 2230 * updates i_size before truncate gets going. By maintaining this 2231 * invariant, we can be sure that it is safe to throw away any buffers 2232 * attached to the current transaction: once the transaction commits, 2233 * we know that the data will not be needed. 2234 * 2235 * Note however that we can *not* throw away data belonging to the 2236 * previous, committing transaction! 2237 * 2238 * Any disk blocks which *are* part of the previous, committing 2239 * transaction (and which therefore cannot be discarded immediately) are 2240 * not going to be reused in the new running transaction 2241 * 2242 * The bitmap committed_data images guarantee this: any block which is 2243 * allocated in one transaction and removed in the next will be marked 2244 * as in-use in the committed_data bitmap, so cannot be reused until 2245 * the next transaction to delete the block commits. This means that 2246 * leaving committing buffers dirty is quite safe: the disk blocks 2247 * cannot be reallocated to a different file and so buffer aliasing is 2248 * not possible. 2249 * 2250 * 2251 * The above applies mainly to ordered data mode. In writeback mode we 2252 * don't make guarantees about the order in which data hits disk --- in 2253 * particular we don't guarantee that new dirty data is flushed before 2254 * transaction commit --- so it is always safe just to discard data 2255 * immediately in that mode. --sct 2256 */ 2257 2258 /* 2259 * The journal_unmap_buffer helper function returns zero if the buffer 2260 * concerned remains pinned as an anonymous buffer belonging to an older 2261 * transaction. 2262 * 2263 * We're outside-transaction here. Either or both of j_running_transaction 2264 * and j_committing_transaction may be NULL. 2265 */ 2266 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, 2267 int partial_page) 2268 { 2269 transaction_t *transaction; 2270 struct journal_head *jh; 2271 int may_free = 1; 2272 2273 BUFFER_TRACE(bh, "entry"); 2274 2275 /* 2276 * It is safe to proceed here without the j_list_lock because the 2277 * buffers cannot be stolen by try_to_free_buffers as long as we are 2278 * holding the page lock. 
--sct 2279 */ 2280 2281 jh = jbd2_journal_grab_journal_head(bh); 2282 if (!jh) 2283 goto zap_buffer_unlocked; 2284 2285 /* OK, we have data buffer in journaled mode */ 2286 write_lock(&journal->j_state_lock); 2287 spin_lock(&jh->b_state_lock); 2288 spin_lock(&journal->j_list_lock); 2289 2290 /* 2291 * We cannot remove the buffer from checkpoint lists until the 2292 * transaction adding inode to orphan list (let's call it T) 2293 * is committed. Otherwise if the transaction changing the 2294 * buffer would be cleaned from the journal before T is 2295 * committed, a crash will cause that the correct contents of 2296 * the buffer will be lost. On the other hand we have to 2297 * clear the buffer dirty bit at latest at the moment when the 2298 * transaction marking the buffer as freed in the filesystem 2299 * structures is committed because from that moment on the 2300 * block can be reallocated and used by a different page. 2301 * Since the block hasn't been freed yet but the inode has 2302 * already been added to orphan list, it is safe for us to add 2303 * the buffer to BJ_Forget list of the newest transaction. 2304 * 2305 * Also we have to clear buffer_mapped flag of a truncated buffer 2306 * because the buffer_head may be attached to the page straddling 2307 * i_size (can happen only when blocksize < pagesize) and thus the 2308 * buffer_head can be reused when the file is extended again. So we end 2309 * up keeping around invalidated buffers attached to transactions' 2310 * BJ_Forget list just to stop checkpointing code from cleaning up 2311 * the transaction this buffer was modified in. 2312 */ 2313 transaction = jh->b_transaction; 2314 if (transaction == NULL) { 2315 /* First case: not on any transaction. If it 2316 * has no checkpoint link, then we can zap it: 2317 * it's a writeback-mode buffer so we don't care 2318 * if it hits disk safely. */ 2319 if (!jh->b_cp_transaction) { 2320 JBUFFER_TRACE(jh, "not on any transaction: zap"); 2321 goto zap_buffer; 2322 } 2323 2324 if (!buffer_dirty(bh)) { 2325 /* bdflush has written it. We can drop it now */ 2326 __jbd2_journal_remove_checkpoint(jh); 2327 goto zap_buffer; 2328 } 2329 2330 /* OK, it must be in the journal but still not 2331 * written fully to disk: it's metadata or 2332 * journaled data... */ 2333 2334 if (journal->j_running_transaction) { 2335 /* ... and once the current transaction has 2336 * committed, the buffer won't be needed any 2337 * longer. */ 2338 JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget"); 2339 may_free = __dispose_buffer(jh, 2340 journal->j_running_transaction); 2341 goto zap_buffer; 2342 } else { 2343 /* There is no currently-running transaction. So the 2344 * orphan record which we wrote for this file must have 2345 * passed into commit. We must attach this buffer to 2346 * the committing transaction, if it exists. */ 2347 if (journal->j_committing_transaction) { 2348 JBUFFER_TRACE(jh, "give to committing trans"); 2349 may_free = __dispose_buffer(jh, 2350 journal->j_committing_transaction); 2351 goto zap_buffer; 2352 } else { 2353 /* The orphan record's transaction has 2354 * committed. We can cleanse this buffer */ 2355 clear_buffer_jbddirty(bh); 2356 __jbd2_journal_remove_checkpoint(jh); 2357 goto zap_buffer; 2358 } 2359 } 2360 } else if (transaction == journal->j_committing_transaction) { 2361 JBUFFER_TRACE(jh, "on committing transaction"); 2362 /* 2363 * The buffer is committing, we simply cannot touch 2364 * it. If the page is straddling i_size we have to wait 2365 * for commit and try again. 
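 * Returning -EBUSY below is what makes jbd2_journal_invalidate_folio()
 * report -EBUSY to its caller, which then has to wait for that commit to
 * finish and retry the invalidation.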
2366 */ 2367 if (partial_page) { 2368 spin_unlock(&journal->j_list_lock); 2369 spin_unlock(&jh->b_state_lock); 2370 write_unlock(&journal->j_state_lock); 2371 jbd2_journal_put_journal_head(jh); 2372 return -EBUSY; 2373 } 2374 /* 2375 * OK, buffer won't be reachable after truncate. We just clear 2376 * b_modified to not confuse transaction credit accounting, and 2377 * set j_next_transaction to the running transaction (if there 2378 * is one) and mark buffer as freed so that commit code knows 2379 * it should clear dirty bits when it is done with the buffer. 2380 */ 2381 set_buffer_freed(bh); 2382 if (journal->j_running_transaction && buffer_jbddirty(bh)) 2383 jh->b_next_transaction = journal->j_running_transaction; 2384 jh->b_modified = 0; 2385 spin_unlock(&journal->j_list_lock); 2386 spin_unlock(&jh->b_state_lock); 2387 write_unlock(&journal->j_state_lock); 2388 jbd2_journal_put_journal_head(jh); 2389 return 0; 2390 } else { 2391 /* Good, the buffer belongs to the running transaction. 2392 * We are writing our own transaction's data, not any 2393 * previous one's, so it is safe to throw it away 2394 * (remember that we expect the filesystem to have set 2395 * i_size already for this truncate so recovery will not 2396 * expose the disk blocks we are discarding here.) */ 2397 J_ASSERT_JH(jh, transaction == journal->j_running_transaction); 2398 JBUFFER_TRACE(jh, "on running transaction"); 2399 may_free = __dispose_buffer(jh, transaction); 2400 } 2401 2402 zap_buffer: 2403 /* 2404 * This is tricky. Although the buffer is truncated, it may be reused 2405 * if blocksize < pagesize and it is attached to the page straddling 2406 * EOF. Since the buffer might have been added to BJ_Forget list of the 2407 * running transaction, journal_get_write_access() won't clear 2408 * b_modified and credit accounting gets confused. So clear b_modified 2409 * here. 2410 */ 2411 jh->b_modified = 0; 2412 spin_unlock(&journal->j_list_lock); 2413 spin_unlock(&jh->b_state_lock); 2414 write_unlock(&journal->j_state_lock); 2415 jbd2_journal_put_journal_head(jh); 2416 zap_buffer_unlocked: 2417 clear_buffer_dirty(bh); 2418 J_ASSERT_BH(bh, !buffer_jbddirty(bh)); 2419 clear_buffer_mapped(bh); 2420 clear_buffer_req(bh); 2421 clear_buffer_new(bh); 2422 clear_buffer_delay(bh); 2423 clear_buffer_unwritten(bh); 2424 bh->b_bdev = NULL; 2425 return may_free; 2426 } 2427 2428 /** 2429 * jbd2_journal_invalidate_folio() 2430 * @journal: journal to use for flush... 2431 * @folio: folio to flush 2432 * @offset: start of the range to invalidate 2433 * @length: length of the range to invalidate 2434 * 2435 * Reap page buffers containing data after in the specified range in page. 2436 * Can return -EBUSY if buffers are part of the committing transaction and 2437 * the page is straddling i_size. Caller then has to wait for current commit 2438 * and try again. 
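 *
 * For example, invalidating a whole folio means passing offset == 0 and
 * length == folio_size(folio); any other combination is treated as a
 * partial-folio invalidation (see the partial_page computation below),
 * which is the only case that can return -EBUSY.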
2439 */ 2440 int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio, 2441 size_t offset, size_t length) 2442 { 2443 struct buffer_head *head, *bh, *next; 2444 unsigned int stop = offset + length; 2445 unsigned int curr_off = 0; 2446 int partial_page = (offset || length < folio_size(folio)); 2447 int may_free = 1; 2448 int ret = 0; 2449 2450 if (!folio_test_locked(folio)) 2451 BUG(); 2452 head = folio_buffers(folio); 2453 if (!head) 2454 return 0; 2455 2456 BUG_ON(stop > folio_size(folio) || stop < length); 2457 2458 /* We will potentially be playing with lists other than just the 2459 * data lists (especially for journaled data mode), so be 2460 * cautious in our locking. */ 2461 2462 bh = head; 2463 do { 2464 unsigned int next_off = curr_off + bh->b_size; 2465 next = bh->b_this_page; 2466 2467 if (next_off > stop) 2468 return 0; 2469 2470 if (offset <= curr_off) { 2471 /* This block is wholly outside the truncation point */ 2472 lock_buffer(bh); 2473 ret = journal_unmap_buffer(journal, bh, partial_page); 2474 unlock_buffer(bh); 2475 if (ret < 0) 2476 return ret; 2477 may_free &= ret; 2478 } 2479 curr_off = next_off; 2480 bh = next; 2481 2482 } while (bh != head); 2483 2484 if (!partial_page) { 2485 if (may_free && try_to_free_buffers(folio)) 2486 J_ASSERT(!folio_buffers(folio)); 2487 } 2488 return 0; 2489 } 2490 2491 /* 2492 * File a buffer on the given transaction list. 2493 */ 2494 void __jbd2_journal_file_buffer(struct journal_head *jh, 2495 transaction_t *transaction, int jlist) 2496 { 2497 struct journal_head **list = NULL; 2498 int was_dirty = 0; 2499 struct buffer_head *bh = jh2bh(jh); 2500 2501 lockdep_assert_held(&jh->b_state_lock); 2502 assert_spin_locked(&transaction->t_journal->j_list_lock); 2503 2504 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 2505 J_ASSERT_JH(jh, jh->b_transaction == transaction || 2506 jh->b_transaction == NULL); 2507 2508 if (jh->b_transaction && jh->b_jlist == jlist) 2509 return; 2510 2511 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 2512 jlist == BJ_Shadow || jlist == BJ_Forget) { 2513 /* 2514 * For metadata buffers, we track dirty bit in buffer_jbddirty 2515 * instead of buffer_dirty. We should not see a dirty bit set 2516 * here because we clear it in do_get_write_access but e.g. 2517 * tune2fs can modify the sb and set the dirty bit at any time 2518 * so we try to gracefully handle that. 
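 * Any dirty bit found here is folded into was_dirty below and re-expressed
 * as buffer_jbddirty once the buffer has been filed, so the dirty state is
 * not lost.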
2519 */ 2520 if (buffer_dirty(bh)) 2521 warn_dirty_buffer(bh); 2522 if (test_clear_buffer_dirty(bh) || 2523 test_clear_buffer_jbddirty(bh)) 2524 was_dirty = 1; 2525 } 2526 2527 if (jh->b_transaction) 2528 __jbd2_journal_temp_unlink_buffer(jh); 2529 else 2530 jbd2_journal_grab_journal_head(bh); 2531 jh->b_transaction = transaction; 2532 2533 switch (jlist) { 2534 case BJ_None: 2535 J_ASSERT_JH(jh, !jh->b_committed_data); 2536 J_ASSERT_JH(jh, !jh->b_frozen_data); 2537 return; 2538 case BJ_Metadata: 2539 transaction->t_nr_buffers++; 2540 list = &transaction->t_buffers; 2541 break; 2542 case BJ_Forget: 2543 list = &transaction->t_forget; 2544 break; 2545 case BJ_Shadow: 2546 list = &transaction->t_shadow_list; 2547 break; 2548 case BJ_Reserved: 2549 list = &transaction->t_reserved_list; 2550 break; 2551 } 2552 2553 __blist_add_buffer(list, jh); 2554 jh->b_jlist = jlist; 2555 2556 if (was_dirty) 2557 set_buffer_jbddirty(bh); 2558 } 2559 2560 void jbd2_journal_file_buffer(struct journal_head *jh, 2561 transaction_t *transaction, int jlist) 2562 { 2563 spin_lock(&jh->b_state_lock); 2564 spin_lock(&transaction->t_journal->j_list_lock); 2565 __jbd2_journal_file_buffer(jh, transaction, jlist); 2566 spin_unlock(&transaction->t_journal->j_list_lock); 2567 spin_unlock(&jh->b_state_lock); 2568 } 2569 2570 /* 2571 * Remove a buffer from its current buffer list in preparation for 2572 * dropping it from its current transaction entirely. If the buffer has 2573 * already started to be used by a subsequent transaction, refile the 2574 * buffer on that transaction's metadata list. 2575 * 2576 * Called under j_list_lock 2577 * Called under jh->b_state_lock 2578 * 2579 * When this function returns true, there's no next transaction to refile to 2580 * and the caller has to drop jh reference through 2581 * jbd2_journal_put_journal_head(). 2582 */ 2583 bool __jbd2_journal_refile_buffer(struct journal_head *jh) 2584 { 2585 int was_dirty, jlist; 2586 struct buffer_head *bh = jh2bh(jh); 2587 2588 lockdep_assert_held(&jh->b_state_lock); 2589 if (jh->b_transaction) 2590 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); 2591 2592 /* If the buffer is now unused, just drop it. */ 2593 if (jh->b_next_transaction == NULL) { 2594 __jbd2_journal_unfile_buffer(jh); 2595 return true; 2596 } 2597 2598 /* 2599 * It has been modified by a later transaction: add it to the new 2600 * transaction's metadata list. 2601 */ 2602 2603 was_dirty = test_clear_buffer_jbddirty(bh); 2604 __jbd2_journal_temp_unlink_buffer(jh); 2605 2606 /* 2607 * b_transaction must be set, otherwise the new b_transaction won't 2608 * be holding jh reference 2609 */ 2610 J_ASSERT_JH(jh, jh->b_transaction != NULL); 2611 2612 /* 2613 * We set b_transaction here because b_next_transaction will inherit 2614 * our jh reference and thus __jbd2_journal_file_buffer() must not 2615 * take a new one. 2616 */ 2617 WRITE_ONCE(jh->b_transaction, jh->b_next_transaction); 2618 WRITE_ONCE(jh->b_next_transaction, NULL); 2619 if (buffer_freed(bh)) 2620 jlist = BJ_Forget; 2621 else if (jh->b_modified) 2622 jlist = BJ_Metadata; 2623 else 2624 jlist = BJ_Reserved; 2625 __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist); 2626 J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); 2627 2628 if (was_dirty) 2629 set_buffer_jbddirty(bh); 2630 return false; 2631 } 2632 2633 /* 2634 * __jbd2_journal_refile_buffer() with necessary locking added. We take our 2635 * bh reference so that we can safely unlock bh. 2636 * 2637 * The jh and bh may be freed by this call. 
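 * If __jbd2_journal_refile_buffer() reports that the buffer ended up on no
 * transaction at all, the jh reference previously held by the transaction
 * is dropped here via jbd2_journal_put_journal_head().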
2638 */ 2639 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) 2640 { 2641 bool drop; 2642 2643 spin_lock(&jh->b_state_lock); 2644 spin_lock(&journal->j_list_lock); 2645 drop = __jbd2_journal_refile_buffer(jh); 2646 spin_unlock(&jh->b_state_lock); 2647 spin_unlock(&journal->j_list_lock); 2648 if (drop) 2649 jbd2_journal_put_journal_head(jh); 2650 } 2651 2652 /* 2653 * File inode in the inode list of the handle's transaction 2654 */ 2655 static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode, 2656 unsigned long flags, loff_t start_byte, loff_t end_byte) 2657 { 2658 transaction_t *transaction = handle->h_transaction; 2659 journal_t *journal; 2660 2661 if (is_handle_aborted(handle)) 2662 return -EROFS; 2663 journal = transaction->t_journal; 2664 2665 jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino, 2666 transaction->t_tid); 2667 2668 spin_lock(&journal->j_list_lock); 2669 jinode->i_flags |= flags; 2670 2671 if (jinode->i_dirty_end) { 2672 jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte); 2673 jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte); 2674 } else { 2675 jinode->i_dirty_start = start_byte; 2676 jinode->i_dirty_end = end_byte; 2677 } 2678 2679 /* Is inode already attached where we need it? */ 2680 if (jinode->i_transaction == transaction || 2681 jinode->i_next_transaction == transaction) 2682 goto done; 2683 2684 /* 2685 * We only ever set this variable to 1 so the test is safe. Since 2686 * t_need_data_flush is likely to be set, we do the test to save some 2687 * cacheline bouncing 2688 */ 2689 if (!transaction->t_need_data_flush) 2690 transaction->t_need_data_flush = 1; 2691 /* On some different transaction's list - should be 2692 * the committing one */ 2693 if (jinode->i_transaction) { 2694 J_ASSERT(jinode->i_next_transaction == NULL); 2695 J_ASSERT(jinode->i_transaction == 2696 journal->j_committing_transaction); 2697 jinode->i_next_transaction = transaction; 2698 goto done; 2699 } 2700 /* Not on any transaction list... */ 2701 J_ASSERT(!jinode->i_next_transaction); 2702 jinode->i_transaction = transaction; 2703 list_add(&jinode->i_list, &transaction->t_inode_list); 2704 done: 2705 spin_unlock(&journal->j_list_lock); 2706 2707 return 0; 2708 } 2709 2710 int jbd2_journal_inode_ranged_write(handle_t *handle, 2711 struct jbd2_inode *jinode, loff_t start_byte, loff_t length) 2712 { 2713 return jbd2_journal_file_inode(handle, jinode, 2714 JI_WRITE_DATA | JI_WAIT_DATA, start_byte, 2715 start_byte + length - 1); 2716 } 2717 2718 int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode, 2719 loff_t start_byte, loff_t length) 2720 { 2721 return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 2722 start_byte, start_byte + length - 1); 2723 } 2724 2725 /* 2726 * File truncate and transaction commit interact with each other in a 2727 * non-trivial way. If a transaction writing data block A is 2728 * committing, we cannot discard the data by truncate until we have 2729 * written them. Otherwise if we crashed after the transaction with 2730 * write has committed but before the transaction with truncate has 2731 * committed, we could see stale data in block A. This function is a 2732 * helper to solve this problem. It starts writeout of the truncated 2733 * part in case it is in the committing transaction. 
2734 * 2735 * Filesystem code must call this function when inode is journaled in 2736 * ordered mode before truncation happens and after the inode has been 2737 * placed on orphan list with the new inode size. The second condition 2738 * avoids the race that someone writes new data and we start 2739 * committing the transaction after this function has been called but 2740 * before a transaction for truncate is started (and furthermore it 2741 * allows us to optimize the case where the addition to orphan list 2742 * happens in the same transaction as write --- we don't have to write 2743 * any data in such case). 2744 */ 2745 int jbd2_journal_begin_ordered_truncate(journal_t *journal, 2746 struct jbd2_inode *jinode, 2747 loff_t new_size) 2748 { 2749 transaction_t *inode_trans, *commit_trans; 2750 int ret = 0; 2751 2752 /* This is a quick check to avoid locking if not necessary */ 2753 if (!jinode->i_transaction) 2754 goto out; 2755 /* Locks are here just to force reading of recent values, it is 2756 * enough that the transaction was not committing before we started 2757 * a transaction adding the inode to orphan list */ 2758 read_lock(&journal->j_state_lock); 2759 commit_trans = journal->j_committing_transaction; 2760 read_unlock(&journal->j_state_lock); 2761 spin_lock(&journal->j_list_lock); 2762 inode_trans = jinode->i_transaction; 2763 spin_unlock(&journal->j_list_lock); 2764 if (inode_trans == commit_trans) { 2765 ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping, 2766 new_size, LLONG_MAX); 2767 if (ret) 2768 jbd2_journal_abort(journal, ret); 2769 } 2770 out: 2771 return ret; 2772 } 2773