1 /* 2 * linux/fs/jbd2/transaction.c 3 * 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998 5 * 6 * Copyright 1998 Red Hat corp --- All Rights Reserved 7 * 8 * This file is part of the Linux kernel and is made available under 9 * the terms of the GNU General Public License, version 2, or at your 10 * option, any later version, incorporated herein by reference. 11 * 12 * Generic filesystem transaction handling code; part of the ext2fs 13 * journaling system. 14 * 15 * This file manages transactions (compound commits managed by the 16 * journaling code) and handles (individual atomic operations by the 17 * filesystem). 18 */ 19 20 #include <linux/time.h> 21 #include <linux/fs.h> 22 #include <linux/jbd2.h> 23 #include <linux/errno.h> 24 #include <linux/slab.h> 25 #include <linux/timer.h> 26 #include <linux/mm.h> 27 #include <linux/highmem.h> 28 #include <linux/hrtimer.h> 29 #include <linux/backing-dev.h> 30 #include <linux/bug.h> 31 #include <linux/module.h> 32 33 #include <trace/events/jbd2.h> 34 35 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); 36 static void __jbd2_journal_unfile_buffer(struct journal_head *jh); 37 38 static struct kmem_cache *transaction_cache; 39 int __init jbd2_journal_init_transaction_cache(void) 40 { 41 J_ASSERT(!transaction_cache); 42 transaction_cache = kmem_cache_create("jbd2_transaction_s", 43 sizeof(transaction_t), 44 0, 45 SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY, 46 NULL); 47 if (transaction_cache) 48 return 0; 49 return -ENOMEM; 50 } 51 52 void jbd2_journal_destroy_transaction_cache(void) 53 { 54 if (transaction_cache) { 55 kmem_cache_destroy(transaction_cache); 56 transaction_cache = NULL; 57 } 58 } 59 60 void jbd2_journal_free_transaction(transaction_t *transaction) 61 { 62 if (unlikely(ZERO_OR_NULL_PTR(transaction))) 63 return; 64 kmem_cache_free(transaction_cache, transaction); 65 } 66 67 /* 68 * jbd2_get_transaction: obtain a new transaction_t object. 69 * 70 * Simply allocate and initialise a new transaction. Create it in 71 * RUNNING state and add it to the current journal (which should not 72 * have an existing running transaction: we only make a new transaction 73 * once we have started to commit the old one). 74 * 75 * Preconditions: 76 * The journal MUST be locked. We don't perform atomic mallocs on the 77 * new transaction and we can't block without protecting against other 78 * processes trying to touch the journal while it is in transition. 79 * 80 */ 81 82 static transaction_t * 83 jbd2_get_transaction(journal_t *journal, transaction_t *transaction) 84 { 85 transaction->t_journal = journal; 86 transaction->t_state = T_RUNNING; 87 transaction->t_start_time = ktime_get(); 88 transaction->t_tid = journal->j_transaction_sequence++; 89 transaction->t_expires = jiffies + journal->j_commit_interval; 90 spin_lock_init(&transaction->t_handle_lock); 91 atomic_set(&transaction->t_updates, 0); 92 atomic_set(&transaction->t_outstanding_credits, 93 atomic_read(&journal->j_reserved_credits)); 94 atomic_set(&transaction->t_handle_count, 0); 95 INIT_LIST_HEAD(&transaction->t_inode_list); 96 INIT_LIST_HEAD(&transaction->t_private_list); 97 98 /* Set up the commit timer for the new transaction. 
*/ 99 journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires); 100 add_timer(&journal->j_commit_timer); 101 102 J_ASSERT(journal->j_running_transaction == NULL); 103 journal->j_running_transaction = transaction; 104 transaction->t_max_wait = 0; 105 transaction->t_start = jiffies; 106 transaction->t_requested = 0; 107 108 return transaction; 109 } 110 111 /* 112 * Handle management. 113 * 114 * A handle_t is an object which represents a single atomic update to a 115 * filesystem, and which tracks all of the modifications which form part 116 * of that one update. 117 */ 118 119 /* 120 * Update transaction's maximum wait time, if debugging is enabled. 121 * 122 * In order for t_max_wait to be reliable, it must be protected by a 123 * lock. But doing so will mean that start_this_handle() can not be 124 * run in parallel on SMP systems, which limits our scalability. So 125 * unless debugging is enabled, we no longer update t_max_wait, which 126 * means that maximum wait time reported by the jbd2_run_stats 127 * tracepoint will always be zero. 128 */ 129 static inline void update_t_max_wait(transaction_t *transaction, 130 unsigned long ts) 131 { 132 #ifdef CONFIG_JBD2_DEBUG 133 if (jbd2_journal_enable_debug && 134 time_after(transaction->t_start, ts)) { 135 ts = jbd2_time_diff(ts, transaction->t_start); 136 spin_lock(&transaction->t_handle_lock); 137 if (ts > transaction->t_max_wait) 138 transaction->t_max_wait = ts; 139 spin_unlock(&transaction->t_handle_lock); 140 } 141 #endif 142 } 143 144 /* 145 * Wait until running transaction passes T_LOCKED state. Also starts the commit 146 * if needed. The function expects running transaction to exist and releases 147 * j_state_lock. 148 */ 149 static void wait_transaction_locked(journal_t *journal) 150 __releases(journal->j_state_lock) 151 { 152 DEFINE_WAIT(wait); 153 int need_to_start; 154 tid_t tid = journal->j_running_transaction->t_tid; 155 156 prepare_to_wait(&journal->j_wait_transaction_locked, &wait, 157 TASK_UNINTERRUPTIBLE); 158 need_to_start = !tid_geq(journal->j_commit_request, tid); 159 read_unlock(&journal->j_state_lock); 160 if (need_to_start) 161 jbd2_log_start_commit(journal, tid); 162 schedule(); 163 finish_wait(&journal->j_wait_transaction_locked, &wait); 164 } 165 166 static void sub_reserved_credits(journal_t *journal, int blocks) 167 { 168 atomic_sub(blocks, &journal->j_reserved_credits); 169 wake_up(&journal->j_wait_reserved); 170 } 171 172 /* 173 * Wait until we can add credits for handle to the running transaction. Called 174 * with j_state_lock held for reading. Returns 0 if handle joined the running 175 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and 176 * caller must retry. 177 */ 178 static int add_transaction_credits(journal_t *journal, int blocks, 179 int rsv_blocks) 180 { 181 transaction_t *t = journal->j_running_transaction; 182 int needed; 183 int total = blocks + rsv_blocks; 184 185 /* 186 * If the current transaction is locked down for commit, wait 187 * for the lock to be released. 188 */ 189 if (t->t_state == T_LOCKED) { 190 wait_transaction_locked(journal); 191 return 1; 192 } 193 194 /* 195 * If there is not enough space left in the log to write all 196 * potential buffers requested by this operation, we need to 197 * stall pending a log checkpoint to free some more log space. 
198 */ 199 needed = atomic_add_return(total, &t->t_outstanding_credits); 200 if (needed > journal->j_max_transaction_buffers) { 201 /* 202 * If the current transaction is already too large, 203 * then start to commit it: we can then go back and 204 * attach this handle to a new transaction. 205 */ 206 atomic_sub(total, &t->t_outstanding_credits); 207 208 /* 209 * Is the number of reserved credits in the current transaction too 210 * big to fit this handle? Wait until reserved credits are freed. 211 */ 212 if (atomic_read(&journal->j_reserved_credits) + total > 213 journal->j_max_transaction_buffers) { 214 read_unlock(&journal->j_state_lock); 215 wait_event(journal->j_wait_reserved, 216 atomic_read(&journal->j_reserved_credits) + total <= 217 journal->j_max_transaction_buffers); 218 return 1; 219 } 220 221 wait_transaction_locked(journal); 222 return 1; 223 } 224 225 /* 226 * The commit code assumes that it can get enough log space 227 * without forcing a checkpoint. This is *critical* for 228 * correctness: a checkpoint of a buffer which is also 229 * associated with a committing transaction creates a deadlock, 230 * so commit simply cannot force through checkpoints. 231 * 232 * We must therefore ensure the necessary space in the journal 233 * *before* starting to dirty potentially checkpointed buffers 234 * in the new transaction. 235 */ 236 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) { 237 atomic_sub(total, &t->t_outstanding_credits); 238 read_unlock(&journal->j_state_lock); 239 write_lock(&journal->j_state_lock); 240 if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) 241 __jbd2_log_wait_for_space(journal); 242 write_unlock(&journal->j_state_lock); 243 return 1; 244 } 245 246 /* No reservation? We are done... */ 247 if (!rsv_blocks) 248 return 0; 249 250 needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits); 251 /* We allow at most half of a transaction to be reserved */ 252 if (needed > journal->j_max_transaction_buffers / 2) { 253 sub_reserved_credits(journal, rsv_blocks); 254 atomic_sub(total, &t->t_outstanding_credits); 255 read_unlock(&journal->j_state_lock); 256 wait_event(journal->j_wait_reserved, 257 atomic_read(&journal->j_reserved_credits) + rsv_blocks 258 <= journal->j_max_transaction_buffers / 2); 259 return 1; 260 } 261 return 0; 262 } 263 264 /* 265 * start_this_handle: Given a handle, deal with any locking or stalling 266 * needed to make sure that there is enough journal space for the handle 267 * to begin. Attach the handle to a transaction and set up the 268 * transaction's buffer credits. 269 */ 270 271 static int start_this_handle(journal_t *journal, handle_t *handle, 272 gfp_t gfp_mask) 273 { 274 transaction_t *transaction, *new_transaction = NULL; 275 int blocks = handle->h_buffer_credits; 276 int rsv_blocks = 0; 277 unsigned long ts = jiffies; 278 279 if (handle->h_rsv_handle) 280 rsv_blocks = handle->h_rsv_handle->h_buffer_credits; 281 282 /* 283 * Limit the number of reserved credits to 1/2 of maximum transaction 284 * size and limit the number of total credits to not exceed maximum 285 * transaction size per operation. 
286 */ 287 if ((rsv_blocks > journal->j_max_transaction_buffers / 2) || 288 (rsv_blocks + blocks > journal->j_max_transaction_buffers)) { 289 printk(KERN_ERR "JBD2: %s wants too many credits " 290 "credits:%d rsv_credits:%d max:%d\n", 291 current->comm, blocks, rsv_blocks, 292 journal->j_max_transaction_buffers); 293 WARN_ON(1); 294 return -ENOSPC; 295 } 296 297 alloc_transaction: 298 if (!journal->j_running_transaction) { 299 /* 300 * If __GFP_FS is not present, then we may be being called from 301 * inside the fs writeback layer, so we MUST NOT fail. 302 */ 303 if ((gfp_mask & __GFP_FS) == 0) 304 gfp_mask |= __GFP_NOFAIL; 305 new_transaction = kmem_cache_zalloc(transaction_cache, 306 gfp_mask); 307 if (!new_transaction) 308 return -ENOMEM; 309 } 310 311 jbd_debug(3, "New handle %p going live.\n", handle); 312 313 /* 314 * We need to hold j_state_lock until t_updates has been incremented, 315 * for proper journal barrier handling 316 */ 317 repeat: 318 read_lock(&journal->j_state_lock); 319 BUG_ON(journal->j_flags & JBD2_UNMOUNT); 320 if (is_journal_aborted(journal) || 321 (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) { 322 read_unlock(&journal->j_state_lock); 323 jbd2_journal_free_transaction(new_transaction); 324 return -EROFS; 325 } 326 327 /* 328 * Wait on the journal's transaction barrier if necessary. Specifically 329 * we allow reserved handles to proceed because otherwise commit could 330 * deadlock on page writeback not being able to complete. 331 */ 332 if (!handle->h_reserved && journal->j_barrier_count) { 333 read_unlock(&journal->j_state_lock); 334 wait_event(journal->j_wait_transaction_locked, 335 journal->j_barrier_count == 0); 336 goto repeat; 337 } 338 339 if (!journal->j_running_transaction) { 340 read_unlock(&journal->j_state_lock); 341 if (!new_transaction) 342 goto alloc_transaction; 343 write_lock(&journal->j_state_lock); 344 if (!journal->j_running_transaction && 345 (handle->h_reserved || !journal->j_barrier_count)) { 346 jbd2_get_transaction(journal, new_transaction); 347 new_transaction = NULL; 348 } 349 write_unlock(&journal->j_state_lock); 350 goto repeat; 351 } 352 353 transaction = journal->j_running_transaction; 354 355 if (!handle->h_reserved) { 356 /* We may have dropped j_state_lock - restart in that case */ 357 if (add_transaction_credits(journal, blocks, rsv_blocks)) 358 goto repeat; 359 } else { 360 /* 361 * We have handle reserved so we are allowed to join T_LOCKED 362 * transaction and we don't have to check for transaction size 363 * and journal space. 364 */ 365 sub_reserved_credits(journal, blocks); 366 handle->h_reserved = 0; 367 } 368 369 /* OK, account for the buffers that this operation expects to 370 * use and add the handle to the running transaction. 371 */ 372 update_t_max_wait(transaction, ts); 373 handle->h_transaction = transaction; 374 handle->h_requested_credits = blocks; 375 handle->h_start_jiffies = jiffies; 376 atomic_inc(&transaction->t_updates); 377 atomic_inc(&transaction->t_handle_count); 378 jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n", 379 handle, blocks, 380 atomic_read(&transaction->t_outstanding_credits), 381 jbd2_log_space_left(journal)); 382 read_unlock(&journal->j_state_lock); 383 current->journal_info = handle; 384 385 lock_map_acquire(&handle->h_lockdep_map); 386 jbd2_journal_free_transaction(new_transaction); 387 return 0; 388 } 389 390 static struct lock_class_key jbd2_handle_key; 391 392 /* Allocate a new handle. This should probably be in a slab... 
*/ 393 static handle_t *new_handle(int nblocks) 394 { 395 handle_t *handle = jbd2_alloc_handle(GFP_NOFS); 396 if (!handle) 397 return NULL; 398 handle->h_buffer_credits = nblocks; 399 handle->h_ref = 1; 400 401 lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle", 402 &jbd2_handle_key, 0); 403 404 return handle; 405 } 406 407 /** 408 * handle_t *jbd2_journal_start() - Obtain a new handle. 409 * @journal: Journal to start transaction on. 410 * @nblocks: number of block buffer we might modify 411 * 412 * We make sure that the transaction can guarantee at least nblocks of 413 * modified buffers in the log. We block until the log can guarantee 414 * that much space. Additionally, if rsv_blocks > 0, we also create another 415 * handle with rsv_blocks reserved blocks in the journal. This handle is 416 * is stored in h_rsv_handle. It is not attached to any particular transaction 417 * and thus doesn't block transaction commit. If the caller uses this reserved 418 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop() 419 * on the parent handle will dispose the reserved one. Reserved handle has to 420 * be converted to a normal handle using jbd2_journal_start_reserved() before 421 * it can be used. 422 * 423 * Return a pointer to a newly allocated handle, or an ERR_PTR() value 424 * on failure. 425 */ 426 handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks, 427 gfp_t gfp_mask, unsigned int type, 428 unsigned int line_no) 429 { 430 handle_t *handle = journal_current_handle(); 431 int err; 432 433 if (!journal) 434 return ERR_PTR(-EROFS); 435 436 if (handle) { 437 J_ASSERT(handle->h_transaction->t_journal == journal); 438 handle->h_ref++; 439 return handle; 440 } 441 442 handle = new_handle(nblocks); 443 if (!handle) 444 return ERR_PTR(-ENOMEM); 445 if (rsv_blocks) { 446 handle_t *rsv_handle; 447 448 rsv_handle = new_handle(rsv_blocks); 449 if (!rsv_handle) { 450 jbd2_free_handle(handle); 451 return ERR_PTR(-ENOMEM); 452 } 453 rsv_handle->h_reserved = 1; 454 rsv_handle->h_journal = journal; 455 handle->h_rsv_handle = rsv_handle; 456 } 457 458 err = start_this_handle(journal, handle, gfp_mask); 459 if (err < 0) { 460 if (handle->h_rsv_handle) 461 jbd2_free_handle(handle->h_rsv_handle); 462 jbd2_free_handle(handle); 463 return ERR_PTR(err); 464 } 465 handle->h_type = type; 466 handle->h_line_no = line_no; 467 trace_jbd2_handle_start(journal->j_fs_dev->bd_dev, 468 handle->h_transaction->t_tid, type, 469 line_no, nblocks); 470 return handle; 471 } 472 EXPORT_SYMBOL(jbd2__journal_start); 473 474 475 handle_t *jbd2_journal_start(journal_t *journal, int nblocks) 476 { 477 return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0); 478 } 479 EXPORT_SYMBOL(jbd2_journal_start); 480 481 void jbd2_journal_free_reserved(handle_t *handle) 482 { 483 journal_t *journal = handle->h_journal; 484 485 WARN_ON(!handle->h_reserved); 486 sub_reserved_credits(journal, handle->h_buffer_credits); 487 jbd2_free_handle(handle); 488 } 489 EXPORT_SYMBOL(jbd2_journal_free_reserved); 490 491 /** 492 * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle 493 * @handle: handle to start 494 * 495 * Start handle that has been previously reserved with jbd2_journal_reserve(). 496 * This attaches @handle to the running transaction (or creates one if there's 497 * not transaction running). Unlike jbd2_journal_start() this function cannot 498 * block on journal commit, checkpointing, or similar stuff. 
It can block on 499 * memory allocation or frozen journal though. 500 * 501 * Return 0 on success, non-zero on error - handle is freed in that case. 502 */ 503 int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, 504 unsigned int line_no) 505 { 506 journal_t *journal = handle->h_journal; 507 int ret = -EIO; 508 509 if (WARN_ON(!handle->h_reserved)) { 510 /* Someone passed in normal handle? Just stop it. */ 511 jbd2_journal_stop(handle); 512 return ret; 513 } 514 /* 515 * Usefulness of mixing of reserved and unreserved handles is 516 * questionable. So far nobody seems to need it so just error out. 517 */ 518 if (WARN_ON(current->journal_info)) { 519 jbd2_journal_free_reserved(handle); 520 return ret; 521 } 522 523 handle->h_journal = NULL; 524 /* 525 * GFP_NOFS is here because callers are likely from writeback or 526 * similarly constrained call sites 527 */ 528 ret = start_this_handle(journal, handle, GFP_NOFS); 529 if (ret < 0) { 530 jbd2_journal_free_reserved(handle); 531 return ret; 532 } 533 handle->h_type = type; 534 handle->h_line_no = line_no; 535 return 0; 536 } 537 EXPORT_SYMBOL(jbd2_journal_start_reserved); 538 539 /** 540 * int jbd2_journal_extend() - extend buffer credits. 541 * @handle: handle to 'extend' 542 * @nblocks: nr blocks to try to extend by. 543 * 544 * Some transactions, such as large extends and truncates, can be done 545 * atomically all at once or in several stages. The operation requests 546 * a credit for a number of buffer modications in advance, but can 547 * extend its credit if it needs more. 548 * 549 * jbd2_journal_extend tries to give the running handle more buffer credits. 550 * It does not guarantee that allocation - this is a best-effort only. 551 * The calling process MUST be able to deal cleanly with a failure to 552 * extend here. 553 * 554 * Return 0 on success, non-zero on failure. 555 * 556 * return code < 0 implies an error 557 * return code > 0 implies normal transaction-full status. 558 */ 559 int jbd2_journal_extend(handle_t *handle, int nblocks) 560 { 561 transaction_t *transaction = handle->h_transaction; 562 journal_t *journal; 563 int result; 564 int wanted; 565 566 if (is_handle_aborted(handle)) 567 return -EROFS; 568 journal = transaction->t_journal; 569 570 result = 1; 571 572 read_lock(&journal->j_state_lock); 573 574 /* Don't extend a locked-down transaction! 
*/ 575 if (transaction->t_state != T_RUNNING) { 576 jbd_debug(3, "denied handle %p %d blocks: " 577 "transaction not running\n", handle, nblocks); 578 goto error_out; 579 } 580 581 spin_lock(&transaction->t_handle_lock); 582 wanted = atomic_add_return(nblocks, 583 &transaction->t_outstanding_credits); 584 585 if (wanted > journal->j_max_transaction_buffers) { 586 jbd_debug(3, "denied handle %p %d blocks: " 587 "transaction too large\n", handle, nblocks); 588 atomic_sub(nblocks, &transaction->t_outstanding_credits); 589 goto unlock; 590 } 591 592 if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) > 593 jbd2_log_space_left(journal)) { 594 jbd_debug(3, "denied handle %p %d blocks: " 595 "insufficient log space\n", handle, nblocks); 596 atomic_sub(nblocks, &transaction->t_outstanding_credits); 597 goto unlock; 598 } 599 600 trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev, 601 transaction->t_tid, 602 handle->h_type, handle->h_line_no, 603 handle->h_buffer_credits, 604 nblocks); 605 606 handle->h_buffer_credits += nblocks; 607 handle->h_requested_credits += nblocks; 608 result = 0; 609 610 jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); 611 unlock: 612 spin_unlock(&transaction->t_handle_lock); 613 error_out: 614 read_unlock(&journal->j_state_lock); 615 return result; 616 } 617 618 619 /** 620 * int jbd2_journal_restart() - restart a handle . 621 * @handle: handle to restart 622 * @nblocks: nr credits requested 623 * 624 * Restart a handle for a multi-transaction filesystem 625 * operation. 626 * 627 * If the jbd2_journal_extend() call above fails to grant new buffer credits 628 * to a running handle, a call to jbd2_journal_restart will commit the 629 * handle's transaction so far and reattach the handle to a new 630 * transaction capabable of guaranteeing the requested number of 631 * credits. We preserve reserved handle if there's any attached to the 632 * passed in handle. 633 */ 634 int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) 635 { 636 transaction_t *transaction = handle->h_transaction; 637 journal_t *journal; 638 tid_t tid; 639 int need_to_start, ret; 640 641 /* If we've had an abort of any type, don't even think about 642 * actually doing the restart! */ 643 if (is_handle_aborted(handle)) 644 return 0; 645 journal = transaction->t_journal; 646 647 /* 648 * First unlink the handle from its current transaction, and start the 649 * commit on that. 
650 */ 651 J_ASSERT(atomic_read(&transaction->t_updates) > 0); 652 J_ASSERT(journal_current_handle() == handle); 653 654 read_lock(&journal->j_state_lock); 655 spin_lock(&transaction->t_handle_lock); 656 atomic_sub(handle->h_buffer_credits, 657 &transaction->t_outstanding_credits); 658 if (handle->h_rsv_handle) { 659 sub_reserved_credits(journal, 660 handle->h_rsv_handle->h_buffer_credits); 661 } 662 if (atomic_dec_and_test(&transaction->t_updates)) 663 wake_up(&journal->j_wait_updates); 664 tid = transaction->t_tid; 665 spin_unlock(&transaction->t_handle_lock); 666 handle->h_transaction = NULL; 667 current->journal_info = NULL; 668 669 jbd_debug(2, "restarting handle %p\n", handle); 670 need_to_start = !tid_geq(journal->j_commit_request, tid); 671 read_unlock(&journal->j_state_lock); 672 if (need_to_start) 673 jbd2_log_start_commit(journal, tid); 674 675 lock_map_release(&handle->h_lockdep_map); 676 handle->h_buffer_credits = nblocks; 677 ret = start_this_handle(journal, handle, gfp_mask); 678 return ret; 679 } 680 EXPORT_SYMBOL(jbd2__journal_restart); 681 682 683 int jbd2_journal_restart(handle_t *handle, int nblocks) 684 { 685 return jbd2__journal_restart(handle, nblocks, GFP_NOFS); 686 } 687 EXPORT_SYMBOL(jbd2_journal_restart); 688 689 /** 690 * void jbd2_journal_lock_updates () - establish a transaction barrier. 691 * @journal: Journal to establish a barrier on. 692 * 693 * This locks out any further updates from being started, and blocks 694 * until all existing updates have completed, returning only once the 695 * journal is in a quiescent state with no updates running. 696 * 697 * The journal lock should not be held on entry. 698 */ 699 void jbd2_journal_lock_updates(journal_t *journal) 700 { 701 DEFINE_WAIT(wait); 702 703 write_lock(&journal->j_state_lock); 704 ++journal->j_barrier_count; 705 706 /* Wait until there are no reserved handles */ 707 if (atomic_read(&journal->j_reserved_credits)) { 708 write_unlock(&journal->j_state_lock); 709 wait_event(journal->j_wait_reserved, 710 atomic_read(&journal->j_reserved_credits) == 0); 711 write_lock(&journal->j_state_lock); 712 } 713 714 /* Wait until there are no running updates */ 715 while (1) { 716 transaction_t *transaction = journal->j_running_transaction; 717 718 if (!transaction) 719 break; 720 721 spin_lock(&transaction->t_handle_lock); 722 prepare_to_wait(&journal->j_wait_updates, &wait, 723 TASK_UNINTERRUPTIBLE); 724 if (!atomic_read(&transaction->t_updates)) { 725 spin_unlock(&transaction->t_handle_lock); 726 finish_wait(&journal->j_wait_updates, &wait); 727 break; 728 } 729 spin_unlock(&transaction->t_handle_lock); 730 write_unlock(&journal->j_state_lock); 731 schedule(); 732 finish_wait(&journal->j_wait_updates, &wait); 733 write_lock(&journal->j_state_lock); 734 } 735 write_unlock(&journal->j_state_lock); 736 737 /* 738 * We have now established a barrier against other normal updates, but 739 * we also need to barrier against other jbd2_journal_lock_updates() calls 740 * to make sure that we serialise special journal-locked operations 741 * too. 742 */ 743 mutex_lock(&journal->j_barrier); 744 } 745 746 /** 747 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier 748 * @journal: Journal to release the barrier on. 749 * 750 * Release a transaction barrier obtained with jbd2_journal_lock_updates(). 751 * 752 * Should be called without the journal lock held. 
753 */ 754 void jbd2_journal_unlock_updates (journal_t *journal) 755 { 756 J_ASSERT(journal->j_barrier_count != 0); 757 758 mutex_unlock(&journal->j_barrier); 759 write_lock(&journal->j_state_lock); 760 --journal->j_barrier_count; 761 write_unlock(&journal->j_state_lock); 762 wake_up(&journal->j_wait_transaction_locked); 763 } 764 765 static void warn_dirty_buffer(struct buffer_head *bh) 766 { 767 printk(KERN_WARNING 768 "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). " 769 "There's a risk of filesystem corruption in case of system " 770 "crash.\n", 771 bh->b_bdev, (unsigned long long)bh->b_blocknr); 772 } 773 774 /* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */ 775 static void jbd2_freeze_jh_data(struct journal_head *jh) 776 { 777 struct page *page; 778 int offset; 779 char *source; 780 struct buffer_head *bh = jh2bh(jh); 781 782 J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n"); 783 page = bh->b_page; 784 offset = offset_in_page(bh->b_data); 785 source = kmap_atomic(page); 786 /* Fire data frozen trigger just before we copy the data */ 787 jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers); 788 memcpy(jh->b_frozen_data, source + offset, bh->b_size); 789 kunmap_atomic(source); 790 791 /* 792 * Now that the frozen data is saved off, we need to store any matching 793 * triggers. 794 */ 795 jh->b_frozen_triggers = jh->b_triggers; 796 } 797 798 /* 799 * If the buffer is already part of the current transaction, then there 800 * is nothing we need to do. If it is already part of a prior 801 * transaction which we are still committing to disk, then we need to 802 * make sure that we do not overwrite the old copy: we do copy-out to 803 * preserve the copy going to disk. We also account the buffer against 804 * the handle's metadata buffer credits (unless the buffer is already 805 * part of the transaction, that is). 806 * 807 */ 808 static int 809 do_get_write_access(handle_t *handle, struct journal_head *jh, 810 int force_copy) 811 { 812 struct buffer_head *bh; 813 transaction_t *transaction = handle->h_transaction; 814 journal_t *journal; 815 int error; 816 char *frozen_buffer = NULL; 817 unsigned long start_lock, time_lock; 818 819 if (is_handle_aborted(handle)) 820 return -EROFS; 821 journal = transaction->t_journal; 822 823 jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); 824 825 JBUFFER_TRACE(jh, "entry"); 826 repeat: 827 bh = jh2bh(jh); 828 829 /* @@@ Need to check for errors here at some point. */ 830 831 start_lock = jiffies; 832 lock_buffer(bh); 833 jbd_lock_bh_state(bh); 834 835 /* If it takes too long to lock the buffer, trace it */ 836 time_lock = jbd2_time_diff(start_lock, jiffies); 837 if (time_lock > HZ/10) 838 trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev, 839 jiffies_to_msecs(time_lock)); 840 841 /* We now hold the buffer lock so it is safe to query the buffer 842 * state. Is the buffer dirty? 843 * 844 * If so, there are two possibilities. The buffer may be 845 * non-journaled, and undergoing a quite legitimate writeback. 846 * Otherwise, it is journaled, and we don't expect dirty buffers 847 * in that state (the buffers should be marked JBD_Dirty 848 * instead.) So either the IO is being done under our own 849 * control and this is a bug, or it's a third party IO such as 850 * dump(8) (which may leave the buffer scheduled for read --- 851 * ie. locked but not dirty) or tune2fs (which may actually have 852 * the buffer dirtied, ugh.) 
*/ 853 854 if (buffer_dirty(bh)) { 855 /* 856 * First question: is this buffer already part of the current 857 * transaction or the existing committing transaction? 858 */ 859 if (jh->b_transaction) { 860 J_ASSERT_JH(jh, 861 jh->b_transaction == transaction || 862 jh->b_transaction == 863 journal->j_committing_transaction); 864 if (jh->b_next_transaction) 865 J_ASSERT_JH(jh, jh->b_next_transaction == 866 transaction); 867 warn_dirty_buffer(bh); 868 } 869 /* 870 * In any case we need to clean the dirty flag and we must 871 * do it under the buffer lock to be sure we don't race 872 * with running write-out. 873 */ 874 JBUFFER_TRACE(jh, "Journalling dirty buffer"); 875 clear_buffer_dirty(bh); 876 set_buffer_jbddirty(bh); 877 } 878 879 unlock_buffer(bh); 880 881 error = -EROFS; 882 if (is_handle_aborted(handle)) { 883 jbd_unlock_bh_state(bh); 884 goto out; 885 } 886 error = 0; 887 888 /* 889 * The buffer is already part of this transaction if b_transaction or 890 * b_next_transaction points to it 891 */ 892 if (jh->b_transaction == transaction || 893 jh->b_next_transaction == transaction) 894 goto done; 895 896 /* 897 * this is the first time this transaction is touching this buffer, 898 * reset the modified flag 899 */ 900 jh->b_modified = 0; 901 902 /* 903 * If the buffer is not journaled right now, we need to make sure it 904 * doesn't get written to disk before the caller actually commits the 905 * new data 906 */ 907 if (!jh->b_transaction) { 908 JBUFFER_TRACE(jh, "no transaction"); 909 J_ASSERT_JH(jh, !jh->b_next_transaction); 910 JBUFFER_TRACE(jh, "file as BJ_Reserved"); 911 /* 912 * Make sure all stores to jh (b_modified, b_frozen_data) are 913 * visible before attaching it to the running transaction. 914 * Paired with barrier in jbd2_write_access_granted() 915 */ 916 smp_wmb(); 917 spin_lock(&journal->j_list_lock); 918 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); 919 spin_unlock(&journal->j_list_lock); 920 goto done; 921 } 922 /* 923 * If there is already a copy-out version of this buffer, then we don't 924 * need to make another one 925 */ 926 if (jh->b_frozen_data) { 927 JBUFFER_TRACE(jh, "has frozen data"); 928 J_ASSERT_JH(jh, jh->b_next_transaction == NULL); 929 goto attach_next; 930 } 931 932 JBUFFER_TRACE(jh, "owned by older transaction"); 933 J_ASSERT_JH(jh, jh->b_next_transaction == NULL); 934 J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction); 935 936 /* 937 * There is one case we have to be very careful about. If the 938 * committing transaction is currently writing this buffer out to disk 939 * and has NOT made a copy-out, then we cannot modify the buffer 940 * contents at all right now. The essence of copy-out is that it is 941 * the extra copy, not the primary copy, which gets journaled. If the 942 * primary copy is already going to disk then we cannot do copy-out 943 * here. 944 */ 945 if (buffer_shadow(bh)) { 946 JBUFFER_TRACE(jh, "on shadow: sleep"); 947 jbd_unlock_bh_state(bh); 948 wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE); 949 goto repeat; 950 } 951 952 /* 953 * Only do the copy if the currently-owning transaction still needs it. 954 * If buffer isn't on BJ_Metadata list, the committing transaction is 955 * past that stage (here we use the fact that BH_Shadow is set under 956 * bh_state lock together with refiling to BJ_Shadow list and at this 957 * point we know the buffer doesn't have BH_Shadow set). 
958 * 959 * Subtle point, though: if this is a get_undo_access, then we will be 960 * relying on the frozen_data to contain the new value of the 961 * committed_data record after the transaction, so we HAVE to force the 962 * frozen_data copy in that case. 963 */ 964 if (jh->b_jlist == BJ_Metadata || force_copy) { 965 JBUFFER_TRACE(jh, "generate frozen data"); 966 if (!frozen_buffer) { 967 JBUFFER_TRACE(jh, "allocate memory for buffer"); 968 jbd_unlock_bh_state(bh); 969 frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); 970 if (!frozen_buffer) { 971 printk(KERN_ERR "%s: OOM for frozen_buffer\n", 972 __func__); 973 JBUFFER_TRACE(jh, "oom!"); 974 error = -ENOMEM; 975 goto out; 976 } 977 goto repeat; 978 } 979 jh->b_frozen_data = frozen_buffer; 980 frozen_buffer = NULL; 981 jbd2_freeze_jh_data(jh); 982 } 983 attach_next: 984 /* 985 * Make sure all stores to jh (b_modified, b_frozen_data) are visible 986 * before attaching it to the running transaction. Paired with barrier 987 * in jbd2_write_access_granted() 988 */ 989 smp_wmb(); 990 jh->b_next_transaction = transaction; 991 992 done: 993 jbd_unlock_bh_state(bh); 994 995 /* 996 * If we are about to journal a buffer, then any revoke pending on it is 997 * no longer valid 998 */ 999 jbd2_journal_cancel_revoke(handle, jh); 1000 1001 out: 1002 if (unlikely(frozen_buffer)) /* It's usually NULL */ 1003 jbd2_free(frozen_buffer, bh->b_size); 1004 1005 JBUFFER_TRACE(jh, "exit"); 1006 return error; 1007 } 1008 1009 /* Fast check whether buffer is already attached to the required transaction */ 1010 static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh, 1011 bool undo) 1012 { 1013 struct journal_head *jh; 1014 bool ret = false; 1015 1016 /* Dirty buffers require special handling... */ 1017 if (buffer_dirty(bh)) 1018 return false; 1019 1020 /* 1021 * RCU protects us from dereferencing freed pages. So the checks we do 1022 * are guaranteed not to oops. However the jh slab object can get freed 1023 * & reallocated while we work with it. So we have to be careful. When 1024 * we see jh attached to the running transaction, we know it must stay 1025 * so until the transaction is committed. Thus jh won't be freed and 1026 * will be attached to the same bh while we run. However it can 1027 * happen jh gets freed, reallocated, and attached to the transaction 1028 * just after we get pointer to it from bh. So we have to be careful 1029 * and recheck jh still belongs to our bh before we return success. 1030 */ 1031 rcu_read_lock(); 1032 if (!buffer_jbd(bh)) 1033 goto out; 1034 /* This should be bh2jh() but that doesn't work with inline functions */ 1035 jh = READ_ONCE(bh->b_private); 1036 if (!jh) 1037 goto out; 1038 /* For undo access buffer must have data copied */ 1039 if (undo && !jh->b_committed_data) 1040 goto out; 1041 if (jh->b_transaction != handle->h_transaction && 1042 jh->b_next_transaction != handle->h_transaction) 1043 goto out; 1044 /* 1045 * There are two reasons for the barrier here: 1046 * 1) Make sure to fetch b_bh after we did previous checks so that we 1047 * detect when jh went through free, realloc, attach to transaction 1048 * while we were checking. Paired with implicit barrier in that path. 1049 * 2) So that access to bh done after jbd2_write_access_granted() 1050 * doesn't get reordered and see inconsistent state of concurrent 1051 * do_get_write_access(). 
1052 */ 1053 smp_mb(); 1054 if (unlikely(jh->b_bh != bh)) 1055 goto out; 1056 ret = true; 1057 out: 1058 rcu_read_unlock(); 1059 return ret; 1060 } 1061 1062 /** 1063 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update. 1064 * @handle: transaction to add buffer modifications to 1065 * @bh: bh to be used for metadata writes 1066 * 1067 * Returns an error code or 0 on success. 1068 * 1069 * In full data journalling mode the buffer may be of type BJ_AsyncData, 1070 * because we're write()ing a buffer which is also part of a shared mapping. 1071 */ 1072 1073 int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) 1074 { 1075 struct journal_head *jh; 1076 int rc; 1077 1078 if (jbd2_write_access_granted(handle, bh, false)) 1079 return 0; 1080 1081 jh = jbd2_journal_add_journal_head(bh); 1082 /* We do not want to get caught playing with fields which the 1083 * log thread also manipulates. Make sure that the buffer 1084 * completes any outstanding IO before proceeding. */ 1085 rc = do_get_write_access(handle, jh, 0); 1086 jbd2_journal_put_journal_head(jh); 1087 return rc; 1088 } 1089 1090 1091 /* 1092 * When the user wants to journal a newly created buffer_head 1093 * (ie. getblk() returned a new buffer and we are going to populate it 1094 * manually rather than reading off disk), then we need to keep the 1095 * buffer_head locked until it has been completely filled with new 1096 * data. In this case, we should be able to make the assertion that 1097 * the bh is not already part of an existing transaction. 1098 * 1099 * The buffer should already be locked by the caller by this point. 1100 * There is no lock ranking violation: it was a newly created, 1101 * unlocked buffer beforehand. */ 1102 1103 /** 1104 * int jbd2_journal_get_create_access () - notify intent to use newly created bh 1105 * @handle: transaction to new buffer to 1106 * @bh: new buffer. 1107 * 1108 * Call this if you create a new bh. 1109 */ 1110 int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) 1111 { 1112 transaction_t *transaction = handle->h_transaction; 1113 journal_t *journal; 1114 struct journal_head *jh = jbd2_journal_add_journal_head(bh); 1115 int err; 1116 1117 jbd_debug(5, "journal_head %p\n", jh); 1118 err = -EROFS; 1119 if (is_handle_aborted(handle)) 1120 goto out; 1121 journal = transaction->t_journal; 1122 err = 0; 1123 1124 JBUFFER_TRACE(jh, "entry"); 1125 /* 1126 * The buffer may already belong to this transaction due to pre-zeroing 1127 * in the filesystem's new_block code. It may also be on the previous, 1128 * committing transaction's lists, but it HAS to be in Forget state in 1129 * that case: the transaction must have deleted the buffer for it to be 1130 * reused here. 1131 */ 1132 jbd_lock_bh_state(bh); 1133 J_ASSERT_JH(jh, (jh->b_transaction == transaction || 1134 jh->b_transaction == NULL || 1135 (jh->b_transaction == journal->j_committing_transaction && 1136 jh->b_jlist == BJ_Forget))); 1137 1138 J_ASSERT_JH(jh, jh->b_next_transaction == NULL); 1139 J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); 1140 1141 if (jh->b_transaction == NULL) { 1142 /* 1143 * Previous jbd2_journal_forget() could have left the buffer 1144 * with jbddirty bit set because it was being committed. When 1145 * the commit finished, we've filed the buffer for 1146 * checkpointing and marked it dirty. Now we are reallocating 1147 * the buffer so the transaction freeing it must have 1148 * committed and so it's safe to clear the dirty bit. 
1149 */ 1150 clear_buffer_dirty(jh2bh(jh)); 1151 /* first access by this transaction */ 1152 jh->b_modified = 0; 1153 1154 JBUFFER_TRACE(jh, "file as BJ_Reserved"); 1155 spin_lock(&journal->j_list_lock); 1156 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); 1157 } else if (jh->b_transaction == journal->j_committing_transaction) { 1158 /* first access by this transaction */ 1159 jh->b_modified = 0; 1160 1161 JBUFFER_TRACE(jh, "set next transaction"); 1162 spin_lock(&journal->j_list_lock); 1163 jh->b_next_transaction = transaction; 1164 } 1165 spin_unlock(&journal->j_list_lock); 1166 jbd_unlock_bh_state(bh); 1167 1168 /* 1169 * akpm: I added this. ext3_alloc_branch can pick up new indirect 1170 * blocks which contain freed but then revoked metadata. We need 1171 * to cancel the revoke in case we end up freeing it yet again 1172 * and the reallocating as data - this would cause a second revoke, 1173 * which hits an assertion error. 1174 */ 1175 JBUFFER_TRACE(jh, "cancelling revoke"); 1176 jbd2_journal_cancel_revoke(handle, jh); 1177 out: 1178 jbd2_journal_put_journal_head(jh); 1179 return err; 1180 } 1181 1182 /** 1183 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with 1184 * non-rewindable consequences 1185 * @handle: transaction 1186 * @bh: buffer to undo 1187 * 1188 * Sometimes there is a need to distinguish between metadata which has 1189 * been committed to disk and that which has not. The ext3fs code uses 1190 * this for freeing and allocating space, we have to make sure that we 1191 * do not reuse freed space until the deallocation has been committed, 1192 * since if we overwrote that space we would make the delete 1193 * un-rewindable in case of a crash. 1194 * 1195 * To deal with that, jbd2_journal_get_undo_access requests write access to a 1196 * buffer for parts of non-rewindable operations such as delete 1197 * operations on the bitmaps. The journaling code must keep a copy of 1198 * the buffer's contents prior to the undo_access call until such time 1199 * as we know that the buffer has definitely been committed to disk. 1200 * 1201 * We never need to know which transaction the committed data is part 1202 * of, buffers touched here are guaranteed to be dirtied later and so 1203 * will be committed to a new transaction in due course, at which point 1204 * we can discard the old committed data pointer. 1205 * 1206 * Returns error number or 0 on success. 1207 */ 1208 int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) 1209 { 1210 int err; 1211 struct journal_head *jh; 1212 char *committed_data = NULL; 1213 1214 JBUFFER_TRACE(jh, "entry"); 1215 if (jbd2_write_access_granted(handle, bh, true)) 1216 return 0; 1217 1218 jh = jbd2_journal_add_journal_head(bh); 1219 /* 1220 * Do this first --- it can drop the journal lock, so we want to 1221 * make sure that obtaining the committed_data is done 1222 * atomically wrt. completion of any outstanding commits. 1223 */ 1224 err = do_get_write_access(handle, jh, 1); 1225 if (err) 1226 goto out; 1227 1228 repeat: 1229 if (!jh->b_committed_data) { 1230 committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); 1231 if (!committed_data) { 1232 printk(KERN_ERR "%s: No memory for committed data\n", 1233 __func__); 1234 err = -ENOMEM; 1235 goto out; 1236 } 1237 } 1238 1239 jbd_lock_bh_state(bh); 1240 if (!jh->b_committed_data) { 1241 /* Copy out the current buffer contents into the 1242 * preserved, committed copy. 
*/ 1243 JBUFFER_TRACE(jh, "generate b_committed data"); 1244 if (!committed_data) { 1245 jbd_unlock_bh_state(bh); 1246 goto repeat; 1247 } 1248 1249 jh->b_committed_data = committed_data; 1250 committed_data = NULL; 1251 memcpy(jh->b_committed_data, bh->b_data, bh->b_size); 1252 } 1253 jbd_unlock_bh_state(bh); 1254 out: 1255 jbd2_journal_put_journal_head(jh); 1256 if (unlikely(committed_data)) 1257 jbd2_free(committed_data, bh->b_size); 1258 return err; 1259 } 1260 1261 /** 1262 * void jbd2_journal_set_triggers() - Add triggers for commit writeout 1263 * @bh: buffer to trigger on 1264 * @type: struct jbd2_buffer_trigger_type containing the trigger(s). 1265 * 1266 * Set any triggers on this journal_head. This is always safe, because 1267 * triggers for a committing buffer will be saved off, and triggers for 1268 * a running transaction will match the buffer in that transaction. 1269 * 1270 * Call with NULL to clear the triggers. 1271 */ 1272 void jbd2_journal_set_triggers(struct buffer_head *bh, 1273 struct jbd2_buffer_trigger_type *type) 1274 { 1275 struct journal_head *jh = jbd2_journal_grab_journal_head(bh); 1276 1277 if (WARN_ON(!jh)) 1278 return; 1279 jh->b_triggers = type; 1280 jbd2_journal_put_journal_head(jh); 1281 } 1282 1283 void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, 1284 struct jbd2_buffer_trigger_type *triggers) 1285 { 1286 struct buffer_head *bh = jh2bh(jh); 1287 1288 if (!triggers || !triggers->t_frozen) 1289 return; 1290 1291 triggers->t_frozen(triggers, bh, mapped_data, bh->b_size); 1292 } 1293 1294 void jbd2_buffer_abort_trigger(struct journal_head *jh, 1295 struct jbd2_buffer_trigger_type *triggers) 1296 { 1297 if (!triggers || !triggers->t_abort) 1298 return; 1299 1300 triggers->t_abort(triggers, jh2bh(jh)); 1301 } 1302 1303 /** 1304 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata 1305 * @handle: transaction to add buffer to. 1306 * @bh: buffer to mark 1307 * 1308 * mark dirty metadata which needs to be journaled as part of the current 1309 * transaction. 1310 * 1311 * The buffer must have previously had jbd2_journal_get_write_access() 1312 * called so that it has a valid journal_head attached to the buffer 1313 * head. 1314 * 1315 * The buffer is placed on the transaction's metadata list and is marked 1316 * as belonging to the transaction. 1317 * 1318 * Returns error number or 0 on success. 1319 * 1320 * Special care needs to be taken if the buffer already belongs to the 1321 * current committing transaction (in which case we should have frozen 1322 * data present for that commit). In that case, we don't relink the 1323 * buffer: that only gets done when the old transaction finally 1324 * completes its commit. 1325 */ 1326 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) 1327 { 1328 transaction_t *transaction = handle->h_transaction; 1329 journal_t *journal; 1330 struct journal_head *jh; 1331 int ret = 0; 1332 1333 if (is_handle_aborted(handle)) 1334 return -EROFS; 1335 if (!buffer_jbd(bh)) { 1336 ret = -EUCLEAN; 1337 goto out; 1338 } 1339 /* 1340 * We don't grab jh reference here since the buffer must be part 1341 * of the running transaction. 1342 */ 1343 jh = bh2jh(bh); 1344 /* 1345 * This and the following assertions are unreliable since we may see jh 1346 * in inconsistent state unless we grab bh_state lock. But this is 1347 * crucial to catch bugs so let's do a reliable check until the 1348 * lockless handling is fully proven. 
1349 */ 1350 if (jh->b_transaction != transaction && 1351 jh->b_next_transaction != transaction) { 1352 jbd_lock_bh_state(bh); 1353 J_ASSERT_JH(jh, jh->b_transaction == transaction || 1354 jh->b_next_transaction == transaction); 1355 jbd_unlock_bh_state(bh); 1356 } 1357 if (jh->b_modified == 1) { 1358 /* If it's in our transaction it must be in BJ_Metadata list. */ 1359 if (jh->b_transaction == transaction && 1360 jh->b_jlist != BJ_Metadata) { 1361 jbd_lock_bh_state(bh); 1362 J_ASSERT_JH(jh, jh->b_transaction != transaction || 1363 jh->b_jlist == BJ_Metadata); 1364 jbd_unlock_bh_state(bh); 1365 } 1366 goto out; 1367 } 1368 1369 journal = transaction->t_journal; 1370 jbd_debug(5, "journal_head %p\n", jh); 1371 JBUFFER_TRACE(jh, "entry"); 1372 1373 jbd_lock_bh_state(bh); 1374 1375 if (jh->b_modified == 0) { 1376 /* 1377 * This buffer's got modified and becoming part 1378 * of the transaction. This needs to be done 1379 * once a transaction -bzzz 1380 */ 1381 jh->b_modified = 1; 1382 if (handle->h_buffer_credits <= 0) { 1383 ret = -ENOSPC; 1384 goto out_unlock_bh; 1385 } 1386 handle->h_buffer_credits--; 1387 } 1388 1389 /* 1390 * fastpath, to avoid expensive locking. If this buffer is already 1391 * on the running transaction's metadata list there is nothing to do. 1392 * Nobody can take it off again because there is a handle open. 1393 * I _think_ we're OK here with SMP barriers - a mistaken decision will 1394 * result in this test being false, so we go in and take the locks. 1395 */ 1396 if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) { 1397 JBUFFER_TRACE(jh, "fastpath"); 1398 if (unlikely(jh->b_transaction != 1399 journal->j_running_transaction)) { 1400 printk(KERN_ERR "JBD2: %s: " 1401 "jh->b_transaction (%llu, %p, %u) != " 1402 "journal->j_running_transaction (%p, %u)\n", 1403 journal->j_devname, 1404 (unsigned long long) bh->b_blocknr, 1405 jh->b_transaction, 1406 jh->b_transaction ? jh->b_transaction->t_tid : 0, 1407 journal->j_running_transaction, 1408 journal->j_running_transaction ? 1409 journal->j_running_transaction->t_tid : 0); 1410 ret = -EINVAL; 1411 } 1412 goto out_unlock_bh; 1413 } 1414 1415 set_buffer_jbddirty(bh); 1416 1417 /* 1418 * Metadata already on the current transaction list doesn't 1419 * need to be filed. Metadata on another transaction's list must 1420 * be committing, and will be refiled once the commit completes: 1421 * leave it alone for now. 1422 */ 1423 if (jh->b_transaction != transaction) { 1424 JBUFFER_TRACE(jh, "already on other transaction"); 1425 if (unlikely(((jh->b_transaction != 1426 journal->j_committing_transaction)) || 1427 (jh->b_next_transaction != transaction))) { 1428 printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: " 1429 "bad jh for block %llu: " 1430 "transaction (%p, %u), " 1431 "jh->b_transaction (%p, %u), " 1432 "jh->b_next_transaction (%p, %u), jlist %u\n", 1433 journal->j_devname, 1434 (unsigned long long) bh->b_blocknr, 1435 transaction, transaction->t_tid, 1436 jh->b_transaction, 1437 jh->b_transaction ? 1438 jh->b_transaction->t_tid : 0, 1439 jh->b_next_transaction, 1440 jh->b_next_transaction ? 1441 jh->b_next_transaction->t_tid : 0, 1442 jh->b_jlist); 1443 WARN_ON(1); 1444 ret = -EINVAL; 1445 } 1446 /* And this case is illegal: we can't reuse another 1447 * transaction's data buffer, ever. 
*/ 1448 goto out_unlock_bh; 1449 } 1450 1451 /* That test should have eliminated the following case: */ 1452 J_ASSERT_JH(jh, jh->b_frozen_data == NULL); 1453 1454 JBUFFER_TRACE(jh, "file as BJ_Metadata"); 1455 spin_lock(&journal->j_list_lock); 1456 __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata); 1457 spin_unlock(&journal->j_list_lock); 1458 out_unlock_bh: 1459 jbd_unlock_bh_state(bh); 1460 out: 1461 JBUFFER_TRACE(jh, "exit"); 1462 return ret; 1463 } 1464 1465 /** 1466 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers. 1467 * @handle: transaction handle 1468 * @bh: bh to 'forget' 1469 * 1470 * We can only do the bforget if there are no commits pending against the 1471 * buffer. If the buffer is dirty in the current running transaction we 1472 * can safely unlink it. 1473 * 1474 * bh may not be a journalled buffer at all - it may be a non-JBD 1475 * buffer which came off the hashtable. Check for this. 1476 * 1477 * Decrements bh->b_count by one. 1478 * 1479 * Allow this call even if the handle has aborted --- it may be part of 1480 * the caller's cleanup after an abort. 1481 */ 1482 int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) 1483 { 1484 transaction_t *transaction = handle->h_transaction; 1485 journal_t *journal; 1486 struct journal_head *jh; 1487 int drop_reserve = 0; 1488 int err = 0; 1489 int was_modified = 0; 1490 1491 if (is_handle_aborted(handle)) 1492 return -EROFS; 1493 journal = transaction->t_journal; 1494 1495 BUFFER_TRACE(bh, "entry"); 1496 1497 jbd_lock_bh_state(bh); 1498 1499 if (!buffer_jbd(bh)) 1500 goto not_jbd; 1501 jh = bh2jh(bh); 1502 1503 /* Critical error: attempting to delete a bitmap buffer, maybe? 1504 * Don't do any jbd operations, and return an error. */ 1505 if (!J_EXPECT_JH(jh, !jh->b_committed_data, 1506 "inconsistent data on disk")) { 1507 err = -EIO; 1508 goto not_jbd; 1509 } 1510 1511 /* keep track of whether or not this transaction modified us */ 1512 was_modified = jh->b_modified; 1513 1514 /* 1515 * The buffer's going from the transaction, we must drop 1516 * all references -bzzz 1517 */ 1518 jh->b_modified = 0; 1519 1520 if (jh->b_transaction == transaction) { 1521 J_ASSERT_JH(jh, !jh->b_frozen_data); 1522 1523 /* If we are forgetting a buffer which is already part 1524 * of this transaction, then we can just drop it from 1525 * the transaction immediately. */ 1526 clear_buffer_dirty(bh); 1527 clear_buffer_jbddirty(bh); 1528 1529 JBUFFER_TRACE(jh, "belongs to current transaction: unfile"); 1530 1531 /* 1532 * we only want to drop a reference if this transaction 1533 * modified the buffer 1534 */ 1535 if (was_modified) 1536 drop_reserve = 1; 1537 1538 /* 1539 * We are no longer going to journal this buffer. 1540 * However, the commit of this transaction is still 1541 * important to the buffer: the delete that we are now 1542 * processing might obsolete an old log entry, so by 1543 * committing, we can satisfy the buffer's checkpoint. 1544 * 1545 * So, if we have a checkpoint on the buffer, we should 1546 * now refile the buffer on our BJ_Forget list so that 1547 * we know to remove the checkpoint after we commit. 
1548 */ 1549 1550 spin_lock(&journal->j_list_lock); 1551 if (jh->b_cp_transaction) { 1552 __jbd2_journal_temp_unlink_buffer(jh); 1553 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1554 } else { 1555 __jbd2_journal_unfile_buffer(jh); 1556 if (!buffer_jbd(bh)) { 1557 spin_unlock(&journal->j_list_lock); 1558 jbd_unlock_bh_state(bh); 1559 __bforget(bh); 1560 goto drop; 1561 } 1562 } 1563 spin_unlock(&journal->j_list_lock); 1564 } else if (jh->b_transaction) { 1565 J_ASSERT_JH(jh, (jh->b_transaction == 1566 journal->j_committing_transaction)); 1567 /* However, if the buffer is still owned by a prior 1568 * (committing) transaction, we can't drop it yet... */ 1569 JBUFFER_TRACE(jh, "belongs to older transaction"); 1570 /* ... but we CAN drop it from the new transaction if we 1571 * have also modified it since the original commit. */ 1572 1573 if (jh->b_next_transaction) { 1574 J_ASSERT(jh->b_next_transaction == transaction); 1575 spin_lock(&journal->j_list_lock); 1576 jh->b_next_transaction = NULL; 1577 spin_unlock(&journal->j_list_lock); 1578 1579 /* 1580 * only drop a reference if this transaction modified 1581 * the buffer 1582 */ 1583 if (was_modified) 1584 drop_reserve = 1; 1585 } 1586 } 1587 1588 not_jbd: 1589 jbd_unlock_bh_state(bh); 1590 __brelse(bh); 1591 drop: 1592 if (drop_reserve) { 1593 /* no need to reserve log space for this block -bzzz */ 1594 handle->h_buffer_credits++; 1595 } 1596 return err; 1597 } 1598 1599 /** 1600 * int jbd2_journal_stop() - complete a transaction 1601 * @handle: tranaction to complete. 1602 * 1603 * All done for a particular handle. 1604 * 1605 * There is not much action needed here. We just return any remaining 1606 * buffer credits to the transaction and remove the handle. The only 1607 * complication is that we need to start a commit operation if the 1608 * filesystem is marked for synchronous update. 1609 * 1610 * jbd2_journal_stop itself will not usually return an error, but it may 1611 * do so in unusual circumstances. In particular, expect it to 1612 * return -EIO if a jbd2_journal_abort has been executed since the 1613 * transaction began. 
1614 */ 1615 int jbd2_journal_stop(handle_t *handle) 1616 { 1617 transaction_t *transaction = handle->h_transaction; 1618 journal_t *journal; 1619 int err = 0, wait_for_commit = 0; 1620 tid_t tid; 1621 pid_t pid; 1622 1623 if (!transaction) { 1624 /* 1625 * Handle is already detached from the transaction so 1626 * there is nothing to do other than decrease a refcount, 1627 * or free the handle if refcount drops to zero 1628 */ 1629 if (--handle->h_ref > 0) { 1630 jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, 1631 handle->h_ref); 1632 return err; 1633 } else { 1634 if (handle->h_rsv_handle) 1635 jbd2_free_handle(handle->h_rsv_handle); 1636 goto free_and_exit; 1637 } 1638 } 1639 journal = transaction->t_journal; 1640 1641 J_ASSERT(journal_current_handle() == handle); 1642 1643 if (is_handle_aborted(handle)) 1644 err = -EIO; 1645 else 1646 J_ASSERT(atomic_read(&transaction->t_updates) > 0); 1647 1648 if (--handle->h_ref > 0) { 1649 jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, 1650 handle->h_ref); 1651 return err; 1652 } 1653 1654 jbd_debug(4, "Handle %p going down\n", handle); 1655 trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev, 1656 transaction->t_tid, 1657 handle->h_type, handle->h_line_no, 1658 jiffies - handle->h_start_jiffies, 1659 handle->h_sync, handle->h_requested_credits, 1660 (handle->h_requested_credits - 1661 handle->h_buffer_credits)); 1662 1663 /* 1664 * Implement synchronous transaction batching. If the handle 1665 * was synchronous, don't force a commit immediately. Let's 1666 * yield and let another thread piggyback onto this 1667 * transaction. Keep doing that while new threads continue to 1668 * arrive. It doesn't cost much - we're about to run a commit 1669 * and sleep on IO anyway. Speeds up many-threaded, many-dir 1670 * operations by 30x or more... 1671 * 1672 * We try and optimize the sleep time against what the 1673 * underlying disk can do, instead of having a static sleep 1674 * time. This is useful for the case where our storage is so 1675 * fast that it is more optimal to go ahead and force a flush 1676 * and wait for the transaction to be committed than it is to 1677 * wait for an arbitrary amount of time for new writers to 1678 * join the transaction. We achieve this by measuring how 1679 * long it takes to commit a transaction, and compare it with 1680 * how long this transaction has been running, and if run time 1681 * < commit time then we sleep for the delta and commit. This 1682 * greatly helps super fast disks that would see slowdowns as 1683 * more threads started doing fsyncs. 1684 * 1685 * But don't do this if this process was the most recent one 1686 * to perform a synchronous write. We do this to detect the 1687 * case where a single process is doing a stream of sync 1688 * writes. No point in waiting for joiners in that case. 1689 * 1690 * Setting max_batch_time to 0 disables this completely. 
1691 */ 1692 pid = current->pid; 1693 if (handle->h_sync && journal->j_last_sync_writer != pid && 1694 journal->j_max_batch_time) { 1695 u64 commit_time, trans_time; 1696 1697 journal->j_last_sync_writer = pid; 1698 1699 read_lock(&journal->j_state_lock); 1700 commit_time = journal->j_average_commit_time; 1701 read_unlock(&journal->j_state_lock); 1702 1703 trans_time = ktime_to_ns(ktime_sub(ktime_get(), 1704 transaction->t_start_time)); 1705 1706 commit_time = max_t(u64, commit_time, 1707 1000*journal->j_min_batch_time); 1708 commit_time = min_t(u64, commit_time, 1709 1000*journal->j_max_batch_time); 1710 1711 if (trans_time < commit_time) { 1712 ktime_t expires = ktime_add_ns(ktime_get(), 1713 commit_time); 1714 set_current_state(TASK_UNINTERRUPTIBLE); 1715 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); 1716 } 1717 } 1718 1719 if (handle->h_sync) 1720 transaction->t_synchronous_commit = 1; 1721 current->journal_info = NULL; 1722 atomic_sub(handle->h_buffer_credits, 1723 &transaction->t_outstanding_credits); 1724 1725 /* 1726 * If the handle is marked SYNC, we need to set another commit 1727 * going! We also want to force a commit if the current 1728 * transaction is occupying too much of the log, or if the 1729 * transaction is too old now. 1730 */ 1731 if (handle->h_sync || 1732 (atomic_read(&transaction->t_outstanding_credits) > 1733 journal->j_max_transaction_buffers) || 1734 time_after_eq(jiffies, transaction->t_expires)) { 1735 /* Do this even for aborted journals: an abort still 1736 * completes the commit thread, it just doesn't write 1737 * anything to disk. */ 1738 1739 jbd_debug(2, "transaction too old, requesting commit for " 1740 "handle %p\n", handle); 1741 /* This is non-blocking */ 1742 jbd2_log_start_commit(journal, transaction->t_tid); 1743 1744 /* 1745 * Special case: JBD2_SYNC synchronous updates require us 1746 * to wait for the commit to complete. 1747 */ 1748 if (handle->h_sync && !(current->flags & PF_MEMALLOC)) 1749 wait_for_commit = 1; 1750 } 1751 1752 /* 1753 * Once we drop t_updates, if it goes to zero the transaction 1754 * could start committing on us and eventually disappear. So 1755 * once we do this, we must not dereference transaction 1756 * pointer again. 1757 */ 1758 tid = transaction->t_tid; 1759 if (atomic_dec_and_test(&transaction->t_updates)) { 1760 wake_up(&journal->j_wait_updates); 1761 if (journal->j_barrier_count) 1762 wake_up(&journal->j_wait_transaction_locked); 1763 } 1764 1765 if (wait_for_commit) 1766 err = jbd2_log_wait_commit(journal, tid); 1767 1768 lock_map_release(&handle->h_lockdep_map); 1769 1770 if (handle->h_rsv_handle) 1771 jbd2_journal_free_reserved(handle->h_rsv_handle); 1772 free_and_exit: 1773 jbd2_free_handle(handle); 1774 return err; 1775 } 1776 1777 /* 1778 * 1779 * List management code snippets: various functions for manipulating the 1780 * transaction buffer lists. 1781 * 1782 */ 1783 1784 /* 1785 * Append a buffer to a transaction list, given the transaction's list head 1786 * pointer. 1787 * 1788 * j_list_lock is held. 1789 * 1790 * jbd_lock_bh_state(jh2bh(jh)) is held. 
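 *
 * The b_tprev/b_tnext pointers form a circular, doubly linked list.
 * Purely as an illustrative sketch (assuming j_list_lock is held), a
 * walker of such a list would look like:
 *
 *	struct journal_head *p = *list;
 *
 *	if (p) {
 *		do {
 *			... look at p ...
 *			p = p->b_tnext;
 *		} while (p != *list);
 *	}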
1791 */ 1792 1793 static inline void 1794 __blist_add_buffer(struct journal_head **list, struct journal_head *jh) 1795 { 1796 if (!*list) { 1797 jh->b_tnext = jh->b_tprev = jh; 1798 *list = jh; 1799 } else { 1800 /* Insert at the tail of the list to preserve order */ 1801 struct journal_head *first = *list, *last = first->b_tprev; 1802 jh->b_tprev = last; 1803 jh->b_tnext = first; 1804 last->b_tnext = first->b_tprev = jh; 1805 } 1806 } 1807 1808 /* 1809 * Remove a buffer from a transaction list, given the transaction's list 1810 * head pointer. 1811 * 1812 * Called with j_list_lock held, and the journal may not be locked. 1813 * 1814 * jbd_lock_bh_state(jh2bh(jh)) is held. 1815 */ 1816 1817 static inline void 1818 __blist_del_buffer(struct journal_head **list, struct journal_head *jh) 1819 { 1820 if (*list == jh) { 1821 *list = jh->b_tnext; 1822 if (*list == jh) 1823 *list = NULL; 1824 } 1825 jh->b_tprev->b_tnext = jh->b_tnext; 1826 jh->b_tnext->b_tprev = jh->b_tprev; 1827 } 1828 1829 /* 1830 * Remove a buffer from the appropriate transaction list. 1831 * 1832 * Note that this function can *change* the value of 1833 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or 1834 * t_reserved_list. If the caller is holding onto a copy of one of these 1835 * pointers, it could go bad. Generally the caller needs to re-read the 1836 * pointer from the transaction_t. 1837 * 1838 * Called under j_list_lock. 1839 */ 1840 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) 1841 { 1842 struct journal_head **list = NULL; 1843 transaction_t *transaction; 1844 struct buffer_head *bh = jh2bh(jh); 1845 1846 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 1847 transaction = jh->b_transaction; 1848 if (transaction) 1849 assert_spin_locked(&transaction->t_journal->j_list_lock); 1850 1851 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 1852 if (jh->b_jlist != BJ_None) 1853 J_ASSERT_JH(jh, transaction != NULL); 1854 1855 switch (jh->b_jlist) { 1856 case BJ_None: 1857 return; 1858 case BJ_Metadata: 1859 transaction->t_nr_buffers--; 1860 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0); 1861 list = &transaction->t_buffers; 1862 break; 1863 case BJ_Forget: 1864 list = &transaction->t_forget; 1865 break; 1866 case BJ_Shadow: 1867 list = &transaction->t_shadow_list; 1868 break; 1869 case BJ_Reserved: 1870 list = &transaction->t_reserved_list; 1871 break; 1872 } 1873 1874 __blist_del_buffer(list, jh); 1875 jh->b_jlist = BJ_None; 1876 if (test_clear_buffer_jbddirty(bh)) 1877 mark_buffer_dirty(bh); /* Expose it to the VM */ 1878 } 1879 1880 /* 1881 * Remove buffer from all transactions. 1882 * 1883 * Called with bh_state lock and j_list_lock 1884 * 1885 * jh and bh may be already freed when this function returns. 1886 */ 1887 static void __jbd2_journal_unfile_buffer(struct journal_head *jh) 1888 { 1889 __jbd2_journal_temp_unlink_buffer(jh); 1890 jh->b_transaction = NULL; 1891 jbd2_journal_put_journal_head(jh); 1892 } 1893 1894 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) 1895 { 1896 struct buffer_head *bh = jh2bh(jh); 1897 1898 /* Get reference so that buffer cannot be freed before we unlock it */ 1899 get_bh(bh); 1900 jbd_lock_bh_state(bh); 1901 spin_lock(&journal->j_list_lock); 1902 __jbd2_journal_unfile_buffer(jh); 1903 spin_unlock(&journal->j_list_lock); 1904 jbd_unlock_bh_state(bh); 1905 __brelse(bh); 1906 } 1907 1908 /* 1909 * Called from jbd2_journal_try_to_free_buffers(). 
1910 * 1911 * Called under jbd_lock_bh_state(bh) 1912 */ 1913 static void 1914 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) 1915 { 1916 struct journal_head *jh; 1917 1918 jh = bh2jh(bh); 1919 1920 if (buffer_locked(bh) || buffer_dirty(bh)) 1921 goto out; 1922 1923 if (jh->b_next_transaction != NULL || jh->b_transaction != NULL) 1924 goto out; 1925 1926 spin_lock(&journal->j_list_lock); 1927 if (jh->b_cp_transaction != NULL) { 1928 /* written-back checkpointed metadata buffer */ 1929 JBUFFER_TRACE(jh, "remove from checkpoint list"); 1930 __jbd2_journal_remove_checkpoint(jh); 1931 } 1932 spin_unlock(&journal->j_list_lock); 1933 out: 1934 return; 1935 } 1936 1937 /** 1938 * int jbd2_journal_try_to_free_buffers() - try to free page buffers. 1939 * @journal: journal for operation 1940 * @page: to try and free 1941 * @gfp_mask: we use the mask to detect how hard should we try to release 1942 * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS is set, we wait for commit 1943 * code to release the buffers. 1944 * 1945 * 1946 * For all the buffers on this page, 1947 * if they are fully written out ordered data, move them onto BUF_CLEAN 1948 * so try_to_free_buffers() can reap them. 1949 * 1950 * This function returns non-zero if we wish try_to_free_buffers() 1951 * to be called. We do this if the page is releasable by try_to_free_buffers(). 1952 * We also do it if the page has locked or dirty buffers and the caller wants 1953 * us to perform sync or async writeout. 1954 * 1955 * This complicates JBD locking somewhat. We aren't protected by the 1956 * BKL here. We wish to remove the buffer from its committing or 1957 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer. 1958 * 1959 * This may *change* the value of transaction_t->t_datalist, so anyone 1960 * who looks at t_datalist needs to lock against this function. 1961 * 1962 * Even worse, someone may be doing a jbd2_journal_dirty_data on this 1963 * buffer. So we need to lock against that. jbd2_journal_dirty_data() 1964 * will come out of the lock with the buffer dirty, which makes it 1965 * ineligible for release here. 1966 * 1967 * Who else is affected by this? hmm... Really the only contender 1968 * is do_get_write_access() - it could be looking at the buffer while 1969 * journal_try_to_free_buffer() is changing its state. But that 1970 * cannot happen because we never reallocate freed data as metadata 1971 * while the data is part of a transaction. Yes? 1972 * 1973 * Return 0 on failure, 1 on success 1974 */ 1975 int jbd2_journal_try_to_free_buffers(journal_t *journal, 1976 struct page *page, gfp_t gfp_mask) 1977 { 1978 struct buffer_head *head; 1979 struct buffer_head *bh; 1980 int ret = 0; 1981 1982 J_ASSERT(PageLocked(page)); 1983 1984 head = page_buffers(page); 1985 bh = head; 1986 do { 1987 struct journal_head *jh; 1988 1989 /* 1990 * We take our own ref against the journal_head here to avoid 1991 * having to add tons of locking around each instance of 1992 * jbd2_journal_put_journal_head(). 1993 */ 1994 jh = jbd2_journal_grab_journal_head(bh); 1995 if (!jh) 1996 continue; 1997 1998 jbd_lock_bh_state(bh); 1999 __journal_try_to_free_buffer(journal, bh); 2000 jbd2_journal_put_journal_head(jh); 2001 jbd_unlock_bh_state(bh); 2002 if (buffer_jbd(bh)) 2003 goto busy; 2004 } while ((bh = bh->b_this_page) != head); 2005 2006 ret = try_to_free_buffers(page); 2007 2008 busy: 2009 return ret; 2010 } 2011 2012 /* 2013 * This buffer is no longer needed. 
If it is on an older transaction's 2014 * checkpoint list we need to record it on this transaction's forget list 2015 * to pin this buffer (and hence its checkpointing transaction) down until 2016 * this transaction commits. If the buffer isn't on a checkpoint list, we 2017 * release it. 2018 * Returns non-zero if JBD no longer has an interest in the buffer. 2019 * 2020 * Called under j_list_lock. 2021 * 2022 * Called under jbd_lock_bh_state(bh). 2023 */ 2024 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) 2025 { 2026 int may_free = 1; 2027 struct buffer_head *bh = jh2bh(jh); 2028 2029 if (jh->b_cp_transaction) { 2030 JBUFFER_TRACE(jh, "on running+cp transaction"); 2031 __jbd2_journal_temp_unlink_buffer(jh); 2032 /* 2033 * We don't want to write the buffer anymore, clear the 2034 * bit so that we don't confuse checks in 2035 * __journal_file_buffer 2036 */ 2037 clear_buffer_dirty(bh); 2038 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 2039 may_free = 0; 2040 } else { 2041 JBUFFER_TRACE(jh, "on running transaction"); 2042 __jbd2_journal_unfile_buffer(jh); 2043 } 2044 return may_free; 2045 } 2046 2047 /* 2048 * jbd2_journal_invalidatepage 2049 * 2050 * This code is tricky. It has a number of cases to deal with. 2051 * 2052 * There are two invariants which this code relies on: 2053 * 2054 * i_size must be updated on disk before we start calling invalidatepage on the 2055 * data. 2056 * 2057 * This is done in ext3 by defining an ext3_setattr method which 2058 * updates i_size before truncate gets going. By maintaining this 2059 * invariant, we can be sure that it is safe to throw away any buffers 2060 * attached to the current transaction: once the transaction commits, 2061 * we know that the data will not be needed. 2062 * 2063 * Note however that we can *not* throw away data belonging to the 2064 * previous, committing transaction! 2065 * 2066 * Any disk blocks which *are* part of the previous, committing 2067 * transaction (and which therefore cannot be discarded immediately) are 2068 * not going to be reused in the new running transaction 2069 * 2070 * The bitmap committed_data images guarantee this: any block which is 2071 * allocated in one transaction and removed in the next will be marked 2072 * as in-use in the committed_data bitmap, so cannot be reused until 2073 * the next transaction to delete the block commits. This means that 2074 * leaving committing buffers dirty is quite safe: the disk blocks 2075 * cannot be reallocated to a different file and so buffer aliasing is 2076 * not possible. 2077 * 2078 * 2079 * The above applies mainly to ordered data mode. In writeback mode we 2080 * don't make guarantees about the order in which data hits disk --- in 2081 * particular we don't guarantee that new dirty data is flushed before 2082 * transaction commit --- so it is always safe just to discard data 2083 * immediately in that mode. --sct 2084 */ 2085 2086 /* 2087 * The journal_unmap_buffer helper function returns zero if the buffer 2088 * concerned remains pinned as an anonymous buffer belonging to an older 2089 * transaction. 2090 * 2091 * We're outside-transaction here. Either or both of j_running_transaction 2092 * and j_committing_transaction may be NULL. 
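 *
 * Summarising the cases handled below: we return 1 if the buffer can be
 * freed, 0 if it must remain pinned by an older (checkpointed or
 * committing) transaction, and -EBUSY if it belongs to the committing
 * transaction while the page straddles i_size, in which case the caller
 * has to wait for that commit and retry.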
2093 */ 2094 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, 2095 int partial_page) 2096 { 2097 transaction_t *transaction; 2098 struct journal_head *jh; 2099 int may_free = 1; 2100 2101 BUFFER_TRACE(bh, "entry"); 2102 2103 /* 2104 * It is safe to proceed here without the j_list_lock because the 2105 * buffers cannot be stolen by try_to_free_buffers as long as we are 2106 * holding the page lock. --sct 2107 */ 2108 2109 if (!buffer_jbd(bh)) 2110 goto zap_buffer_unlocked; 2111 2112 /* OK, we have data buffer in journaled mode */ 2113 write_lock(&journal->j_state_lock); 2114 jbd_lock_bh_state(bh); 2115 spin_lock(&journal->j_list_lock); 2116 2117 jh = jbd2_journal_grab_journal_head(bh); 2118 if (!jh) 2119 goto zap_buffer_no_jh; 2120 2121 /* 2122 * We cannot remove the buffer from checkpoint lists until the 2123 * transaction adding inode to orphan list (let's call it T) 2124 * is committed. Otherwise if the transaction changing the 2125 * buffer would be cleaned from the journal before T is 2126 * committed, a crash will cause that the correct contents of 2127 * the buffer will be lost. On the other hand we have to 2128 * clear the buffer dirty bit at latest at the moment when the 2129 * transaction marking the buffer as freed in the filesystem 2130 * structures is committed because from that moment on the 2131 * block can be reallocated and used by a different page. 2132 * Since the block hasn't been freed yet but the inode has 2133 * already been added to orphan list, it is safe for us to add 2134 * the buffer to BJ_Forget list of the newest transaction. 2135 * 2136 * Also we have to clear buffer_mapped flag of a truncated buffer 2137 * because the buffer_head may be attached to the page straddling 2138 * i_size (can happen only when blocksize < pagesize) and thus the 2139 * buffer_head can be reused when the file is extended again. So we end 2140 * up keeping around invalidated buffers attached to transactions' 2141 * BJ_Forget list just to stop checkpointing code from cleaning up 2142 * the transaction this buffer was modified in. 2143 */ 2144 transaction = jh->b_transaction; 2145 if (transaction == NULL) { 2146 /* First case: not on any transaction. If it 2147 * has no checkpoint link, then we can zap it: 2148 * it's a writeback-mode buffer so we don't care 2149 * if it hits disk safely. */ 2150 if (!jh->b_cp_transaction) { 2151 JBUFFER_TRACE(jh, "not on any transaction: zap"); 2152 goto zap_buffer; 2153 } 2154 2155 if (!buffer_dirty(bh)) { 2156 /* bdflush has written it. We can drop it now */ 2157 __jbd2_journal_remove_checkpoint(jh); 2158 goto zap_buffer; 2159 } 2160 2161 /* OK, it must be in the journal but still not 2162 * written fully to disk: it's metadata or 2163 * journaled data... */ 2164 2165 if (journal->j_running_transaction) { 2166 /* ... and once the current transaction has 2167 * committed, the buffer won't be needed any 2168 * longer. */ 2169 JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget"); 2170 may_free = __dispose_buffer(jh, 2171 journal->j_running_transaction); 2172 goto zap_buffer; 2173 } else { 2174 /* There is no currently-running transaction. So the 2175 * orphan record which we wrote for this file must have 2176 * passed into commit. We must attach this buffer to 2177 * the committing transaction, if it exists. 
*/ 2178 if (journal->j_committing_transaction) { 2179 JBUFFER_TRACE(jh, "give to committing trans"); 2180 may_free = __dispose_buffer(jh, 2181 journal->j_committing_transaction); 2182 goto zap_buffer; 2183 } else { 2184 /* The orphan record's transaction has 2185 * committed. We can cleanse this buffer */ 2186 clear_buffer_jbddirty(bh); 2187 __jbd2_journal_remove_checkpoint(jh); 2188 goto zap_buffer; 2189 } 2190 } 2191 } else if (transaction == journal->j_committing_transaction) { 2192 JBUFFER_TRACE(jh, "on committing transaction"); 2193 /* 2194 * The buffer is committing, we simply cannot touch 2195 * it. If the page is straddling i_size we have to wait 2196 * for commit and try again. 2197 */ 2198 if (partial_page) { 2199 jbd2_journal_put_journal_head(jh); 2200 spin_unlock(&journal->j_list_lock); 2201 jbd_unlock_bh_state(bh); 2202 write_unlock(&journal->j_state_lock); 2203 return -EBUSY; 2204 } 2205 /* 2206 * OK, buffer won't be reachable after truncate. We just set 2207 * j_next_transaction to the running transaction (if there is 2208 * one) and mark buffer as freed so that commit code knows it 2209 * should clear dirty bits when it is done with the buffer. 2210 */ 2211 set_buffer_freed(bh); 2212 if (journal->j_running_transaction && buffer_jbddirty(bh)) 2213 jh->b_next_transaction = journal->j_running_transaction; 2214 jbd2_journal_put_journal_head(jh); 2215 spin_unlock(&journal->j_list_lock); 2216 jbd_unlock_bh_state(bh); 2217 write_unlock(&journal->j_state_lock); 2218 return 0; 2219 } else { 2220 /* Good, the buffer belongs to the running transaction. 2221 * We are writing our own transaction's data, not any 2222 * previous one's, so it is safe to throw it away 2223 * (remember that we expect the filesystem to have set 2224 * i_size already for this truncate so recovery will not 2225 * expose the disk blocks we are discarding here.) */ 2226 J_ASSERT_JH(jh, transaction == journal->j_running_transaction); 2227 JBUFFER_TRACE(jh, "on running transaction"); 2228 may_free = __dispose_buffer(jh, transaction); 2229 } 2230 2231 zap_buffer: 2232 /* 2233 * This is tricky. Although the buffer is truncated, it may be reused 2234 * if blocksize < pagesize and it is attached to the page straddling 2235 * EOF. Since the buffer might have been added to BJ_Forget list of the 2236 * running transaction, journal_get_write_access() won't clear 2237 * b_modified and credit accounting gets confused. So clear b_modified 2238 * here. 2239 */ 2240 jh->b_modified = 0; 2241 jbd2_journal_put_journal_head(jh); 2242 zap_buffer_no_jh: 2243 spin_unlock(&journal->j_list_lock); 2244 jbd_unlock_bh_state(bh); 2245 write_unlock(&journal->j_state_lock); 2246 zap_buffer_unlocked: 2247 clear_buffer_dirty(bh); 2248 J_ASSERT_BH(bh, !buffer_jbddirty(bh)); 2249 clear_buffer_mapped(bh); 2250 clear_buffer_req(bh); 2251 clear_buffer_new(bh); 2252 clear_buffer_delay(bh); 2253 clear_buffer_unwritten(bh); 2254 bh->b_bdev = NULL; 2255 return may_free; 2256 } 2257 2258 /** 2259 * void jbd2_journal_invalidatepage() 2260 * @journal: journal to use for flush... 2261 * @page: page to flush 2262 * @offset: start of the range to invalidate 2263 * @length: length of the range to invalidate 2264 * 2265 * Reap page buffers containing data in the specified range of the page. 2266 * Can return -EBUSY if buffers are part of the committing transaction and 2267 * the page is straddling i_size. Caller then has to wait for current commit 2268 * and try again.
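 *
 * Illustrative sketch only (assuming this is called from a filesystem's
 * ->invalidatepage hook with the page locked, and that "journal", "page",
 * "offset" and "length" come from that caller):
 *
 *	ret = jbd2_journal_invalidatepage(journal, page, offset, length);
 *	if (ret == -EBUSY) {
 *		... drop the page lock, wait for the committing
 *		    transaction to finish (e.g. via jbd2_log_wait_commit())
 *		    and retry ...
 *	}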
2269 */ 2270 int jbd2_journal_invalidatepage(journal_t *journal, 2271 struct page *page, 2272 unsigned int offset, 2273 unsigned int length) 2274 { 2275 struct buffer_head *head, *bh, *next; 2276 unsigned int stop = offset + length; 2277 unsigned int curr_off = 0; 2278 int partial_page = (offset || length < PAGE_CACHE_SIZE); 2279 int may_free = 1; 2280 int ret = 0; 2281 2282 if (!PageLocked(page)) 2283 BUG(); 2284 if (!page_has_buffers(page)) 2285 return 0; 2286 2287 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); 2288 2289 /* We will potentially be playing with lists other than just the 2290 * data lists (especially for journaled data mode), so be 2291 * cautious in our locking. */ 2292 2293 head = bh = page_buffers(page); 2294 do { 2295 unsigned int next_off = curr_off + bh->b_size; 2296 next = bh->b_this_page; 2297 2298 if (next_off > stop) 2299 return 0; 2300 2301 if (offset <= curr_off) { 2302 /* This block is wholly outside the truncation point */ 2303 lock_buffer(bh); 2304 ret = journal_unmap_buffer(journal, bh, partial_page); 2305 unlock_buffer(bh); 2306 if (ret < 0) 2307 return ret; 2308 may_free &= ret; 2309 } 2310 curr_off = next_off; 2311 bh = next; 2312 2313 } while (bh != head); 2314 2315 if (!partial_page) { 2316 if (may_free && try_to_free_buffers(page)) 2317 J_ASSERT(!page_has_buffers(page)); 2318 } 2319 return 0; 2320 } 2321 2322 /* 2323 * File a buffer on the given transaction list. 2324 */ 2325 void __jbd2_journal_file_buffer(struct journal_head *jh, 2326 transaction_t *transaction, int jlist) 2327 { 2328 struct journal_head **list = NULL; 2329 int was_dirty = 0; 2330 struct buffer_head *bh = jh2bh(jh); 2331 2332 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 2333 assert_spin_locked(&transaction->t_journal->j_list_lock); 2334 2335 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 2336 J_ASSERT_JH(jh, jh->b_transaction == transaction || 2337 jh->b_transaction == NULL); 2338 2339 if (jh->b_transaction && jh->b_jlist == jlist) 2340 return; 2341 2342 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 2343 jlist == BJ_Shadow || jlist == BJ_Forget) { 2344 /* 2345 * For metadata buffers, we track dirty bit in buffer_jbddirty 2346 * instead of buffer_dirty. We should not see a dirty bit set 2347 * here because we clear it in do_get_write_access but e.g. 2348 * tune2fs can modify the sb and set the dirty bit at any time 2349 * so we try to gracefully handle that. 
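 *
 * Any dirty state cleared here is remembered in "was_dirty" and turned
 * back into buffer_jbddirty at the end of this function, so the
 * information is not lost, it is simply moved to the bit the journal
 * actually tracks for these lists.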
2350 */ 2351 if (buffer_dirty(bh)) 2352 warn_dirty_buffer(bh); 2353 if (test_clear_buffer_dirty(bh) || 2354 test_clear_buffer_jbddirty(bh)) 2355 was_dirty = 1; 2356 } 2357 2358 if (jh->b_transaction) 2359 __jbd2_journal_temp_unlink_buffer(jh); 2360 else 2361 jbd2_journal_grab_journal_head(bh); 2362 jh->b_transaction = transaction; 2363 2364 switch (jlist) { 2365 case BJ_None: 2366 J_ASSERT_JH(jh, !jh->b_committed_data); 2367 J_ASSERT_JH(jh, !jh->b_frozen_data); 2368 return; 2369 case BJ_Metadata: 2370 transaction->t_nr_buffers++; 2371 list = &transaction->t_buffers; 2372 break; 2373 case BJ_Forget: 2374 list = &transaction->t_forget; 2375 break; 2376 case BJ_Shadow: 2377 list = &transaction->t_shadow_list; 2378 break; 2379 case BJ_Reserved: 2380 list = &transaction->t_reserved_list; 2381 break; 2382 } 2383 2384 __blist_add_buffer(list, jh); 2385 jh->b_jlist = jlist; 2386 2387 if (was_dirty) 2388 set_buffer_jbddirty(bh); 2389 } 2390 2391 void jbd2_journal_file_buffer(struct journal_head *jh, 2392 transaction_t *transaction, int jlist) 2393 { 2394 jbd_lock_bh_state(jh2bh(jh)); 2395 spin_lock(&transaction->t_journal->j_list_lock); 2396 __jbd2_journal_file_buffer(jh, transaction, jlist); 2397 spin_unlock(&transaction->t_journal->j_list_lock); 2398 jbd_unlock_bh_state(jh2bh(jh)); 2399 } 2400 2401 /* 2402 * Remove a buffer from its current buffer list in preparation for 2403 * dropping it from its current transaction entirely. If the buffer has 2404 * already started to be used by a subsequent transaction, refile the 2405 * buffer on that transaction's metadata list. 2406 * 2407 * Called under j_list_lock 2408 * Called under jbd_lock_bh_state(jh2bh(jh)) 2409 * 2410 * jh and bh may be already free when this function returns 2411 */ 2412 void __jbd2_journal_refile_buffer(struct journal_head *jh) 2413 { 2414 int was_dirty, jlist; 2415 struct buffer_head *bh = jh2bh(jh); 2416 2417 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 2418 if (jh->b_transaction) 2419 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); 2420 2421 /* If the buffer is now unused, just drop it. */ 2422 if (jh->b_next_transaction == NULL) { 2423 __jbd2_journal_unfile_buffer(jh); 2424 return; 2425 } 2426 2427 /* 2428 * It has been modified by a later transaction: add it to the new 2429 * transaction's metadata list. 2430 */ 2431 2432 was_dirty = test_clear_buffer_jbddirty(bh); 2433 __jbd2_journal_temp_unlink_buffer(jh); 2434 /* 2435 * We set b_transaction here because b_next_transaction will inherit 2436 * our jh reference and thus __jbd2_journal_file_buffer() must not 2437 * take a new one. 2438 */ 2439 jh->b_transaction = jh->b_next_transaction; 2440 jh->b_next_transaction = NULL; 2441 if (buffer_freed(bh)) 2442 jlist = BJ_Forget; 2443 else if (jh->b_modified) 2444 jlist = BJ_Metadata; 2445 else 2446 jlist = BJ_Reserved; 2447 __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist); 2448 J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); 2449 2450 if (was_dirty) 2451 set_buffer_jbddirty(bh); 2452 } 2453 2454 /* 2455 * __jbd2_journal_refile_buffer() with necessary locking added. We take our 2456 * bh reference so that we can safely unlock bh. 2457 * 2458 * The jh and bh may be freed by this call. 
2459 */ 2460 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) 2461 { 2462 struct buffer_head *bh = jh2bh(jh); 2463 2464 /* Get reference so that buffer cannot be freed before we unlock it */ 2465 get_bh(bh); 2466 jbd_lock_bh_state(bh); 2467 spin_lock(&journal->j_list_lock); 2468 __jbd2_journal_refile_buffer(jh); 2469 jbd_unlock_bh_state(bh); 2470 spin_unlock(&journal->j_list_lock); 2471 __brelse(bh); 2472 } 2473 2474 /* 2475 * File inode in the inode list of the handle's transaction 2476 */ 2477 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode) 2478 { 2479 transaction_t *transaction = handle->h_transaction; 2480 journal_t *journal; 2481 2482 if (is_handle_aborted(handle)) 2483 return -EROFS; 2484 journal = transaction->t_journal; 2485 2486 jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino, 2487 transaction->t_tid); 2488 2489 /* 2490 * First check whether inode isn't already on the transaction's 2491 * lists without taking the lock. Note that this check is safe 2492 * without the lock as we cannot race with somebody removing inode 2493 * from the transaction. The reason is that we remove inode from the 2494 * transaction only in journal_release_jbd_inode() and when we commit 2495 * the transaction. We are guarded from the first case by holding 2496 * a reference to the inode. We are safe against the second case 2497 * because if jinode->i_transaction == transaction, commit code 2498 * cannot touch the transaction because we hold reference to it, 2499 * and if jinode->i_next_transaction == transaction, commit code 2500 * will only file the inode where we want it. 2501 */ 2502 if (jinode->i_transaction == transaction || 2503 jinode->i_next_transaction == transaction) 2504 return 0; 2505 2506 spin_lock(&journal->j_list_lock); 2507 2508 if (jinode->i_transaction == transaction || 2509 jinode->i_next_transaction == transaction) 2510 goto done; 2511 2512 /* 2513 * We only ever set this variable to 1 so the test is safe. Since 2514 * t_need_data_flush is likely to be set, we do the test to save some 2515 * cacheline bouncing 2516 */ 2517 if (!transaction->t_need_data_flush) 2518 transaction->t_need_data_flush = 1; 2519 /* On some different transaction's list - should be 2520 * the committing one */ 2521 if (jinode->i_transaction) { 2522 J_ASSERT(jinode->i_next_transaction == NULL); 2523 J_ASSERT(jinode->i_transaction == 2524 journal->j_committing_transaction); 2525 jinode->i_next_transaction = transaction; 2526 goto done; 2527 } 2528 /* Not on any transaction list... */ 2529 J_ASSERT(!jinode->i_next_transaction); 2530 jinode->i_transaction = transaction; 2531 list_add(&jinode->i_list, &transaction->t_inode_list); 2532 done: 2533 spin_unlock(&journal->j_list_lock); 2534 2535 return 0; 2536 } 2537 2538 /* 2539 * File truncate and transaction commit interact with each other in a 2540 * non-trivial way. If a transaction writing data block A is 2541 * committing, we cannot discard the data by truncate until we have 2542 * written them. Otherwise if we crashed after the transaction with 2543 * write has committed but before the transaction with truncate has 2544 * committed, we could see stale data in block A. This function is a 2545 * helper to solve this problem. It starts writeout of the truncated 2546 * part in case it is in the committing transaction. 
2547 * 2548 * Filesystem code must call this function when inode is journaled in 2549 * ordered mode before truncation happens and after the inode has been 2550 * placed on orphan list with the new inode size. The second condition 2551 * avoids the race that someone writes new data and we start 2552 * committing the transaction after this function has been called but 2553 * before a transaction for truncate is started (and furthermore it 2554 * allows us to optimize the case where the addition to orphan list 2555 * happens in the same transaction as write --- we don't have to write 2556 * any data in such case). 2557 */ 2558 int jbd2_journal_begin_ordered_truncate(journal_t *journal, 2559 struct jbd2_inode *jinode, 2560 loff_t new_size) 2561 { 2562 transaction_t *inode_trans, *commit_trans; 2563 int ret = 0; 2564 2565 /* This is a quick check to avoid locking if not necessary */ 2566 if (!jinode->i_transaction) 2567 goto out; 2568 /* Locks are here just to force reading of recent values, it is 2569 * enough that the transaction was not committing before we started 2570 * a transaction adding the inode to orphan list */ 2571 read_lock(&journal->j_state_lock); 2572 commit_trans = journal->j_committing_transaction; 2573 read_unlock(&journal->j_state_lock); 2574 spin_lock(&journal->j_list_lock); 2575 inode_trans = jinode->i_transaction; 2576 spin_unlock(&journal->j_list_lock); 2577 if (inode_trans == commit_trans) { 2578 ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping, 2579 new_size, LLONG_MAX); 2580 if (ret) 2581 jbd2_journal_abort(journal, ret); 2582 } 2583 out: 2584 return ret; 2585 } 2586
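/*
 * Illustrative sketch only (not part of jbd2 itself): an ordered-mode
 * filesystem would typically call the helper above after placing the inode
 * on the orphan list with its new size and before starting the transaction
 * that performs the truncate, roughly:
 *
 *	err = jbd2_journal_begin_ordered_truncate(journal, ei_jinode, newsize);
 *	if (!err) {
 *		handle = jbd2_journal_start(journal, credits);
 *		... truncate the blocks under "handle" ...
 *		err = jbd2_journal_stop(handle);
 *	}
 *
 * "ei_jinode", "newsize" and "credits" are placeholders for whatever the
 * caller actually uses; they are not defined in this file.
 */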