1470decc6SDave Kleikamp /* 258862699SUwe Kleine-König * linux/fs/jbd2/transaction.c 3470decc6SDave Kleikamp * 4470decc6SDave Kleikamp * Written by Stephen C. Tweedie <sct@redhat.com>, 1998 5470decc6SDave Kleikamp * 6470decc6SDave Kleikamp * Copyright 1998 Red Hat corp --- All Rights Reserved 7470decc6SDave Kleikamp * 8470decc6SDave Kleikamp * This file is part of the Linux kernel and is made available under 9470decc6SDave Kleikamp * the terms of the GNU General Public License, version 2, or at your 10470decc6SDave Kleikamp * option, any later version, incorporated herein by reference. 11470decc6SDave Kleikamp * 12470decc6SDave Kleikamp * Generic filesystem transaction handling code; part of the ext2fs 13470decc6SDave Kleikamp * journaling system. 14470decc6SDave Kleikamp * 15470decc6SDave Kleikamp * This file manages transactions (compound commits managed by the 16470decc6SDave Kleikamp * journaling code) and handles (individual atomic operations by the 17470decc6SDave Kleikamp * filesystem). 18470decc6SDave Kleikamp */ 19470decc6SDave Kleikamp 20470decc6SDave Kleikamp #include <linux/time.h> 21470decc6SDave Kleikamp #include <linux/fs.h> 22f7f4bccbSMingming Cao #include <linux/jbd2.h> 23470decc6SDave Kleikamp #include <linux/errno.h> 24470decc6SDave Kleikamp #include <linux/slab.h> 25470decc6SDave Kleikamp #include <linux/timer.h> 26470decc6SDave Kleikamp #include <linux/mm.h> 27470decc6SDave Kleikamp #include <linux/highmem.h> 28470decc6SDave Kleikamp 297ddae860SAdrian Bunk static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); 307ddae860SAdrian Bunk 31470decc6SDave Kleikamp /* 32f7f4bccbSMingming Cao * jbd2_get_transaction: obtain a new transaction_t object. 33470decc6SDave Kleikamp * 34470decc6SDave Kleikamp * Simply allocate and initialise a new transaction. 
Create it in 35470decc6SDave Kleikamp * RUNNING state and add it to the current journal (which should not 36470decc6SDave Kleikamp * have an existing running transaction: we only make a new transaction 37470decc6SDave Kleikamp * once we have started to commit the old one). 38470decc6SDave Kleikamp * 39470decc6SDave Kleikamp * Preconditions: 40470decc6SDave Kleikamp * The journal MUST be locked. We don't perform atomic mallocs on the 41470decc6SDave Kleikamp * new transaction and we can't block without protecting against other 42470decc6SDave Kleikamp * processes trying to touch the journal while it is in transition. 43470decc6SDave Kleikamp * 44470decc6SDave Kleikamp * Called under j_state_lock 45470decc6SDave Kleikamp */ 46470decc6SDave Kleikamp 47470decc6SDave Kleikamp static transaction_t * 48f7f4bccbSMingming Cao jbd2_get_transaction(journal_t *journal, transaction_t *transaction) 49470decc6SDave Kleikamp { 50470decc6SDave Kleikamp transaction->t_journal = journal; 51470decc6SDave Kleikamp transaction->t_state = T_RUNNING; 52470decc6SDave Kleikamp transaction->t_tid = journal->j_transaction_sequence++; 53470decc6SDave Kleikamp transaction->t_expires = jiffies + journal->j_commit_interval; 54470decc6SDave Kleikamp spin_lock_init(&transaction->t_handle_lock); 55470decc6SDave Kleikamp 56470decc6SDave Kleikamp /* Set up the commit timer for the new transaction. */ 57470decc6SDave Kleikamp journal->j_commit_timer.expires = transaction->t_expires; 58470decc6SDave Kleikamp add_timer(&journal->j_commit_timer); 59470decc6SDave Kleikamp 60470decc6SDave Kleikamp J_ASSERT(journal->j_running_transaction == NULL); 61470decc6SDave Kleikamp journal->j_running_transaction = transaction; 62470decc6SDave Kleikamp 63470decc6SDave Kleikamp return transaction; 64470decc6SDave Kleikamp } 65470decc6SDave Kleikamp 66470decc6SDave Kleikamp /* 67470decc6SDave Kleikamp * Handle management. 
68470decc6SDave Kleikamp * 69470decc6SDave Kleikamp * A handle_t is an object which represents a single atomic update to a 70470decc6SDave Kleikamp * filesystem, and which tracks all of the modifications which form part 71470decc6SDave Kleikamp * of that one update. 72470decc6SDave Kleikamp */ 73470decc6SDave Kleikamp 74470decc6SDave Kleikamp /* 75470decc6SDave Kleikamp * start_this_handle: Given a handle, deal with any locking or stalling 76470decc6SDave Kleikamp * needed to make sure that there is enough journal space for the handle 77470decc6SDave Kleikamp * to begin. Attach the handle to a transaction and set up the 78470decc6SDave Kleikamp * transaction's buffer credits. 79470decc6SDave Kleikamp */ 80470decc6SDave Kleikamp 81470decc6SDave Kleikamp static int start_this_handle(journal_t *journal, handle_t *handle) 82470decc6SDave Kleikamp { 83470decc6SDave Kleikamp transaction_t *transaction; 84470decc6SDave Kleikamp int needed; 85470decc6SDave Kleikamp int nblocks = handle->h_buffer_credits; 86470decc6SDave Kleikamp transaction_t *new_transaction = NULL; 87470decc6SDave Kleikamp int ret = 0; 88470decc6SDave Kleikamp 89470decc6SDave Kleikamp if (nblocks > journal->j_max_transaction_buffers) { 90470decc6SDave Kleikamp printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", 91470decc6SDave Kleikamp current->comm, nblocks, 92470decc6SDave Kleikamp journal->j_max_transaction_buffers); 93470decc6SDave Kleikamp ret = -ENOSPC; 94470decc6SDave Kleikamp goto out; 95470decc6SDave Kleikamp } 96470decc6SDave Kleikamp 97470decc6SDave Kleikamp alloc_transaction: 98470decc6SDave Kleikamp if (!journal->j_running_transaction) { 992d917969SMingming Cao new_transaction = kmalloc(sizeof(*new_transaction), 1002d917969SMingming Cao GFP_NOFS|__GFP_NOFAIL); 101470decc6SDave Kleikamp if (!new_transaction) { 102470decc6SDave Kleikamp ret = -ENOMEM; 103470decc6SDave Kleikamp goto out; 104470decc6SDave Kleikamp } 105470decc6SDave Kleikamp memset(new_transaction, 0, 
sizeof(*new_transaction)); 106470decc6SDave Kleikamp } 107470decc6SDave Kleikamp 108470decc6SDave Kleikamp jbd_debug(3, "New handle %p going live.\n", handle); 109470decc6SDave Kleikamp 110470decc6SDave Kleikamp repeat: 111470decc6SDave Kleikamp 112470decc6SDave Kleikamp /* 113470decc6SDave Kleikamp * We need to hold j_state_lock until t_updates has been incremented, 114470decc6SDave Kleikamp * for proper journal barrier handling 115470decc6SDave Kleikamp */ 116470decc6SDave Kleikamp spin_lock(&journal->j_state_lock); 117470decc6SDave Kleikamp repeat_locked: 118470decc6SDave Kleikamp if (is_journal_aborted(journal) || 119f7f4bccbSMingming Cao (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) { 120470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 121470decc6SDave Kleikamp ret = -EROFS; 122470decc6SDave Kleikamp goto out; 123470decc6SDave Kleikamp } 124470decc6SDave Kleikamp 125470decc6SDave Kleikamp /* Wait on the journal's transaction barrier if necessary */ 126470decc6SDave Kleikamp if (journal->j_barrier_count) { 127470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 128470decc6SDave Kleikamp wait_event(journal->j_wait_transaction_locked, 129470decc6SDave Kleikamp journal->j_barrier_count == 0); 130470decc6SDave Kleikamp goto repeat; 131470decc6SDave Kleikamp } 132470decc6SDave Kleikamp 133470decc6SDave Kleikamp if (!journal->j_running_transaction) { 134470decc6SDave Kleikamp if (!new_transaction) { 135470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 136470decc6SDave Kleikamp goto alloc_transaction; 137470decc6SDave Kleikamp } 138f7f4bccbSMingming Cao jbd2_get_transaction(journal, new_transaction); 139470decc6SDave Kleikamp new_transaction = NULL; 140470decc6SDave Kleikamp } 141470decc6SDave Kleikamp 142470decc6SDave Kleikamp transaction = journal->j_running_transaction; 143470decc6SDave Kleikamp 144470decc6SDave Kleikamp /* 145470decc6SDave Kleikamp * If the current transaction is locked down for commit, wait for the 
146470decc6SDave Kleikamp * lock to be released. 147470decc6SDave Kleikamp */ 148470decc6SDave Kleikamp if (transaction->t_state == T_LOCKED) { 149470decc6SDave Kleikamp DEFINE_WAIT(wait); 150470decc6SDave Kleikamp 151470decc6SDave Kleikamp prepare_to_wait(&journal->j_wait_transaction_locked, 152470decc6SDave Kleikamp &wait, TASK_UNINTERRUPTIBLE); 153470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 154470decc6SDave Kleikamp schedule(); 155470decc6SDave Kleikamp finish_wait(&journal->j_wait_transaction_locked, &wait); 156470decc6SDave Kleikamp goto repeat; 157470decc6SDave Kleikamp } 158470decc6SDave Kleikamp 159470decc6SDave Kleikamp /* 160470decc6SDave Kleikamp * If there is not enough space left in the log to write all potential 161470decc6SDave Kleikamp * buffers requested by this operation, we need to stall pending a log 162470decc6SDave Kleikamp * checkpoint to free some more log space. 163470decc6SDave Kleikamp */ 164470decc6SDave Kleikamp spin_lock(&transaction->t_handle_lock); 165470decc6SDave Kleikamp needed = transaction->t_outstanding_credits + nblocks; 166470decc6SDave Kleikamp 167470decc6SDave Kleikamp if (needed > journal->j_max_transaction_buffers) { 168470decc6SDave Kleikamp /* 169470decc6SDave Kleikamp * If the current transaction is already too large, then start 170470decc6SDave Kleikamp * to commit it: we can then go back and attach this handle to 171470decc6SDave Kleikamp * a new transaction. 
172470decc6SDave Kleikamp */ 173470decc6SDave Kleikamp DEFINE_WAIT(wait); 174470decc6SDave Kleikamp 175470decc6SDave Kleikamp jbd_debug(2, "Handle %p starting new commit...\n", handle); 176470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 177470decc6SDave Kleikamp prepare_to_wait(&journal->j_wait_transaction_locked, &wait, 178470decc6SDave Kleikamp TASK_UNINTERRUPTIBLE); 179f7f4bccbSMingming Cao __jbd2_log_start_commit(journal, transaction->t_tid); 180470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 181470decc6SDave Kleikamp schedule(); 182470decc6SDave Kleikamp finish_wait(&journal->j_wait_transaction_locked, &wait); 183470decc6SDave Kleikamp goto repeat; 184470decc6SDave Kleikamp } 185470decc6SDave Kleikamp 186470decc6SDave Kleikamp /* 187470decc6SDave Kleikamp * The commit code assumes that it can get enough log space 188470decc6SDave Kleikamp * without forcing a checkpoint. This is *critical* for 189470decc6SDave Kleikamp * correctness: a checkpoint of a buffer which is also 190470decc6SDave Kleikamp * associated with a committing transaction creates a deadlock, 191470decc6SDave Kleikamp * so commit simply cannot force through checkpoints. 192470decc6SDave Kleikamp * 193470decc6SDave Kleikamp * We must therefore ensure the necessary space in the journal 194470decc6SDave Kleikamp * *before* starting to dirty potentially checkpointed buffers 195470decc6SDave Kleikamp * in the new transaction. 196470decc6SDave Kleikamp * 197470decc6SDave Kleikamp * The worst part is, any transaction currently committing can 198470decc6SDave Kleikamp * reduce the free space arbitrarily. Be careful to account for 199470decc6SDave Kleikamp * those buffers when checkpointing. 200470decc6SDave Kleikamp */ 201470decc6SDave Kleikamp 202470decc6SDave Kleikamp /* 203470decc6SDave Kleikamp * @@@ AKPM: This seems rather over-defensive. 
We're giving commit 204470decc6SDave Kleikamp * a _lot_ of headroom: 1/4 of the journal plus the size of 205470decc6SDave Kleikamp * the committing transaction. Really, we only need to give it 206470decc6SDave Kleikamp * committing_transaction->t_outstanding_credits plus "enough" for 207470decc6SDave Kleikamp * the log control blocks. 208470decc6SDave Kleikamp * Also, this test is inconsitent with the matching one in 209f7f4bccbSMingming Cao * jbd2_journal_extend(). 210470decc6SDave Kleikamp */ 211f7f4bccbSMingming Cao if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) { 212470decc6SDave Kleikamp jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle); 213470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 214f7f4bccbSMingming Cao __jbd2_log_wait_for_space(journal); 215470decc6SDave Kleikamp goto repeat_locked; 216470decc6SDave Kleikamp } 217470decc6SDave Kleikamp 218470decc6SDave Kleikamp /* OK, account for the buffers that this operation expects to 219470decc6SDave Kleikamp * use and add the handle to the running transaction. 
*/ 220470decc6SDave Kleikamp 221470decc6SDave Kleikamp handle->h_transaction = transaction; 222470decc6SDave Kleikamp transaction->t_outstanding_credits += nblocks; 223470decc6SDave Kleikamp transaction->t_updates++; 224470decc6SDave Kleikamp transaction->t_handle_count++; 225470decc6SDave Kleikamp jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n", 226470decc6SDave Kleikamp handle, nblocks, transaction->t_outstanding_credits, 227f7f4bccbSMingming Cao __jbd2_log_space_left(journal)); 228470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 229470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 230470decc6SDave Kleikamp out: 231470decc6SDave Kleikamp if (unlikely(new_transaction)) /* It's usually NULL */ 232470decc6SDave Kleikamp kfree(new_transaction); 233470decc6SDave Kleikamp return ret; 234470decc6SDave Kleikamp } 235470decc6SDave Kleikamp 236470decc6SDave Kleikamp /* Allocate a new handle. This should probably be in a slab... */ 237470decc6SDave Kleikamp static handle_t *new_handle(int nblocks) 238470decc6SDave Kleikamp { 239af1e76d6SMingming Cao handle_t *handle = jbd2_alloc_handle(GFP_NOFS); 240470decc6SDave Kleikamp if (!handle) 241470decc6SDave Kleikamp return NULL; 242470decc6SDave Kleikamp memset(handle, 0, sizeof(*handle)); 243470decc6SDave Kleikamp handle->h_buffer_credits = nblocks; 244470decc6SDave Kleikamp handle->h_ref = 1; 245470decc6SDave Kleikamp 246470decc6SDave Kleikamp return handle; 247470decc6SDave Kleikamp } 248470decc6SDave Kleikamp 249470decc6SDave Kleikamp /** 250f7f4bccbSMingming Cao * handle_t *jbd2_journal_start() - Obtain a new handle. 251470decc6SDave Kleikamp * @journal: Journal to start transaction on. 252470decc6SDave Kleikamp * @nblocks: number of block buffer we might modify 253470decc6SDave Kleikamp * 254470decc6SDave Kleikamp * We make sure that the transaction can guarantee at least nblocks of 255470decc6SDave Kleikamp * modified buffers in the log. 
We block until the log can guarantee 256470decc6SDave Kleikamp * that much space. 257470decc6SDave Kleikamp * 258470decc6SDave Kleikamp * This function is visible to journal users (like ext3fs), so is not 259470decc6SDave Kleikamp * called with the journal already locked. 260470decc6SDave Kleikamp * 261470decc6SDave Kleikamp * Return a pointer to a newly allocated handle, or NULL on failure 262470decc6SDave Kleikamp */ 263f7f4bccbSMingming Cao handle_t *jbd2_journal_start(journal_t *journal, int nblocks) 264470decc6SDave Kleikamp { 265470decc6SDave Kleikamp handle_t *handle = journal_current_handle(); 266470decc6SDave Kleikamp int err; 267470decc6SDave Kleikamp 268470decc6SDave Kleikamp if (!journal) 269470decc6SDave Kleikamp return ERR_PTR(-EROFS); 270470decc6SDave Kleikamp 271470decc6SDave Kleikamp if (handle) { 272470decc6SDave Kleikamp J_ASSERT(handle->h_transaction->t_journal == journal); 273470decc6SDave Kleikamp handle->h_ref++; 274470decc6SDave Kleikamp return handle; 275470decc6SDave Kleikamp } 276470decc6SDave Kleikamp 277470decc6SDave Kleikamp handle = new_handle(nblocks); 278470decc6SDave Kleikamp if (!handle) 279470decc6SDave Kleikamp return ERR_PTR(-ENOMEM); 280470decc6SDave Kleikamp 281470decc6SDave Kleikamp current->journal_info = handle; 282470decc6SDave Kleikamp 283470decc6SDave Kleikamp err = start_this_handle(journal, handle); 284470decc6SDave Kleikamp if (err < 0) { 285af1e76d6SMingming Cao jbd2_free_handle(handle); 286470decc6SDave Kleikamp current->journal_info = NULL; 287470decc6SDave Kleikamp handle = ERR_PTR(err); 288470decc6SDave Kleikamp } 289470decc6SDave Kleikamp return handle; 290470decc6SDave Kleikamp } 291470decc6SDave Kleikamp 292470decc6SDave Kleikamp /** 293f7f4bccbSMingming Cao * int jbd2_journal_extend() - extend buffer credits. 294470decc6SDave Kleikamp * @handle: handle to 'extend' 295470decc6SDave Kleikamp * @nblocks: nr blocks to try to extend by. 
296470decc6SDave Kleikamp * 297470decc6SDave Kleikamp * Some transactions, such as large extends and truncates, can be done 298470decc6SDave Kleikamp * atomically all at once or in several stages. The operation requests 299470decc6SDave Kleikamp * a credit for a number of buffer modications in advance, but can 300470decc6SDave Kleikamp * extend its credit if it needs more. 301470decc6SDave Kleikamp * 302f7f4bccbSMingming Cao * jbd2_journal_extend tries to give the running handle more buffer credits. 303470decc6SDave Kleikamp * It does not guarantee that allocation - this is a best-effort only. 304470decc6SDave Kleikamp * The calling process MUST be able to deal cleanly with a failure to 305470decc6SDave Kleikamp * extend here. 306470decc6SDave Kleikamp * 307470decc6SDave Kleikamp * Return 0 on success, non-zero on failure. 308470decc6SDave Kleikamp * 309470decc6SDave Kleikamp * return code < 0 implies an error 310470decc6SDave Kleikamp * return code > 0 implies normal transaction-full status. 311470decc6SDave Kleikamp */ 312f7f4bccbSMingming Cao int jbd2_journal_extend(handle_t *handle, int nblocks) 313470decc6SDave Kleikamp { 314470decc6SDave Kleikamp transaction_t *transaction = handle->h_transaction; 315470decc6SDave Kleikamp journal_t *journal = transaction->t_journal; 316470decc6SDave Kleikamp int result; 317470decc6SDave Kleikamp int wanted; 318470decc6SDave Kleikamp 319470decc6SDave Kleikamp result = -EIO; 320470decc6SDave Kleikamp if (is_handle_aborted(handle)) 321470decc6SDave Kleikamp goto out; 322470decc6SDave Kleikamp 323470decc6SDave Kleikamp result = 1; 324470decc6SDave Kleikamp 325470decc6SDave Kleikamp spin_lock(&journal->j_state_lock); 326470decc6SDave Kleikamp 327470decc6SDave Kleikamp /* Don't extend a locked-down transaction! 
*/ 328470decc6SDave Kleikamp if (handle->h_transaction->t_state != T_RUNNING) { 329470decc6SDave Kleikamp jbd_debug(3, "denied handle %p %d blocks: " 330470decc6SDave Kleikamp "transaction not running\n", handle, nblocks); 331470decc6SDave Kleikamp goto error_out; 332470decc6SDave Kleikamp } 333470decc6SDave Kleikamp 334470decc6SDave Kleikamp spin_lock(&transaction->t_handle_lock); 335470decc6SDave Kleikamp wanted = transaction->t_outstanding_credits + nblocks; 336470decc6SDave Kleikamp 337470decc6SDave Kleikamp if (wanted > journal->j_max_transaction_buffers) { 338470decc6SDave Kleikamp jbd_debug(3, "denied handle %p %d blocks: " 339470decc6SDave Kleikamp "transaction too large\n", handle, nblocks); 340470decc6SDave Kleikamp goto unlock; 341470decc6SDave Kleikamp } 342470decc6SDave Kleikamp 343f7f4bccbSMingming Cao if (wanted > __jbd2_log_space_left(journal)) { 344470decc6SDave Kleikamp jbd_debug(3, "denied handle %p %d blocks: " 345470decc6SDave Kleikamp "insufficient log space\n", handle, nblocks); 346470decc6SDave Kleikamp goto unlock; 347470decc6SDave Kleikamp } 348470decc6SDave Kleikamp 349470decc6SDave Kleikamp handle->h_buffer_credits += nblocks; 350470decc6SDave Kleikamp transaction->t_outstanding_credits += nblocks; 351470decc6SDave Kleikamp result = 0; 352470decc6SDave Kleikamp 353470decc6SDave Kleikamp jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); 354470decc6SDave Kleikamp unlock: 355470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 356470decc6SDave Kleikamp error_out: 357470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 358470decc6SDave Kleikamp out: 359470decc6SDave Kleikamp return result; 360470decc6SDave Kleikamp } 361470decc6SDave Kleikamp 362470decc6SDave Kleikamp 363470decc6SDave Kleikamp /** 364f7f4bccbSMingming Cao * int jbd2_journal_restart() - restart a handle . 
365470decc6SDave Kleikamp * @handle: handle to restart 366470decc6SDave Kleikamp * @nblocks: nr credits requested 367470decc6SDave Kleikamp * 368470decc6SDave Kleikamp * Restart a handle for a multi-transaction filesystem 369470decc6SDave Kleikamp * operation. 370470decc6SDave Kleikamp * 371f7f4bccbSMingming Cao * If the jbd2_journal_extend() call above fails to grant new buffer credits 372f7f4bccbSMingming Cao * to a running handle, a call to jbd2_journal_restart will commit the 373470decc6SDave Kleikamp * handle's transaction so far and reattach the handle to a new 374470decc6SDave Kleikamp * transaction capabable of guaranteeing the requested number of 375470decc6SDave Kleikamp * credits. 376470decc6SDave Kleikamp */ 377470decc6SDave Kleikamp 378f7f4bccbSMingming Cao int jbd2_journal_restart(handle_t *handle, int nblocks) 379470decc6SDave Kleikamp { 380470decc6SDave Kleikamp transaction_t *transaction = handle->h_transaction; 381470decc6SDave Kleikamp journal_t *journal = transaction->t_journal; 382470decc6SDave Kleikamp int ret; 383470decc6SDave Kleikamp 384470decc6SDave Kleikamp /* If we've had an abort of any type, don't even think about 385470decc6SDave Kleikamp * actually doing the restart! */ 386470decc6SDave Kleikamp if (is_handle_aborted(handle)) 387470decc6SDave Kleikamp return 0; 388470decc6SDave Kleikamp 389470decc6SDave Kleikamp /* 390470decc6SDave Kleikamp * First unlink the handle from its current transaction, and start the 391470decc6SDave Kleikamp * commit on that. 
392470decc6SDave Kleikamp */ 393470decc6SDave Kleikamp J_ASSERT(transaction->t_updates > 0); 394470decc6SDave Kleikamp J_ASSERT(journal_current_handle() == handle); 395470decc6SDave Kleikamp 396470decc6SDave Kleikamp spin_lock(&journal->j_state_lock); 397470decc6SDave Kleikamp spin_lock(&transaction->t_handle_lock); 398470decc6SDave Kleikamp transaction->t_outstanding_credits -= handle->h_buffer_credits; 399470decc6SDave Kleikamp transaction->t_updates--; 400470decc6SDave Kleikamp 401470decc6SDave Kleikamp if (!transaction->t_updates) 402470decc6SDave Kleikamp wake_up(&journal->j_wait_updates); 403470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 404470decc6SDave Kleikamp 405470decc6SDave Kleikamp jbd_debug(2, "restarting handle %p\n", handle); 406f7f4bccbSMingming Cao __jbd2_log_start_commit(journal, transaction->t_tid); 407470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 408470decc6SDave Kleikamp 409470decc6SDave Kleikamp handle->h_buffer_credits = nblocks; 410470decc6SDave Kleikamp ret = start_this_handle(journal, handle); 411470decc6SDave Kleikamp return ret; 412470decc6SDave Kleikamp } 413470decc6SDave Kleikamp 414470decc6SDave Kleikamp 415470decc6SDave Kleikamp /** 416f7f4bccbSMingming Cao * void jbd2_journal_lock_updates () - establish a transaction barrier. 417470decc6SDave Kleikamp * @journal: Journal to establish a barrier on. 418470decc6SDave Kleikamp * 419470decc6SDave Kleikamp * This locks out any further updates from being started, and blocks 420470decc6SDave Kleikamp * until all existing updates have completed, returning only once the 421470decc6SDave Kleikamp * journal is in a quiescent state with no updates running. 422470decc6SDave Kleikamp * 423470decc6SDave Kleikamp * The journal lock should not be held on entry. 
424470decc6SDave Kleikamp */ 425f7f4bccbSMingming Cao void jbd2_journal_lock_updates(journal_t *journal) 426470decc6SDave Kleikamp { 427470decc6SDave Kleikamp DEFINE_WAIT(wait); 428470decc6SDave Kleikamp 429470decc6SDave Kleikamp spin_lock(&journal->j_state_lock); 430470decc6SDave Kleikamp ++journal->j_barrier_count; 431470decc6SDave Kleikamp 432470decc6SDave Kleikamp /* Wait until there are no running updates */ 433470decc6SDave Kleikamp while (1) { 434470decc6SDave Kleikamp transaction_t *transaction = journal->j_running_transaction; 435470decc6SDave Kleikamp 436470decc6SDave Kleikamp if (!transaction) 437470decc6SDave Kleikamp break; 438470decc6SDave Kleikamp 439470decc6SDave Kleikamp spin_lock(&transaction->t_handle_lock); 440470decc6SDave Kleikamp if (!transaction->t_updates) { 441470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 442470decc6SDave Kleikamp break; 443470decc6SDave Kleikamp } 444470decc6SDave Kleikamp prepare_to_wait(&journal->j_wait_updates, &wait, 445470decc6SDave Kleikamp TASK_UNINTERRUPTIBLE); 446470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 447470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 448470decc6SDave Kleikamp schedule(); 449470decc6SDave Kleikamp finish_wait(&journal->j_wait_updates, &wait); 450470decc6SDave Kleikamp spin_lock(&journal->j_state_lock); 451470decc6SDave Kleikamp } 452470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 453470decc6SDave Kleikamp 454470decc6SDave Kleikamp /* 455470decc6SDave Kleikamp * We have now established a barrier against other normal updates, but 456f7f4bccbSMingming Cao * we also need to barrier against other jbd2_journal_lock_updates() calls 457470decc6SDave Kleikamp * to make sure that we serialise special journal-locked operations 458470decc6SDave Kleikamp * too. 
459470decc6SDave Kleikamp */ 460470decc6SDave Kleikamp mutex_lock(&journal->j_barrier); 461470decc6SDave Kleikamp } 462470decc6SDave Kleikamp 463470decc6SDave Kleikamp /** 464f7f4bccbSMingming Cao * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier 465470decc6SDave Kleikamp * @journal: Journal to release the barrier on. 466470decc6SDave Kleikamp * 467f7f4bccbSMingming Cao * Release a transaction barrier obtained with jbd2_journal_lock_updates(). 468470decc6SDave Kleikamp * 469470decc6SDave Kleikamp * Should be called without the journal lock held. 470470decc6SDave Kleikamp */ 471f7f4bccbSMingming Cao void jbd2_journal_unlock_updates (journal_t *journal) 472470decc6SDave Kleikamp { 473470decc6SDave Kleikamp J_ASSERT(journal->j_barrier_count != 0); 474470decc6SDave Kleikamp 475470decc6SDave Kleikamp mutex_unlock(&journal->j_barrier); 476470decc6SDave Kleikamp spin_lock(&journal->j_state_lock); 477470decc6SDave Kleikamp --journal->j_barrier_count; 478470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 479470decc6SDave Kleikamp wake_up(&journal->j_wait_transaction_locked); 480470decc6SDave Kleikamp } 481470decc6SDave Kleikamp 482470decc6SDave Kleikamp /* 483470decc6SDave Kleikamp * Report any unexpected dirty buffers which turn up. Normally those 484470decc6SDave Kleikamp * indicate an error, but they can occur if the user is running (say) 485470decc6SDave Kleikamp * tune2fs to modify the live filesystem, so we need the option of 486470decc6SDave Kleikamp * continuing as gracefully as possible. # 487470decc6SDave Kleikamp * 488470decc6SDave Kleikamp * The caller should already hold the journal lock and 489470decc6SDave Kleikamp * j_list_lock spinlock: most callers will need those anyway 490470decc6SDave Kleikamp * in order to probe the buffer's journaling state safely. 
491470decc6SDave Kleikamp */ 492470decc6SDave Kleikamp static void jbd_unexpected_dirty_buffer(struct journal_head *jh) 493470decc6SDave Kleikamp { 494470decc6SDave Kleikamp int jlist; 495470decc6SDave Kleikamp 496470decc6SDave Kleikamp /* If this buffer is one which might reasonably be dirty 497470decc6SDave Kleikamp * --- ie. data, or not part of this journal --- then 498470decc6SDave Kleikamp * we're OK to leave it alone, but otherwise we need to 499470decc6SDave Kleikamp * move the dirty bit to the journal's own internal 500470decc6SDave Kleikamp * JBDDirty bit. */ 501470decc6SDave Kleikamp jlist = jh->b_jlist; 502470decc6SDave Kleikamp 503470decc6SDave Kleikamp if (jlist == BJ_Metadata || jlist == BJ_Reserved || 504470decc6SDave Kleikamp jlist == BJ_Shadow || jlist == BJ_Forget) { 505470decc6SDave Kleikamp struct buffer_head *bh = jh2bh(jh); 506470decc6SDave Kleikamp 507470decc6SDave Kleikamp if (test_clear_buffer_dirty(bh)) 508470decc6SDave Kleikamp set_buffer_jbddirty(bh); 509470decc6SDave Kleikamp } 510470decc6SDave Kleikamp } 511470decc6SDave Kleikamp 512470decc6SDave Kleikamp /* 513470decc6SDave Kleikamp * If the buffer is already part of the current transaction, then there 514470decc6SDave Kleikamp * is nothing we need to do. If it is already part of a prior 515470decc6SDave Kleikamp * transaction which we are still committing to disk, then we need to 516470decc6SDave Kleikamp * make sure that we do not overwrite the old copy: we do copy-out to 517470decc6SDave Kleikamp * preserve the copy going to disk. We also account the buffer against 518470decc6SDave Kleikamp * the handle's metadata buffer credits (unless the buffer is already 519470decc6SDave Kleikamp * part of the transaction, that is). 
520470decc6SDave Kleikamp * 521470decc6SDave Kleikamp */ 522470decc6SDave Kleikamp static int 523470decc6SDave Kleikamp do_get_write_access(handle_t *handle, struct journal_head *jh, 524470decc6SDave Kleikamp int force_copy) 525470decc6SDave Kleikamp { 526470decc6SDave Kleikamp struct buffer_head *bh; 527470decc6SDave Kleikamp transaction_t *transaction; 528470decc6SDave Kleikamp journal_t *journal; 529470decc6SDave Kleikamp int error; 530470decc6SDave Kleikamp char *frozen_buffer = NULL; 531470decc6SDave Kleikamp int need_copy = 0; 532470decc6SDave Kleikamp 533470decc6SDave Kleikamp if (is_handle_aborted(handle)) 534470decc6SDave Kleikamp return -EROFS; 535470decc6SDave Kleikamp 536470decc6SDave Kleikamp transaction = handle->h_transaction; 537470decc6SDave Kleikamp journal = transaction->t_journal; 538470decc6SDave Kleikamp 539470decc6SDave Kleikamp jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy); 540470decc6SDave Kleikamp 541470decc6SDave Kleikamp JBUFFER_TRACE(jh, "entry"); 542470decc6SDave Kleikamp repeat: 543470decc6SDave Kleikamp bh = jh2bh(jh); 544470decc6SDave Kleikamp 545470decc6SDave Kleikamp /* @@@ Need to check for errors here at some point. */ 546470decc6SDave Kleikamp 547470decc6SDave Kleikamp lock_buffer(bh); 548470decc6SDave Kleikamp jbd_lock_bh_state(bh); 549470decc6SDave Kleikamp 550470decc6SDave Kleikamp /* We now hold the buffer lock so it is safe to query the buffer 551470decc6SDave Kleikamp * state. Is the buffer dirty? 552470decc6SDave Kleikamp * 553470decc6SDave Kleikamp * If so, there are two possibilities. The buffer may be 554470decc6SDave Kleikamp * non-journaled, and undergoing a quite legitimate writeback. 555470decc6SDave Kleikamp * Otherwise, it is journaled, and we don't expect dirty buffers 556470decc6SDave Kleikamp * in that state (the buffers should be marked JBD_Dirty 557470decc6SDave Kleikamp * instead.) 
So either the IO is being done under our own 558470decc6SDave Kleikamp * control and this is a bug, or it's a third party IO such as 559470decc6SDave Kleikamp * dump(8) (which may leave the buffer scheduled for read --- 560470decc6SDave Kleikamp * ie. locked but not dirty) or tune2fs (which may actually have 561470decc6SDave Kleikamp * the buffer dirtied, ugh.) */ 562470decc6SDave Kleikamp 563470decc6SDave Kleikamp if (buffer_dirty(bh)) { 564470decc6SDave Kleikamp /* 565470decc6SDave Kleikamp * First question: is this buffer already part of the current 566470decc6SDave Kleikamp * transaction or the existing committing transaction? 567470decc6SDave Kleikamp */ 568470decc6SDave Kleikamp if (jh->b_transaction) { 569470decc6SDave Kleikamp J_ASSERT_JH(jh, 570470decc6SDave Kleikamp jh->b_transaction == transaction || 571470decc6SDave Kleikamp jh->b_transaction == 572470decc6SDave Kleikamp journal->j_committing_transaction); 573470decc6SDave Kleikamp if (jh->b_next_transaction) 574470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_next_transaction == 575470decc6SDave Kleikamp transaction); 576470decc6SDave Kleikamp } 577470decc6SDave Kleikamp /* 578470decc6SDave Kleikamp * In any case we need to clean the dirty flag and we must 579470decc6SDave Kleikamp * do it under the buffer lock to be sure we don't race 580470decc6SDave Kleikamp * with running write-out. 
581470decc6SDave Kleikamp */ 582470decc6SDave Kleikamp JBUFFER_TRACE(jh, "Unexpected dirty buffer"); 583470decc6SDave Kleikamp jbd_unexpected_dirty_buffer(jh); 584470decc6SDave Kleikamp } 585470decc6SDave Kleikamp 586470decc6SDave Kleikamp unlock_buffer(bh); 587470decc6SDave Kleikamp 588470decc6SDave Kleikamp error = -EROFS; 589470decc6SDave Kleikamp if (is_handle_aborted(handle)) { 590470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 591470decc6SDave Kleikamp goto out; 592470decc6SDave Kleikamp } 593470decc6SDave Kleikamp error = 0; 594470decc6SDave Kleikamp 595470decc6SDave Kleikamp /* 596470decc6SDave Kleikamp * The buffer is already part of this transaction if b_transaction or 597470decc6SDave Kleikamp * b_next_transaction points to it 598470decc6SDave Kleikamp */ 599470decc6SDave Kleikamp if (jh->b_transaction == transaction || 600470decc6SDave Kleikamp jh->b_next_transaction == transaction) 601470decc6SDave Kleikamp goto done; 602470decc6SDave Kleikamp 603470decc6SDave Kleikamp /* 604470decc6SDave Kleikamp * If there is already a copy-out version of this buffer, then we don't 605470decc6SDave Kleikamp * need to make another one 606470decc6SDave Kleikamp */ 607470decc6SDave Kleikamp if (jh->b_frozen_data) { 608470decc6SDave Kleikamp JBUFFER_TRACE(jh, "has frozen data"); 609470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_next_transaction == NULL); 610470decc6SDave Kleikamp jh->b_next_transaction = transaction; 611470decc6SDave Kleikamp goto done; 612470decc6SDave Kleikamp } 613470decc6SDave Kleikamp 614470decc6SDave Kleikamp /* Is there data here we need to preserve? 
*/ 615470decc6SDave Kleikamp 616470decc6SDave Kleikamp if (jh->b_transaction && jh->b_transaction != transaction) { 617470decc6SDave Kleikamp JBUFFER_TRACE(jh, "owned by older transaction"); 618470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_next_transaction == NULL); 619470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_transaction == 620470decc6SDave Kleikamp journal->j_committing_transaction); 621470decc6SDave Kleikamp 622470decc6SDave Kleikamp /* There is one case we have to be very careful about. 623470decc6SDave Kleikamp * If the committing transaction is currently writing 624470decc6SDave Kleikamp * this buffer out to disk and has NOT made a copy-out, 625470decc6SDave Kleikamp * then we cannot modify the buffer contents at all 626470decc6SDave Kleikamp * right now. The essence of copy-out is that it is the 627470decc6SDave Kleikamp * extra copy, not the primary copy, which gets 628470decc6SDave Kleikamp * journaled. If the primary copy is already going to 629470decc6SDave Kleikamp * disk then we cannot do copy-out here. 
*/ 630470decc6SDave Kleikamp 631470decc6SDave Kleikamp if (jh->b_jlist == BJ_Shadow) { 632470decc6SDave Kleikamp DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow); 633470decc6SDave Kleikamp wait_queue_head_t *wqh; 634470decc6SDave Kleikamp 635470decc6SDave Kleikamp wqh = bit_waitqueue(&bh->b_state, BH_Unshadow); 636470decc6SDave Kleikamp 637470decc6SDave Kleikamp JBUFFER_TRACE(jh, "on shadow: sleep"); 638470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 639470decc6SDave Kleikamp /* commit wakes up all shadow buffers after IO */ 640470decc6SDave Kleikamp for ( ; ; ) { 641470decc6SDave Kleikamp prepare_to_wait(wqh, &wait.wait, 642470decc6SDave Kleikamp TASK_UNINTERRUPTIBLE); 643470decc6SDave Kleikamp if (jh->b_jlist != BJ_Shadow) 644470decc6SDave Kleikamp break; 645470decc6SDave Kleikamp schedule(); 646470decc6SDave Kleikamp } 647470decc6SDave Kleikamp finish_wait(wqh, &wait.wait); 648470decc6SDave Kleikamp goto repeat; 649470decc6SDave Kleikamp } 650470decc6SDave Kleikamp 651470decc6SDave Kleikamp /* Only do the copy if the currently-owning transaction 652470decc6SDave Kleikamp * still needs it. If it is on the Forget list, the 653470decc6SDave Kleikamp * committing transaction is past that stage. The 654470decc6SDave Kleikamp * buffer had better remain locked during the kmalloc, 655470decc6SDave Kleikamp * but that should be true --- we hold the journal lock 656470decc6SDave Kleikamp * still and the buffer is already on the BUF_JOURNAL 657470decc6SDave Kleikamp * list so won't be flushed. 658470decc6SDave Kleikamp * 659470decc6SDave Kleikamp * Subtle point, though: if this is a get_undo_access, 660470decc6SDave Kleikamp * then we will be relying on the frozen_data to contain 661470decc6SDave Kleikamp * the new value of the committed_data record after the 662470decc6SDave Kleikamp * transaction, so we HAVE to force the frozen_data copy 663470decc6SDave Kleikamp * in that case. 
*/ 664470decc6SDave Kleikamp 665470decc6SDave Kleikamp if (jh->b_jlist != BJ_Forget || force_copy) { 666470decc6SDave Kleikamp JBUFFER_TRACE(jh, "generate frozen data"); 667470decc6SDave Kleikamp if (!frozen_buffer) { 668470decc6SDave Kleikamp JBUFFER_TRACE(jh, "allocate memory for buffer"); 669470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 670470decc6SDave Kleikamp frozen_buffer = 671af1e76d6SMingming Cao jbd2_alloc(jh2bh(jh)->b_size, 672470decc6SDave Kleikamp GFP_NOFS); 673470decc6SDave Kleikamp if (!frozen_buffer) { 674470decc6SDave Kleikamp printk(KERN_EMERG 675470decc6SDave Kleikamp "%s: OOM for frozen_buffer\n", 676470decc6SDave Kleikamp __FUNCTION__); 677470decc6SDave Kleikamp JBUFFER_TRACE(jh, "oom!"); 678470decc6SDave Kleikamp error = -ENOMEM; 679470decc6SDave Kleikamp jbd_lock_bh_state(bh); 680470decc6SDave Kleikamp goto done; 681470decc6SDave Kleikamp } 682470decc6SDave Kleikamp goto repeat; 683470decc6SDave Kleikamp } 684470decc6SDave Kleikamp jh->b_frozen_data = frozen_buffer; 685470decc6SDave Kleikamp frozen_buffer = NULL; 686470decc6SDave Kleikamp need_copy = 1; 687470decc6SDave Kleikamp } 688470decc6SDave Kleikamp jh->b_next_transaction = transaction; 689470decc6SDave Kleikamp } 690470decc6SDave Kleikamp 691470decc6SDave Kleikamp 692470decc6SDave Kleikamp /* 693470decc6SDave Kleikamp * Finally, if the buffer is not journaled right now, we need to make 694470decc6SDave Kleikamp * sure it doesn't get written to disk before the caller actually 695470decc6SDave Kleikamp * commits the new data 696470decc6SDave Kleikamp */ 697470decc6SDave Kleikamp if (!jh->b_transaction) { 698470decc6SDave Kleikamp JBUFFER_TRACE(jh, "no transaction"); 699470decc6SDave Kleikamp J_ASSERT_JH(jh, !jh->b_next_transaction); 700470decc6SDave Kleikamp jh->b_transaction = transaction; 701470decc6SDave Kleikamp JBUFFER_TRACE(jh, "file as BJ_Reserved"); 702470decc6SDave Kleikamp spin_lock(&journal->j_list_lock); 703f7f4bccbSMingming Cao __jbd2_journal_file_buffer(jh, transaction, 
BJ_Reserved); 704470decc6SDave Kleikamp spin_unlock(&journal->j_list_lock); 705470decc6SDave Kleikamp } 706470decc6SDave Kleikamp 707470decc6SDave Kleikamp done: 708470decc6SDave Kleikamp if (need_copy) { 709470decc6SDave Kleikamp struct page *page; 710470decc6SDave Kleikamp int offset; 711470decc6SDave Kleikamp char *source; 712470decc6SDave Kleikamp 713470decc6SDave Kleikamp J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)), 714470decc6SDave Kleikamp "Possible IO failure.\n"); 715470decc6SDave Kleikamp page = jh2bh(jh)->b_page; 716470decc6SDave Kleikamp offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK; 717470decc6SDave Kleikamp source = kmap_atomic(page, KM_USER0); 718470decc6SDave Kleikamp memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); 719470decc6SDave Kleikamp kunmap_atomic(source, KM_USER0); 720470decc6SDave Kleikamp } 721470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 722470decc6SDave Kleikamp 723470decc6SDave Kleikamp /* 724470decc6SDave Kleikamp * If we are about to journal a buffer, then any revoke pending on it is 725470decc6SDave Kleikamp * no longer valid 726470decc6SDave Kleikamp */ 727f7f4bccbSMingming Cao jbd2_journal_cancel_revoke(handle, jh); 728470decc6SDave Kleikamp 729470decc6SDave Kleikamp out: 730470decc6SDave Kleikamp if (unlikely(frozen_buffer)) /* It's usually NULL */ 731af1e76d6SMingming Cao jbd2_free(frozen_buffer, bh->b_size); 732470decc6SDave Kleikamp 733470decc6SDave Kleikamp JBUFFER_TRACE(jh, "exit"); 734470decc6SDave Kleikamp return error; 735470decc6SDave Kleikamp } 736470decc6SDave Kleikamp 737470decc6SDave Kleikamp /** 738f7f4bccbSMingming Cao * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update. 
739470decc6SDave Kleikamp * @handle: transaction to add buffer modifications to 740470decc6SDave Kleikamp * @bh: bh to be used for metadata writes 741470decc6SDave Kleikamp * @credits: variable that will receive credits for the buffer 742470decc6SDave Kleikamp * 743470decc6SDave Kleikamp * Returns an error code or 0 on success. 744470decc6SDave Kleikamp * 745470decc6SDave Kleikamp * In full data journalling mode the buffer may be of type BJ_AsyncData, 746470decc6SDave Kleikamp * because we're write()ing a buffer which is also part of a shared mapping. 747470decc6SDave Kleikamp */ 748470decc6SDave Kleikamp 749f7f4bccbSMingming Cao int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) 750470decc6SDave Kleikamp { 751f7f4bccbSMingming Cao struct journal_head *jh = jbd2_journal_add_journal_head(bh); 752470decc6SDave Kleikamp int rc; 753470decc6SDave Kleikamp 754470decc6SDave Kleikamp /* We do not want to get caught playing with fields which the 755470decc6SDave Kleikamp * log thread also manipulates. Make sure that the buffer 756470decc6SDave Kleikamp * completes any outstanding IO before proceeding. */ 757470decc6SDave Kleikamp rc = do_get_write_access(handle, jh, 0); 758f7f4bccbSMingming Cao jbd2_journal_put_journal_head(jh); 759470decc6SDave Kleikamp return rc; 760470decc6SDave Kleikamp } 761470decc6SDave Kleikamp 762470decc6SDave Kleikamp 763470decc6SDave Kleikamp /* 764470decc6SDave Kleikamp * When the user wants to journal a newly created buffer_head 765470decc6SDave Kleikamp * (ie. getblk() returned a new buffer and we are going to populate it 766470decc6SDave Kleikamp * manually rather than reading off disk), then we need to keep the 767470decc6SDave Kleikamp * buffer_head locked until it has been completely filled with new 768470decc6SDave Kleikamp * data. In this case, we should be able to make the assertion that 769470decc6SDave Kleikamp * the bh is not already part of an existing transaction. 
770470decc6SDave Kleikamp * 771470decc6SDave Kleikamp * The buffer should already be locked by the caller by this point. 772470decc6SDave Kleikamp * There is no lock ranking violation: it was a newly created, 773470decc6SDave Kleikamp * unlocked buffer beforehand. */ 774470decc6SDave Kleikamp 775470decc6SDave Kleikamp /** 776f7f4bccbSMingming Cao * int jbd2_journal_get_create_access () - notify intent to use newly created bh 777470decc6SDave Kleikamp * @handle: transaction to new buffer to 778470decc6SDave Kleikamp * @bh: new buffer. 779470decc6SDave Kleikamp * 780470decc6SDave Kleikamp * Call this if you create a new bh. 781470decc6SDave Kleikamp */ 782f7f4bccbSMingming Cao int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) 783470decc6SDave Kleikamp { 784470decc6SDave Kleikamp transaction_t *transaction = handle->h_transaction; 785470decc6SDave Kleikamp journal_t *journal = transaction->t_journal; 786f7f4bccbSMingming Cao struct journal_head *jh = jbd2_journal_add_journal_head(bh); 787470decc6SDave Kleikamp int err; 788470decc6SDave Kleikamp 789470decc6SDave Kleikamp jbd_debug(5, "journal_head %p\n", jh); 790470decc6SDave Kleikamp err = -EROFS; 791470decc6SDave Kleikamp if (is_handle_aborted(handle)) 792470decc6SDave Kleikamp goto out; 793470decc6SDave Kleikamp err = 0; 794470decc6SDave Kleikamp 795470decc6SDave Kleikamp JBUFFER_TRACE(jh, "entry"); 796470decc6SDave Kleikamp /* 797470decc6SDave Kleikamp * The buffer may already belong to this transaction due to pre-zeroing 798470decc6SDave Kleikamp * in the filesystem's new_block code. It may also be on the previous, 799470decc6SDave Kleikamp * committing transaction's lists, but it HAS to be in Forget state in 800470decc6SDave Kleikamp * that case: the transaction must have deleted the buffer for it to be 801470decc6SDave Kleikamp * reused here. 
802470decc6SDave Kleikamp */ 803470decc6SDave Kleikamp jbd_lock_bh_state(bh); 804470decc6SDave Kleikamp spin_lock(&journal->j_list_lock); 805470decc6SDave Kleikamp J_ASSERT_JH(jh, (jh->b_transaction == transaction || 806470decc6SDave Kleikamp jh->b_transaction == NULL || 807470decc6SDave Kleikamp (jh->b_transaction == journal->j_committing_transaction && 808470decc6SDave Kleikamp jh->b_jlist == BJ_Forget))); 809470decc6SDave Kleikamp 810470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_next_transaction == NULL); 811470decc6SDave Kleikamp J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); 812470decc6SDave Kleikamp 813470decc6SDave Kleikamp if (jh->b_transaction == NULL) { 814470decc6SDave Kleikamp jh->b_transaction = transaction; 815470decc6SDave Kleikamp JBUFFER_TRACE(jh, "file as BJ_Reserved"); 816f7f4bccbSMingming Cao __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); 817470decc6SDave Kleikamp } else if (jh->b_transaction == journal->j_committing_transaction) { 818470decc6SDave Kleikamp JBUFFER_TRACE(jh, "set next transaction"); 819470decc6SDave Kleikamp jh->b_next_transaction = transaction; 820470decc6SDave Kleikamp } 821470decc6SDave Kleikamp spin_unlock(&journal->j_list_lock); 822470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 823470decc6SDave Kleikamp 824470decc6SDave Kleikamp /* 825470decc6SDave Kleikamp * akpm: I added this. ext3_alloc_branch can pick up new indirect 826470decc6SDave Kleikamp * blocks which contain freed but then revoked metadata. We need 827470decc6SDave Kleikamp * to cancel the revoke in case we end up freeing it yet again 828470decc6SDave Kleikamp * and the reallocating as data - this would cause a second revoke, 829470decc6SDave Kleikamp * which hits an assertion error. 
830470decc6SDave Kleikamp */ 831470decc6SDave Kleikamp JBUFFER_TRACE(jh, "cancelling revoke"); 832f7f4bccbSMingming Cao jbd2_journal_cancel_revoke(handle, jh); 833f7f4bccbSMingming Cao jbd2_journal_put_journal_head(jh); 834470decc6SDave Kleikamp out: 835470decc6SDave Kleikamp return err; 836470decc6SDave Kleikamp } 837470decc6SDave Kleikamp 838470decc6SDave Kleikamp /** 839f7f4bccbSMingming Cao * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with 840470decc6SDave Kleikamp * non-rewindable consequences 841470decc6SDave Kleikamp * @handle: transaction 842470decc6SDave Kleikamp * @bh: buffer to undo 843470decc6SDave Kleikamp * @credits: store the number of taken credits here (if not NULL) 844470decc6SDave Kleikamp * 845470decc6SDave Kleikamp * Sometimes there is a need to distinguish between metadata which has 846470decc6SDave Kleikamp * been committed to disk and that which has not. The ext3fs code uses 847470decc6SDave Kleikamp * this for freeing and allocating space, we have to make sure that we 848470decc6SDave Kleikamp * do not reuse freed space until the deallocation has been committed, 849470decc6SDave Kleikamp * since if we overwrote that space we would make the delete 850470decc6SDave Kleikamp * un-rewindable in case of a crash. 851470decc6SDave Kleikamp * 852f7f4bccbSMingming Cao * To deal with that, jbd2_journal_get_undo_access requests write access to a 853470decc6SDave Kleikamp * buffer for parts of non-rewindable operations such as delete 854470decc6SDave Kleikamp * operations on the bitmaps. The journaling code must keep a copy of 855470decc6SDave Kleikamp * the buffer's contents prior to the undo_access call until such time 856470decc6SDave Kleikamp * as we know that the buffer has definitely been committed to disk. 
857470decc6SDave Kleikamp * 858470decc6SDave Kleikamp * We never need to know which transaction the committed data is part 859470decc6SDave Kleikamp * of, buffers touched here are guaranteed to be dirtied later and so 860470decc6SDave Kleikamp * will be committed to a new transaction in due course, at which point 861470decc6SDave Kleikamp * we can discard the old committed data pointer. 862470decc6SDave Kleikamp * 863470decc6SDave Kleikamp * Returns error number or 0 on success. 864470decc6SDave Kleikamp */ 865f7f4bccbSMingming Cao int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) 866470decc6SDave Kleikamp { 867470decc6SDave Kleikamp int err; 868f7f4bccbSMingming Cao struct journal_head *jh = jbd2_journal_add_journal_head(bh); 869470decc6SDave Kleikamp char *committed_data = NULL; 870470decc6SDave Kleikamp 871470decc6SDave Kleikamp JBUFFER_TRACE(jh, "entry"); 872470decc6SDave Kleikamp 873470decc6SDave Kleikamp /* 874470decc6SDave Kleikamp * Do this first --- it can drop the journal lock, so we want to 875470decc6SDave Kleikamp * make sure that obtaining the committed_data is done 876470decc6SDave Kleikamp * atomically wrt. completion of any outstanding commits. 
877470decc6SDave Kleikamp */ 878470decc6SDave Kleikamp err = do_get_write_access(handle, jh, 1); 879470decc6SDave Kleikamp if (err) 880470decc6SDave Kleikamp goto out; 881470decc6SDave Kleikamp 882470decc6SDave Kleikamp repeat: 883470decc6SDave Kleikamp if (!jh->b_committed_data) { 884af1e76d6SMingming Cao committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); 885470decc6SDave Kleikamp if (!committed_data) { 886470decc6SDave Kleikamp printk(KERN_EMERG "%s: No memory for committed data\n", 887470decc6SDave Kleikamp __FUNCTION__); 888470decc6SDave Kleikamp err = -ENOMEM; 889470decc6SDave Kleikamp goto out; 890470decc6SDave Kleikamp } 891470decc6SDave Kleikamp } 892470decc6SDave Kleikamp 893470decc6SDave Kleikamp jbd_lock_bh_state(bh); 894470decc6SDave Kleikamp if (!jh->b_committed_data) { 895470decc6SDave Kleikamp /* Copy out the current buffer contents into the 896470decc6SDave Kleikamp * preserved, committed copy. */ 897470decc6SDave Kleikamp JBUFFER_TRACE(jh, "generate b_committed data"); 898470decc6SDave Kleikamp if (!committed_data) { 899470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 900470decc6SDave Kleikamp goto repeat; 901470decc6SDave Kleikamp } 902470decc6SDave Kleikamp 903470decc6SDave Kleikamp jh->b_committed_data = committed_data; 904470decc6SDave Kleikamp committed_data = NULL; 905470decc6SDave Kleikamp memcpy(jh->b_committed_data, bh->b_data, bh->b_size); 906470decc6SDave Kleikamp } 907470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 908470decc6SDave Kleikamp out: 909f7f4bccbSMingming Cao jbd2_journal_put_journal_head(jh); 910470decc6SDave Kleikamp if (unlikely(committed_data)) 911af1e76d6SMingming Cao jbd2_free(committed_data, bh->b_size); 912470decc6SDave Kleikamp return err; 913470decc6SDave Kleikamp } 914470decc6SDave Kleikamp 915470decc6SDave Kleikamp /** 916f7f4bccbSMingming Cao * int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which 917470decc6SDave Kleikamp * needs to be flushed before we can commit the 
 *                             current transaction.
 * @handle: transaction
 * @bh: bufferhead to mark
 *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
 * by kswapd.
 */
int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	journal_t *journal = handle->h_transaction->t_journal;
	int need_brelse = 0;
	struct journal_head *jh;

	/* An aborted handle is not treated as an error here: simply
	 * report success without touching the buffer. */
	if (is_handle_aborted(handle))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * The buffer could *already* be dirty.  Writeout can start
	 * at any time.
	 */
	jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);

	/*
	 * What if the buffer is already part of a running transaction?
	 *
	 * There are two cases:
	 * 1) It is part of the current running transaction.  Refile it,
	 *    just in case we have allocated it as metadata, deallocated
	 *    it, then reallocated it as data.
	 * 2) It is part of the previous, still-committing transaction.
	 *    If all we want to do is to guarantee that the buffer will be
	 *    written to disk before this new transaction commits, then
	 *    being sure that the *previous* transaction has this same
	 *    property is sufficient for us!  Just leave it on its old
	 *    transaction.
	 *
	 * In case (2), the buffer must not already exist as metadata
	 * --- that would violate write ordering (a transaction is free
	 * to write its data at any point, even before the previous
	 * committing transaction has committed).  The caller must
	 * never, ever allow this to happen: there's nothing we can do
	 * about it in this layer.
	 */
	/* Lock order, as everywhere in this file: bh_state first, then
	 * j_list_lock. */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	/* Now that we have bh_state locked, are we really still mapped? */
	if (!buffer_mapped(bh)) {
		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
		goto no_journal;
	}

	if (jh->b_transaction) {
		JBUFFER_TRACE(jh, "has transaction");
		if (jh->b_transaction != handle->h_transaction) {
			JBUFFER_TRACE(jh, "belongs to older transaction");
			J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

			/* @@@ IS THIS TRUE  ? */
			/*
			 * Not any more.  Scenario: someone does a write()
			 * in data=journal mode.  The buffer's transaction has
			 * moved into commit.  Then someone does another
			 * write() to the file.  We do the frozen data copyout
			 * and set b_next_transaction to point to j_running_t.
			 * And while we're in that state, someone does a
			 * writepage() in an attempt to pageout the same area
			 * of the file via a shared mapping.  At present that
			 * calls jbd2_journal_dirty_data(), and we get right here.
			 * It may be too late to journal the data.  Simply
			 * falling through to the next test will suffice: the
			 * data will be dirty and wil be checkpointed.  The
			 * ordering comments in the next comment block still
			 * apply.
			 */
			//J_ASSERT_JH(jh, jh->b_next_transaction == NULL);

			/*
			 * If we're journalling data, and this buffer was
			 * subject to a write(), it could be metadata, forget
			 * or shadow against the committing transaction.  Now,
			 * someone has dirtied the same darn page via a mapping
			 * and it is being writepage()'d.
			 * We *could* just steal the page from commit, with some
			 * fancy locking there.  Instead, we just skip it -
			 * don't tie the page's buffers to the new transaction
			 * at all.
			 * Implication: if we crash before the writepage() data
			 * is written into the filesystem, recovery will replay
			 * the write() data.
			 */
			if (jh->b_jlist != BJ_None &&
			    jh->b_jlist != BJ_SyncData &&
			    jh->b_jlist != BJ_Locked) {
				JBUFFER_TRACE(jh, "Not stealing");
				goto no_journal;
			}

			/*
			 * This buffer may be undergoing writeout in commit.  We
			 * can't return from here and let the caller dirty it
			 * again because that can cause the write-out loop in
			 * commit to never terminate.
			 */
			if (buffer_dirty(bh)) {
				get_bh(bh);
				/* Both locks are dropped around the blocking
				 * write-out below, so every piece of state
				 * must be re-validated once they are
				 * re-taken. */
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				need_brelse = 1;
				sync_dirty_buffer(bh);
				jbd_lock_bh_state(bh);
				spin_lock(&journal->j_list_lock);
				/* Since we dropped the lock... */
				if (!buffer_mapped(bh)) {
					JBUFFER_TRACE(jh, "buffer got unmapped");
					goto no_journal;
				}
				/* The buffer may become locked again at any
				   time if it is redirtied */
			}

			/* journal_clean_data_list() may have got there first */
			if (jh->b_transaction != NULL) {
				JBUFFER_TRACE(jh, "unfile from commit");
				__jbd2_journal_temp_unlink_buffer(jh);
				/* It still points to the committing
				 * transaction; move it to this one so
				 * that the refile assert checks are
				 * happy. */
				jh->b_transaction = handle->h_transaction;
			}
			/* The buffer will be refiled below */

		}
		/*
		 * Special case --- the buffer might actually have been
		 * allocated and then immediately deallocated in the previous,
		 * committing transaction, so might still be left on that
		 * transaction's metadata lists.
		 */
		if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
			JBUFFER_TRACE(jh, "not on correct data list: unfile");
			J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
			__jbd2_journal_temp_unlink_buffer(jh);
			jh->b_transaction = handle->h_transaction;
			JBUFFER_TRACE(jh, "file as data");
			__jbd2_journal_file_buffer(jh, handle->h_transaction,
						BJ_SyncData);
		}
	} else {
		JBUFFER_TRACE(jh, "not on a transaction");
		__jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
	}
no_journal:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	if (need_brelse) {
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
	}
	JBUFFER_TRACE(jh, "exit");
	jbd2_journal_put_journal_head(jh);
	return 0;
}

/**
 * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
1102470decc6SDave Kleikamp * 1103470decc6SDave Kleikamp * Returns error number or 0 on success. 1104470decc6SDave Kleikamp * 1105470decc6SDave Kleikamp * Special care needs to be taken if the buffer already belongs to the 1106470decc6SDave Kleikamp * current committing transaction (in which case we should have frozen 1107470decc6SDave Kleikamp * data present for that commit). In that case, we don't relink the 1108470decc6SDave Kleikamp * buffer: that only gets done when the old transaction finally 1109470decc6SDave Kleikamp * completes its commit. 1110470decc6SDave Kleikamp */ 1111f7f4bccbSMingming Cao int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) 1112470decc6SDave Kleikamp { 1113470decc6SDave Kleikamp transaction_t *transaction = handle->h_transaction; 1114470decc6SDave Kleikamp journal_t *journal = transaction->t_journal; 1115470decc6SDave Kleikamp struct journal_head *jh = bh2jh(bh); 1116470decc6SDave Kleikamp 1117470decc6SDave Kleikamp jbd_debug(5, "journal_head %p\n", jh); 1118470decc6SDave Kleikamp JBUFFER_TRACE(jh, "entry"); 1119470decc6SDave Kleikamp if (is_handle_aborted(handle)) 1120470decc6SDave Kleikamp goto out; 1121470decc6SDave Kleikamp 1122470decc6SDave Kleikamp jbd_lock_bh_state(bh); 1123470decc6SDave Kleikamp 1124470decc6SDave Kleikamp if (jh->b_modified == 0) { 1125470decc6SDave Kleikamp /* 1126470decc6SDave Kleikamp * This buffer's got modified and becoming part 1127470decc6SDave Kleikamp * of the transaction. This needs to be done 1128470decc6SDave Kleikamp * once a transaction -bzzz 1129470decc6SDave Kleikamp */ 1130470decc6SDave Kleikamp jh->b_modified = 1; 1131470decc6SDave Kleikamp J_ASSERT_JH(jh, handle->h_buffer_credits > 0); 1132470decc6SDave Kleikamp handle->h_buffer_credits--; 1133470decc6SDave Kleikamp } 1134470decc6SDave Kleikamp 1135470decc6SDave Kleikamp /* 1136470decc6SDave Kleikamp * fastpath, to avoid expensive locking. 
If this buffer is already 1137470decc6SDave Kleikamp * on the running transaction's metadata list there is nothing to do. 1138470decc6SDave Kleikamp * Nobody can take it off again because there is a handle open. 1139470decc6SDave Kleikamp * I _think_ we're OK here with SMP barriers - a mistaken decision will 1140470decc6SDave Kleikamp * result in this test being false, so we go in and take the locks. 1141470decc6SDave Kleikamp */ 1142470decc6SDave Kleikamp if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) { 1143470decc6SDave Kleikamp JBUFFER_TRACE(jh, "fastpath"); 1144470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_transaction == 1145470decc6SDave Kleikamp journal->j_running_transaction); 1146470decc6SDave Kleikamp goto out_unlock_bh; 1147470decc6SDave Kleikamp } 1148470decc6SDave Kleikamp 1149470decc6SDave Kleikamp set_buffer_jbddirty(bh); 1150470decc6SDave Kleikamp 1151470decc6SDave Kleikamp /* 1152470decc6SDave Kleikamp * Metadata already on the current transaction list doesn't 1153470decc6SDave Kleikamp * need to be filed. Metadata on another transaction's list must 1154470decc6SDave Kleikamp * be committing, and will be refiled once the commit completes: 1155470decc6SDave Kleikamp * leave it alone for now. 1156470decc6SDave Kleikamp */ 1157470decc6SDave Kleikamp if (jh->b_transaction != transaction) { 1158470decc6SDave Kleikamp JBUFFER_TRACE(jh, "already on other transaction"); 1159470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_transaction == 1160470decc6SDave Kleikamp journal->j_committing_transaction); 1161470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_next_transaction == transaction); 1162470decc6SDave Kleikamp /* And this case is illegal: we can't reuse another 1163470decc6SDave Kleikamp * transaction's data buffer, ever. 
*/ 1164470decc6SDave Kleikamp goto out_unlock_bh; 1165470decc6SDave Kleikamp } 1166470decc6SDave Kleikamp 1167470decc6SDave Kleikamp /* That test should have eliminated the following case: */ 1168470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_frozen_data == 0); 1169470decc6SDave Kleikamp 1170470decc6SDave Kleikamp JBUFFER_TRACE(jh, "file as BJ_Metadata"); 1171470decc6SDave Kleikamp spin_lock(&journal->j_list_lock); 1172f7f4bccbSMingming Cao __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata); 1173470decc6SDave Kleikamp spin_unlock(&journal->j_list_lock); 1174470decc6SDave Kleikamp out_unlock_bh: 1175470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 1176470decc6SDave Kleikamp out: 1177470decc6SDave Kleikamp JBUFFER_TRACE(jh, "exit"); 1178470decc6SDave Kleikamp return 0; 1179470decc6SDave Kleikamp } 1180470decc6SDave Kleikamp 1181470decc6SDave Kleikamp /* 1182f7f4bccbSMingming Cao * jbd2_journal_release_buffer: undo a get_write_access without any buffer 1183470decc6SDave Kleikamp * updates, if the update decided in the end that it didn't need access. 1184470decc6SDave Kleikamp * 1185470decc6SDave Kleikamp */ 1186470decc6SDave Kleikamp void 1187f7f4bccbSMingming Cao jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh) 1188470decc6SDave Kleikamp { 1189470decc6SDave Kleikamp BUFFER_TRACE(bh, "entry"); 1190470decc6SDave Kleikamp } 1191470decc6SDave Kleikamp 1192470decc6SDave Kleikamp /** 1193f7f4bccbSMingming Cao * void jbd2_journal_forget() - bforget() for potentially-journaled buffers. 1194470decc6SDave Kleikamp * @handle: transaction handle 1195470decc6SDave Kleikamp * @bh: bh to 'forget' 1196470decc6SDave Kleikamp * 1197470decc6SDave Kleikamp * We can only do the bforget if there are no commits pending against the 1198470decc6SDave Kleikamp * buffer. If the buffer is dirty in the current running transaction we 1199470decc6SDave Kleikamp * can safely unlink it. 
1200470decc6SDave Kleikamp * 1201470decc6SDave Kleikamp * bh may not be a journalled buffer at all - it may be a non-JBD 1202470decc6SDave Kleikamp * buffer which came off the hashtable. Check for this. 1203470decc6SDave Kleikamp * 1204470decc6SDave Kleikamp * Decrements bh->b_count by one. 1205470decc6SDave Kleikamp * 1206470decc6SDave Kleikamp * Allow this call even if the handle has aborted --- it may be part of 1207470decc6SDave Kleikamp * the caller's cleanup after an abort. 1208470decc6SDave Kleikamp */ 1209f7f4bccbSMingming Cao int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) 1210470decc6SDave Kleikamp { 1211470decc6SDave Kleikamp transaction_t *transaction = handle->h_transaction; 1212470decc6SDave Kleikamp journal_t *journal = transaction->t_journal; 1213470decc6SDave Kleikamp struct journal_head *jh; 1214470decc6SDave Kleikamp int drop_reserve = 0; 1215470decc6SDave Kleikamp int err = 0; 1216470decc6SDave Kleikamp 1217470decc6SDave Kleikamp BUFFER_TRACE(bh, "entry"); 1218470decc6SDave Kleikamp 1219470decc6SDave Kleikamp jbd_lock_bh_state(bh); 1220470decc6SDave Kleikamp spin_lock(&journal->j_list_lock); 1221470decc6SDave Kleikamp 1222470decc6SDave Kleikamp if (!buffer_jbd(bh)) 1223470decc6SDave Kleikamp goto not_jbd; 1224470decc6SDave Kleikamp jh = bh2jh(bh); 1225470decc6SDave Kleikamp 1226470decc6SDave Kleikamp /* Critical error: attempting to delete a bitmap buffer, maybe? 1227470decc6SDave Kleikamp * Don't do any jbd operations, and return an error. 
*/ 1228470decc6SDave Kleikamp if (!J_EXPECT_JH(jh, !jh->b_committed_data, 1229470decc6SDave Kleikamp "inconsistent data on disk")) { 1230470decc6SDave Kleikamp err = -EIO; 1231470decc6SDave Kleikamp goto not_jbd; 1232470decc6SDave Kleikamp } 1233470decc6SDave Kleikamp 1234470decc6SDave Kleikamp /* 1235470decc6SDave Kleikamp * The buffer's going from the transaction, we must drop 1236470decc6SDave Kleikamp * all references -bzzz 1237470decc6SDave Kleikamp */ 1238470decc6SDave Kleikamp jh->b_modified = 0; 1239470decc6SDave Kleikamp 1240470decc6SDave Kleikamp if (jh->b_transaction == handle->h_transaction) { 1241470decc6SDave Kleikamp J_ASSERT_JH(jh, !jh->b_frozen_data); 1242470decc6SDave Kleikamp 1243470decc6SDave Kleikamp /* If we are forgetting a buffer which is already part 1244470decc6SDave Kleikamp * of this transaction, then we can just drop it from 1245470decc6SDave Kleikamp * the transaction immediately. */ 1246470decc6SDave Kleikamp clear_buffer_dirty(bh); 1247470decc6SDave Kleikamp clear_buffer_jbddirty(bh); 1248470decc6SDave Kleikamp 1249470decc6SDave Kleikamp JBUFFER_TRACE(jh, "belongs to current transaction: unfile"); 1250470decc6SDave Kleikamp 1251470decc6SDave Kleikamp drop_reserve = 1; 1252470decc6SDave Kleikamp 1253470decc6SDave Kleikamp /* 1254470decc6SDave Kleikamp * We are no longer going to journal this buffer. 1255470decc6SDave Kleikamp * However, the commit of this transaction is still 1256470decc6SDave Kleikamp * important to the buffer: the delete that we are now 1257470decc6SDave Kleikamp * processing might obsolete an old log entry, so by 1258470decc6SDave Kleikamp * committing, we can satisfy the buffer's checkpoint. 1259470decc6SDave Kleikamp * 1260470decc6SDave Kleikamp * So, if we have a checkpoint on the buffer, we should 1261470decc6SDave Kleikamp * now refile the buffer on our BJ_Forget list so that 1262470decc6SDave Kleikamp * we know to remove the checkpoint after we commit. 
1263470decc6SDave Kleikamp */ 1264470decc6SDave Kleikamp 1265470decc6SDave Kleikamp if (jh->b_cp_transaction) { 1266f7f4bccbSMingming Cao __jbd2_journal_temp_unlink_buffer(jh); 1267f7f4bccbSMingming Cao __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1268470decc6SDave Kleikamp } else { 1269f7f4bccbSMingming Cao __jbd2_journal_unfile_buffer(jh); 1270f7f4bccbSMingming Cao jbd2_journal_remove_journal_head(bh); 1271470decc6SDave Kleikamp __brelse(bh); 1272470decc6SDave Kleikamp if (!buffer_jbd(bh)) { 1273470decc6SDave Kleikamp spin_unlock(&journal->j_list_lock); 1274470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 1275470decc6SDave Kleikamp __bforget(bh); 1276470decc6SDave Kleikamp goto drop; 1277470decc6SDave Kleikamp } 1278470decc6SDave Kleikamp } 1279470decc6SDave Kleikamp } else if (jh->b_transaction) { 1280470decc6SDave Kleikamp J_ASSERT_JH(jh, (jh->b_transaction == 1281470decc6SDave Kleikamp journal->j_committing_transaction)); 1282470decc6SDave Kleikamp /* However, if the buffer is still owned by a prior 1283470decc6SDave Kleikamp * (committing) transaction, we can't drop it yet... */ 1284470decc6SDave Kleikamp JBUFFER_TRACE(jh, "belongs to older transaction"); 1285470decc6SDave Kleikamp /* ... but we CAN drop it from the new transaction if we 1286470decc6SDave Kleikamp * have also modified it since the original commit. 
*/ 1287470decc6SDave Kleikamp 1288470decc6SDave Kleikamp if (jh->b_next_transaction) { 1289470decc6SDave Kleikamp J_ASSERT(jh->b_next_transaction == transaction); 1290470decc6SDave Kleikamp jh->b_next_transaction = NULL; 1291470decc6SDave Kleikamp drop_reserve = 1; 1292470decc6SDave Kleikamp } 1293470decc6SDave Kleikamp } 1294470decc6SDave Kleikamp 1295470decc6SDave Kleikamp not_jbd: 1296470decc6SDave Kleikamp spin_unlock(&journal->j_list_lock); 1297470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 1298470decc6SDave Kleikamp __brelse(bh); 1299470decc6SDave Kleikamp drop: 1300470decc6SDave Kleikamp if (drop_reserve) { 1301470decc6SDave Kleikamp /* no need to reserve log space for this block -bzzz */ 1302470decc6SDave Kleikamp handle->h_buffer_credits++; 1303470decc6SDave Kleikamp } 1304470decc6SDave Kleikamp return err; 1305470decc6SDave Kleikamp } 1306470decc6SDave Kleikamp 1307470decc6SDave Kleikamp /** 1308f7f4bccbSMingming Cao * int jbd2_journal_stop() - complete a transaction 1309470decc6SDave Kleikamp * @handle: tranaction to complete. 1310470decc6SDave Kleikamp * 1311470decc6SDave Kleikamp * All done for a particular handle. 1312470decc6SDave Kleikamp * 1313470decc6SDave Kleikamp * There is not much action needed here. We just return any remaining 1314470decc6SDave Kleikamp * buffer credits to the transaction and remove the handle. The only 1315470decc6SDave Kleikamp * complication is that we need to start a commit operation if the 1316470decc6SDave Kleikamp * filesystem is marked for synchronous update. 1317470decc6SDave Kleikamp * 1318f7f4bccbSMingming Cao * jbd2_journal_stop itself will not usually return an error, but it may 1319470decc6SDave Kleikamp * do so in unusual circumstances. In particular, expect it to 1320f7f4bccbSMingming Cao * return -EIO if a jbd2_journal_abort has been executed since the 1321470decc6SDave Kleikamp * transaction began. 
1322470decc6SDave Kleikamp */ 1323f7f4bccbSMingming Cao int jbd2_journal_stop(handle_t *handle) 1324470decc6SDave Kleikamp { 1325470decc6SDave Kleikamp transaction_t *transaction = handle->h_transaction; 1326470decc6SDave Kleikamp journal_t *journal = transaction->t_journal; 1327470decc6SDave Kleikamp int old_handle_count, err; 1328470decc6SDave Kleikamp pid_t pid; 1329470decc6SDave Kleikamp 1330470decc6SDave Kleikamp J_ASSERT(journal_current_handle() == handle); 1331470decc6SDave Kleikamp 1332470decc6SDave Kleikamp if (is_handle_aborted(handle)) 1333470decc6SDave Kleikamp err = -EIO; 13343e2a532bSOGAWA Hirofumi else { 13353e2a532bSOGAWA Hirofumi J_ASSERT(transaction->t_updates > 0); 1336470decc6SDave Kleikamp err = 0; 13373e2a532bSOGAWA Hirofumi } 1338470decc6SDave Kleikamp 1339470decc6SDave Kleikamp if (--handle->h_ref > 0) { 1340470decc6SDave Kleikamp jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, 1341470decc6SDave Kleikamp handle->h_ref); 1342470decc6SDave Kleikamp return err; 1343470decc6SDave Kleikamp } 1344470decc6SDave Kleikamp 1345470decc6SDave Kleikamp jbd_debug(4, "Handle %p going down\n", handle); 1346470decc6SDave Kleikamp 1347470decc6SDave Kleikamp /* 1348470decc6SDave Kleikamp * Implement synchronous transaction batching. If the handle 1349470decc6SDave Kleikamp * was synchronous, don't force a commit immediately. Let's 1350470decc6SDave Kleikamp * yield and let another thread piggyback onto this transaction. 1351470decc6SDave Kleikamp * Keep doing that while new threads continue to arrive. 1352470decc6SDave Kleikamp * It doesn't cost much - we're about to run a commit and sleep 1353470decc6SDave Kleikamp * on IO anyway. Speeds up many-threaded, many-dir operations 1354470decc6SDave Kleikamp * by 30x or more... 1355470decc6SDave Kleikamp * 1356470decc6SDave Kleikamp * But don't do this if this process was the most recent one to 1357470decc6SDave Kleikamp * perform a synchronous write. 
We do this to detect the case where a 1358470decc6SDave Kleikamp * single process is doing a stream of sync writes. No point in waiting 1359470decc6SDave Kleikamp * for joiners in that case. 1360470decc6SDave Kleikamp */ 1361470decc6SDave Kleikamp pid = current->pid; 1362470decc6SDave Kleikamp if (handle->h_sync && journal->j_last_sync_writer != pid) { 1363470decc6SDave Kleikamp journal->j_last_sync_writer = pid; 1364470decc6SDave Kleikamp do { 1365470decc6SDave Kleikamp old_handle_count = transaction->t_handle_count; 1366470decc6SDave Kleikamp schedule_timeout_uninterruptible(1); 1367470decc6SDave Kleikamp } while (old_handle_count != transaction->t_handle_count); 1368470decc6SDave Kleikamp } 1369470decc6SDave Kleikamp 1370470decc6SDave Kleikamp current->journal_info = NULL; 1371470decc6SDave Kleikamp spin_lock(&journal->j_state_lock); 1372470decc6SDave Kleikamp spin_lock(&transaction->t_handle_lock); 1373470decc6SDave Kleikamp transaction->t_outstanding_credits -= handle->h_buffer_credits; 1374470decc6SDave Kleikamp transaction->t_updates--; 1375470decc6SDave Kleikamp if (!transaction->t_updates) { 1376470decc6SDave Kleikamp wake_up(&journal->j_wait_updates); 1377470decc6SDave Kleikamp if (journal->j_barrier_count) 1378470decc6SDave Kleikamp wake_up(&journal->j_wait_transaction_locked); 1379470decc6SDave Kleikamp } 1380470decc6SDave Kleikamp 1381470decc6SDave Kleikamp /* 1382470decc6SDave Kleikamp * If the handle is marked SYNC, we need to set another commit 1383470decc6SDave Kleikamp * going! We also want to force a commit if the current 1384470decc6SDave Kleikamp * transaction is occupying too much of the log, or if the 1385470decc6SDave Kleikamp * transaction is too old now. 
1386470decc6SDave Kleikamp */ 1387470decc6SDave Kleikamp if (handle->h_sync || 1388470decc6SDave Kleikamp transaction->t_outstanding_credits > 1389470decc6SDave Kleikamp journal->j_max_transaction_buffers || 1390470decc6SDave Kleikamp time_after_eq(jiffies, transaction->t_expires)) { 1391470decc6SDave Kleikamp /* Do this even for aborted journals: an abort still 1392470decc6SDave Kleikamp * completes the commit thread, it just doesn't write 1393470decc6SDave Kleikamp * anything to disk. */ 1394470decc6SDave Kleikamp tid_t tid = transaction->t_tid; 1395470decc6SDave Kleikamp 1396470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 1397470decc6SDave Kleikamp jbd_debug(2, "transaction too old, requesting commit for " 1398470decc6SDave Kleikamp "handle %p\n", handle); 1399470decc6SDave Kleikamp /* This is non-blocking */ 1400f7f4bccbSMingming Cao __jbd2_log_start_commit(journal, transaction->t_tid); 1401470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 1402470decc6SDave Kleikamp 1403470decc6SDave Kleikamp /* 1404f7f4bccbSMingming Cao * Special case: JBD2_SYNC synchronous updates require us 1405470decc6SDave Kleikamp * to wait for the commit to complete. 
1406470decc6SDave Kleikamp */ 1407470decc6SDave Kleikamp if (handle->h_sync && !(current->flags & PF_MEMALLOC)) 1408f7f4bccbSMingming Cao err = jbd2_log_wait_commit(journal, tid); 1409470decc6SDave Kleikamp } else { 1410470decc6SDave Kleikamp spin_unlock(&transaction->t_handle_lock); 1411470decc6SDave Kleikamp spin_unlock(&journal->j_state_lock); 1412470decc6SDave Kleikamp } 1413470decc6SDave Kleikamp 1414af1e76d6SMingming Cao jbd2_free_handle(handle); 1415470decc6SDave Kleikamp return err; 1416470decc6SDave Kleikamp } 1417470decc6SDave Kleikamp 1418f7f4bccbSMingming Cao /**int jbd2_journal_force_commit() - force any uncommitted transactions 1419470decc6SDave Kleikamp * @journal: journal to force 1420470decc6SDave Kleikamp * 1421470decc6SDave Kleikamp * For synchronous operations: force any uncommitted transactions 1422470decc6SDave Kleikamp * to disk. May seem kludgy, but it reuses all the handle batching 1423470decc6SDave Kleikamp * code in a very simple manner. 1424470decc6SDave Kleikamp */ 1425f7f4bccbSMingming Cao int jbd2_journal_force_commit(journal_t *journal) 1426470decc6SDave Kleikamp { 1427470decc6SDave Kleikamp handle_t *handle; 1428470decc6SDave Kleikamp int ret; 1429470decc6SDave Kleikamp 1430f7f4bccbSMingming Cao handle = jbd2_journal_start(journal, 1); 1431470decc6SDave Kleikamp if (IS_ERR(handle)) { 1432470decc6SDave Kleikamp ret = PTR_ERR(handle); 1433470decc6SDave Kleikamp } else { 1434470decc6SDave Kleikamp handle->h_sync = 1; 1435f7f4bccbSMingming Cao ret = jbd2_journal_stop(handle); 1436470decc6SDave Kleikamp } 1437470decc6SDave Kleikamp return ret; 1438470decc6SDave Kleikamp } 1439470decc6SDave Kleikamp 1440470decc6SDave Kleikamp /* 1441470decc6SDave Kleikamp * 1442470decc6SDave Kleikamp * List management code snippets: various functions for manipulating the 1443470decc6SDave Kleikamp * transaction buffer lists. 
1444470decc6SDave Kleikamp * 1445470decc6SDave Kleikamp */ 1446470decc6SDave Kleikamp 1447470decc6SDave Kleikamp /* 1448470decc6SDave Kleikamp * Append a buffer to a transaction list, given the transaction's list head 1449470decc6SDave Kleikamp * pointer. 1450470decc6SDave Kleikamp * 1451470decc6SDave Kleikamp * j_list_lock is held. 1452470decc6SDave Kleikamp * 1453470decc6SDave Kleikamp * jbd_lock_bh_state(jh2bh(jh)) is held. 1454470decc6SDave Kleikamp */ 1455470decc6SDave Kleikamp 1456470decc6SDave Kleikamp static inline void 1457470decc6SDave Kleikamp __blist_add_buffer(struct journal_head **list, struct journal_head *jh) 1458470decc6SDave Kleikamp { 1459470decc6SDave Kleikamp if (!*list) { 1460470decc6SDave Kleikamp jh->b_tnext = jh->b_tprev = jh; 1461470decc6SDave Kleikamp *list = jh; 1462470decc6SDave Kleikamp } else { 1463470decc6SDave Kleikamp /* Insert at the tail of the list to preserve order */ 1464470decc6SDave Kleikamp struct journal_head *first = *list, *last = first->b_tprev; 1465470decc6SDave Kleikamp jh->b_tprev = last; 1466470decc6SDave Kleikamp jh->b_tnext = first; 1467470decc6SDave Kleikamp last->b_tnext = first->b_tprev = jh; 1468470decc6SDave Kleikamp } 1469470decc6SDave Kleikamp } 1470470decc6SDave Kleikamp 1471470decc6SDave Kleikamp /* 1472470decc6SDave Kleikamp * Remove a buffer from a transaction list, given the transaction's list 1473470decc6SDave Kleikamp * head pointer. 1474470decc6SDave Kleikamp * 1475470decc6SDave Kleikamp * Called with j_list_lock held, and the journal may not be locked. 1476470decc6SDave Kleikamp * 1477470decc6SDave Kleikamp * jbd_lock_bh_state(jh2bh(jh)) is held. 
1478470decc6SDave Kleikamp */ 1479470decc6SDave Kleikamp 1480470decc6SDave Kleikamp static inline void 1481470decc6SDave Kleikamp __blist_del_buffer(struct journal_head **list, struct journal_head *jh) 1482470decc6SDave Kleikamp { 1483470decc6SDave Kleikamp if (*list == jh) { 1484470decc6SDave Kleikamp *list = jh->b_tnext; 1485470decc6SDave Kleikamp if (*list == jh) 1486470decc6SDave Kleikamp *list = NULL; 1487470decc6SDave Kleikamp } 1488470decc6SDave Kleikamp jh->b_tprev->b_tnext = jh->b_tnext; 1489470decc6SDave Kleikamp jh->b_tnext->b_tprev = jh->b_tprev; 1490470decc6SDave Kleikamp } 1491470decc6SDave Kleikamp 1492470decc6SDave Kleikamp /* 1493470decc6SDave Kleikamp * Remove a buffer from the appropriate transaction list. 1494470decc6SDave Kleikamp * 1495470decc6SDave Kleikamp * Note that this function can *change* the value of 1496470decc6SDave Kleikamp * bh->b_transaction->t_sync_datalist, t_buffers, t_forget, 1497470decc6SDave Kleikamp * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list. If the caller 1498470decc6SDave Kleikamp * is holding onto a copy of one of thee pointers, it could go bad. 1499470decc6SDave Kleikamp * Generally the caller needs to re-read the pointer from the transaction_t. 1500470decc6SDave Kleikamp * 1501470decc6SDave Kleikamp * Called under j_list_lock. The journal may not be locked. 
1502470decc6SDave Kleikamp */ 1503f7f4bccbSMingming Cao void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) 1504470decc6SDave Kleikamp { 1505470decc6SDave Kleikamp struct journal_head **list = NULL; 1506470decc6SDave Kleikamp transaction_t *transaction; 1507470decc6SDave Kleikamp struct buffer_head *bh = jh2bh(jh); 1508470decc6SDave Kleikamp 1509470decc6SDave Kleikamp J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 1510470decc6SDave Kleikamp transaction = jh->b_transaction; 1511470decc6SDave Kleikamp if (transaction) 1512470decc6SDave Kleikamp assert_spin_locked(&transaction->t_journal->j_list_lock); 1513470decc6SDave Kleikamp 1514470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 1515470decc6SDave Kleikamp if (jh->b_jlist != BJ_None) 1516470decc6SDave Kleikamp J_ASSERT_JH(jh, transaction != 0); 1517470decc6SDave Kleikamp 1518470decc6SDave Kleikamp switch (jh->b_jlist) { 1519470decc6SDave Kleikamp case BJ_None: 1520470decc6SDave Kleikamp return; 1521470decc6SDave Kleikamp case BJ_SyncData: 1522470decc6SDave Kleikamp list = &transaction->t_sync_datalist; 1523470decc6SDave Kleikamp break; 1524470decc6SDave Kleikamp case BJ_Metadata: 1525470decc6SDave Kleikamp transaction->t_nr_buffers--; 1526470decc6SDave Kleikamp J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0); 1527470decc6SDave Kleikamp list = &transaction->t_buffers; 1528470decc6SDave Kleikamp break; 1529470decc6SDave Kleikamp case BJ_Forget: 1530470decc6SDave Kleikamp list = &transaction->t_forget; 1531470decc6SDave Kleikamp break; 1532470decc6SDave Kleikamp case BJ_IO: 1533470decc6SDave Kleikamp list = &transaction->t_iobuf_list; 1534470decc6SDave Kleikamp break; 1535470decc6SDave Kleikamp case BJ_Shadow: 1536470decc6SDave Kleikamp list = &transaction->t_shadow_list; 1537470decc6SDave Kleikamp break; 1538470decc6SDave Kleikamp case BJ_LogCtl: 1539470decc6SDave Kleikamp list = &transaction->t_log_list; 1540470decc6SDave Kleikamp break; 1541470decc6SDave Kleikamp case BJ_Reserved: 
1542470decc6SDave Kleikamp list = &transaction->t_reserved_list; 1543470decc6SDave Kleikamp break; 1544470decc6SDave Kleikamp case BJ_Locked: 1545470decc6SDave Kleikamp list = &transaction->t_locked_list; 1546470decc6SDave Kleikamp break; 1547470decc6SDave Kleikamp } 1548470decc6SDave Kleikamp 1549470decc6SDave Kleikamp __blist_del_buffer(list, jh); 1550470decc6SDave Kleikamp jh->b_jlist = BJ_None; 1551470decc6SDave Kleikamp if (test_clear_buffer_jbddirty(bh)) 1552470decc6SDave Kleikamp mark_buffer_dirty(bh); /* Expose it to the VM */ 1553470decc6SDave Kleikamp } 1554470decc6SDave Kleikamp 1555f7f4bccbSMingming Cao void __jbd2_journal_unfile_buffer(struct journal_head *jh) 1556470decc6SDave Kleikamp { 1557f7f4bccbSMingming Cao __jbd2_journal_temp_unlink_buffer(jh); 1558470decc6SDave Kleikamp jh->b_transaction = NULL; 1559470decc6SDave Kleikamp } 1560470decc6SDave Kleikamp 1561f7f4bccbSMingming Cao void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) 1562470decc6SDave Kleikamp { 1563470decc6SDave Kleikamp jbd_lock_bh_state(jh2bh(jh)); 1564470decc6SDave Kleikamp spin_lock(&journal->j_list_lock); 1565f7f4bccbSMingming Cao __jbd2_journal_unfile_buffer(jh); 1566470decc6SDave Kleikamp spin_unlock(&journal->j_list_lock); 1567470decc6SDave Kleikamp jbd_unlock_bh_state(jh2bh(jh)); 1568470decc6SDave Kleikamp } 1569470decc6SDave Kleikamp 1570470decc6SDave Kleikamp /* 1571f7f4bccbSMingming Cao * Called from jbd2_journal_try_to_free_buffers(). 
1572470decc6SDave Kleikamp * 1573470decc6SDave Kleikamp * Called under jbd_lock_bh_state(bh) 1574470decc6SDave Kleikamp */ 1575470decc6SDave Kleikamp static void 1576470decc6SDave Kleikamp __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) 1577470decc6SDave Kleikamp { 1578470decc6SDave Kleikamp struct journal_head *jh; 1579470decc6SDave Kleikamp 1580470decc6SDave Kleikamp jh = bh2jh(bh); 1581470decc6SDave Kleikamp 1582470decc6SDave Kleikamp if (buffer_locked(bh) || buffer_dirty(bh)) 1583470decc6SDave Kleikamp goto out; 1584470decc6SDave Kleikamp 1585470decc6SDave Kleikamp if (jh->b_next_transaction != 0) 1586470decc6SDave Kleikamp goto out; 1587470decc6SDave Kleikamp 1588470decc6SDave Kleikamp spin_lock(&journal->j_list_lock); 1589470decc6SDave Kleikamp if (jh->b_transaction != 0 && jh->b_cp_transaction == 0) { 1590470decc6SDave Kleikamp if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) { 1591470decc6SDave Kleikamp /* A written-back ordered data buffer */ 1592470decc6SDave Kleikamp JBUFFER_TRACE(jh, "release data"); 1593f7f4bccbSMingming Cao __jbd2_journal_unfile_buffer(jh); 1594f7f4bccbSMingming Cao jbd2_journal_remove_journal_head(bh); 1595470decc6SDave Kleikamp __brelse(bh); 1596470decc6SDave Kleikamp } 1597470decc6SDave Kleikamp } else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) { 1598470decc6SDave Kleikamp /* written-back checkpointed metadata buffer */ 1599470decc6SDave Kleikamp if (jh->b_jlist == BJ_None) { 1600470decc6SDave Kleikamp JBUFFER_TRACE(jh, "remove from checkpoint list"); 1601f7f4bccbSMingming Cao __jbd2_journal_remove_checkpoint(jh); 1602f7f4bccbSMingming Cao jbd2_journal_remove_journal_head(bh); 1603470decc6SDave Kleikamp __brelse(bh); 1604470decc6SDave Kleikamp } 1605470decc6SDave Kleikamp } 1606470decc6SDave Kleikamp spin_unlock(&journal->j_list_lock); 1607470decc6SDave Kleikamp out: 1608470decc6SDave Kleikamp return; 1609470decc6SDave Kleikamp } 1610470decc6SDave Kleikamp 1611470decc6SDave Kleikamp 
1612470decc6SDave Kleikamp /** 1613f7f4bccbSMingming Cao * int jbd2_journal_try_to_free_buffers() - try to free page buffers. 1614470decc6SDave Kleikamp * @journal: journal for operation 1615470decc6SDave Kleikamp * @page: to try and free 1616470decc6SDave Kleikamp * @unused_gfp_mask: unused 1617470decc6SDave Kleikamp * 1618470decc6SDave Kleikamp * 1619470decc6SDave Kleikamp * For all the buffers on this page, 1620470decc6SDave Kleikamp * if they are fully written out ordered data, move them onto BUF_CLEAN 1621470decc6SDave Kleikamp * so try_to_free_buffers() can reap them. 1622470decc6SDave Kleikamp * 1623470decc6SDave Kleikamp * This function returns non-zero if we wish try_to_free_buffers() 1624470decc6SDave Kleikamp * to be called. We do this if the page is releasable by try_to_free_buffers(). 1625470decc6SDave Kleikamp * We also do it if the page has locked or dirty buffers and the caller wants 1626470decc6SDave Kleikamp * us to perform sync or async writeout. 1627470decc6SDave Kleikamp * 1628470decc6SDave Kleikamp * This complicates JBD locking somewhat. We aren't protected by the 1629470decc6SDave Kleikamp * BKL here. We wish to remove the buffer from its committing or 1630f7f4bccbSMingming Cao * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer. 1631470decc6SDave Kleikamp * 1632470decc6SDave Kleikamp * This may *change* the value of transaction_t->t_datalist, so anyone 1633470decc6SDave Kleikamp * who looks at t_datalist needs to lock against this function. 1634470decc6SDave Kleikamp * 1635f7f4bccbSMingming Cao * Even worse, someone may be doing a jbd2_journal_dirty_data on this 1636f7f4bccbSMingming Cao * buffer. So we need to lock against that. jbd2_journal_dirty_data() 1637470decc6SDave Kleikamp * will come out of the lock with the buffer dirty, which makes it 1638470decc6SDave Kleikamp * ineligible for release here. 1639470decc6SDave Kleikamp * 1640470decc6SDave Kleikamp * Who else is affected by this? hmm... 
Really the only contender 1641470decc6SDave Kleikamp * is do_get_write_access() - it could be looking at the buffer while 1642470decc6SDave Kleikamp * journal_try_to_free_buffer() is changing its state. But that 1643470decc6SDave Kleikamp * cannot happen because we never reallocate freed data as metadata 1644470decc6SDave Kleikamp * while the data is part of a transaction. Yes? 1645470decc6SDave Kleikamp */ 1646f7f4bccbSMingming Cao int jbd2_journal_try_to_free_buffers(journal_t *journal, 1647470decc6SDave Kleikamp struct page *page, gfp_t unused_gfp_mask) 1648470decc6SDave Kleikamp { 1649470decc6SDave Kleikamp struct buffer_head *head; 1650470decc6SDave Kleikamp struct buffer_head *bh; 1651470decc6SDave Kleikamp int ret = 0; 1652470decc6SDave Kleikamp 1653470decc6SDave Kleikamp J_ASSERT(PageLocked(page)); 1654470decc6SDave Kleikamp 1655470decc6SDave Kleikamp head = page_buffers(page); 1656470decc6SDave Kleikamp bh = head; 1657470decc6SDave Kleikamp do { 1658470decc6SDave Kleikamp struct journal_head *jh; 1659470decc6SDave Kleikamp 1660470decc6SDave Kleikamp /* 1661470decc6SDave Kleikamp * We take our own ref against the journal_head here to avoid 1662470decc6SDave Kleikamp * having to add tons of locking around each instance of 1663f7f4bccbSMingming Cao * jbd2_journal_remove_journal_head() and jbd2_journal_put_journal_head(). 
1664470decc6SDave Kleikamp */ 1665f7f4bccbSMingming Cao jh = jbd2_journal_grab_journal_head(bh); 1666470decc6SDave Kleikamp if (!jh) 1667470decc6SDave Kleikamp continue; 1668470decc6SDave Kleikamp 1669470decc6SDave Kleikamp jbd_lock_bh_state(bh); 1670470decc6SDave Kleikamp __journal_try_to_free_buffer(journal, bh); 1671f7f4bccbSMingming Cao jbd2_journal_put_journal_head(jh); 1672470decc6SDave Kleikamp jbd_unlock_bh_state(bh); 1673470decc6SDave Kleikamp if (buffer_jbd(bh)) 1674470decc6SDave Kleikamp goto busy; 1675470decc6SDave Kleikamp } while ((bh = bh->b_this_page) != head); 1676470decc6SDave Kleikamp ret = try_to_free_buffers(page); 1677470decc6SDave Kleikamp busy: 1678470decc6SDave Kleikamp return ret; 1679470decc6SDave Kleikamp } 1680470decc6SDave Kleikamp 1681470decc6SDave Kleikamp /* 1682470decc6SDave Kleikamp * This buffer is no longer needed. If it is on an older transaction's 1683470decc6SDave Kleikamp * checkpoint list we need to record it on this transaction's forget list 1684470decc6SDave Kleikamp * to pin this buffer (and hence its checkpointing transaction) down until 1685470decc6SDave Kleikamp * this transaction commits. If the buffer isn't on a checkpoint list, we 1686470decc6SDave Kleikamp * release it. 1687470decc6SDave Kleikamp * Returns non-zero if JBD no longer has an interest in the buffer. 1688470decc6SDave Kleikamp * 1689470decc6SDave Kleikamp * Called under j_list_lock. 1690470decc6SDave Kleikamp * 1691470decc6SDave Kleikamp * Called under jbd_lock_bh_state(bh). 
1692470decc6SDave Kleikamp */ 1693470decc6SDave Kleikamp static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) 1694470decc6SDave Kleikamp { 1695470decc6SDave Kleikamp int may_free = 1; 1696470decc6SDave Kleikamp struct buffer_head *bh = jh2bh(jh); 1697470decc6SDave Kleikamp 1698f7f4bccbSMingming Cao __jbd2_journal_unfile_buffer(jh); 1699470decc6SDave Kleikamp 1700470decc6SDave Kleikamp if (jh->b_cp_transaction) { 1701470decc6SDave Kleikamp JBUFFER_TRACE(jh, "on running+cp transaction"); 1702f7f4bccbSMingming Cao __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1703470decc6SDave Kleikamp clear_buffer_jbddirty(bh); 1704470decc6SDave Kleikamp may_free = 0; 1705470decc6SDave Kleikamp } else { 1706470decc6SDave Kleikamp JBUFFER_TRACE(jh, "on running transaction"); 1707f7f4bccbSMingming Cao jbd2_journal_remove_journal_head(bh); 1708470decc6SDave Kleikamp __brelse(bh); 1709470decc6SDave Kleikamp } 1710470decc6SDave Kleikamp return may_free; 1711470decc6SDave Kleikamp } 1712470decc6SDave Kleikamp 1713470decc6SDave Kleikamp /* 1714f7f4bccbSMingming Cao * jbd2_journal_invalidatepage 1715470decc6SDave Kleikamp * 1716470decc6SDave Kleikamp * This code is tricky. It has a number of cases to deal with. 1717470decc6SDave Kleikamp * 1718470decc6SDave Kleikamp * There are two invariants which this code relies on: 1719470decc6SDave Kleikamp * 1720470decc6SDave Kleikamp * i_size must be updated on disk before we start calling invalidatepage on the 1721470decc6SDave Kleikamp * data. 1722470decc6SDave Kleikamp * 1723470decc6SDave Kleikamp * This is done in ext3 by defining an ext3_setattr method which 1724470decc6SDave Kleikamp * updates i_size before truncate gets going. By maintaining this 1725470decc6SDave Kleikamp * invariant, we can be sure that it is safe to throw away any buffers 1726470decc6SDave Kleikamp * attached to the current transaction: once the transaction commits, 1727470decc6SDave Kleikamp * we know that the data will not be needed. 
1728470decc6SDave Kleikamp * 1729470decc6SDave Kleikamp * Note however that we can *not* throw away data belonging to the 1730470decc6SDave Kleikamp * previous, committing transaction! 1731470decc6SDave Kleikamp * 1732470decc6SDave Kleikamp * Any disk blocks which *are* part of the previous, committing 1733470decc6SDave Kleikamp * transaction (and which therefore cannot be discarded immediately) are 1734470decc6SDave Kleikamp * not going to be reused in the new running transaction 1735470decc6SDave Kleikamp * 1736470decc6SDave Kleikamp * The bitmap committed_data images guarantee this: any block which is 1737470decc6SDave Kleikamp * allocated in one transaction and removed in the next will be marked 1738470decc6SDave Kleikamp * as in-use in the committed_data bitmap, so cannot be reused until 1739470decc6SDave Kleikamp * the next transaction to delete the block commits. This means that 1740470decc6SDave Kleikamp * leaving committing buffers dirty is quite safe: the disk blocks 1741470decc6SDave Kleikamp * cannot be reallocated to a different file and so buffer aliasing is 1742470decc6SDave Kleikamp * not possible. 1743470decc6SDave Kleikamp * 1744470decc6SDave Kleikamp * 1745470decc6SDave Kleikamp * The above applies mainly to ordered data mode. In writeback mode we 1746470decc6SDave Kleikamp * don't make guarantees about the order in which data hits disk --- in 1747470decc6SDave Kleikamp * particular we don't guarantee that new dirty data is flushed before 1748470decc6SDave Kleikamp * transaction commit --- so it is always safe just to discard data 1749470decc6SDave Kleikamp * immediately in that mode. --sct 1750470decc6SDave Kleikamp */ 1751470decc6SDave Kleikamp 1752470decc6SDave Kleikamp /* 1753470decc6SDave Kleikamp * The journal_unmap_buffer helper function returns zero if the buffer 1754470decc6SDave Kleikamp * concerned remains pinned as an anonymous buffer belonging to an older 1755470decc6SDave Kleikamp * transaction. 
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 *
 * NOTE(review): the caller must hold the page lock and have the buffer
 * locked (jbd2_journal_invalidatepage does lock_buffer() before calling
 * us); the comment below relies on the page lock to keep the buffers
 * from being stolen.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;	/* return value: 1 => caller may free bh */
	int ret;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	if (!buffer_jbd(bh))
		goto zap_buffer_unlocked;

	/* Lock order: j_state_lock -> bh state lock -> j_list_lock. */
	spin_lock(&journal->j_state_lock);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	/* Take an extra jh reference so it cannot vanish under us. */
	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_no_jh;

	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			ret = __dispose_buffer(jh,
					journal->j_running_transaction);
			jbd2_journal_put_journal_head(jh);
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			spin_unlock(&journal->j_state_lock);
			return ret;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				ret = __dispose_buffer(jh,
					journal->j_committing_transaction);
				jbd2_journal_put_journal_head(jh);
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				spin_unlock(&journal->j_state_lock);
				return ret;
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		if (jh->b_jlist == BJ_Locked) {
			/*
			 * The buffer is on the committing transaction's locked
			 * list.  We have the buffer locked, so I/O has
			 * completed.  So we can nail the buffer now.
			 */
			may_free = __dispose_buffer(jh, transaction);
			goto zap_buffer;
		}
		/*
		 * If it is committing, we simply cannot touch it.  We
		 * can remove its next_transaction pointer from the
		 * running transaction if that is set, but nothing
		 * else. */
		set_buffer_freed(bh);
		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction ==
					journal->j_running_transaction);
			jh->b_next_transaction = NULL;
		}
		jbd2_journal_put_journal_head(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		spin_unlock(&journal->j_state_lock);
		/* Buffer stays pinned by the committing transaction. */
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	/* Drop the reference taken by jbd2_journal_grab_journal_head(). */
	jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	spin_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
	/* Scrub all buffer state so the page can be released cleanly. */
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	bh->b_bdev = NULL;
	return may_free;
}

/**
 * void jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  length of page to invalidate.
 *
 * Reap page buffers containing data after offset in page.
1901470decc6SDave Kleikamp * 1902470decc6SDave Kleikamp */ 1903f7f4bccbSMingming Cao void jbd2_journal_invalidatepage(journal_t *journal, 1904470decc6SDave Kleikamp struct page *page, 1905470decc6SDave Kleikamp unsigned long offset) 1906470decc6SDave Kleikamp { 1907470decc6SDave Kleikamp struct buffer_head *head, *bh, *next; 1908470decc6SDave Kleikamp unsigned int curr_off = 0; 1909470decc6SDave Kleikamp int may_free = 1; 1910470decc6SDave Kleikamp 1911470decc6SDave Kleikamp if (!PageLocked(page)) 1912470decc6SDave Kleikamp BUG(); 1913470decc6SDave Kleikamp if (!page_has_buffers(page)) 1914470decc6SDave Kleikamp return; 1915470decc6SDave Kleikamp 1916470decc6SDave Kleikamp /* We will potentially be playing with lists other than just the 1917470decc6SDave Kleikamp * data lists (especially for journaled data mode), so be 1918470decc6SDave Kleikamp * cautious in our locking. */ 1919470decc6SDave Kleikamp 1920470decc6SDave Kleikamp head = bh = page_buffers(page); 1921470decc6SDave Kleikamp do { 1922470decc6SDave Kleikamp unsigned int next_off = curr_off + bh->b_size; 1923470decc6SDave Kleikamp next = bh->b_this_page; 1924470decc6SDave Kleikamp 1925470decc6SDave Kleikamp if (offset <= curr_off) { 1926470decc6SDave Kleikamp /* This block is wholly outside the truncation point */ 1927470decc6SDave Kleikamp lock_buffer(bh); 1928470decc6SDave Kleikamp may_free &= journal_unmap_buffer(journal, bh); 1929470decc6SDave Kleikamp unlock_buffer(bh); 1930470decc6SDave Kleikamp } 1931470decc6SDave Kleikamp curr_off = next_off; 1932470decc6SDave Kleikamp bh = next; 1933470decc6SDave Kleikamp 1934470decc6SDave Kleikamp } while (bh != head); 1935470decc6SDave Kleikamp 1936470decc6SDave Kleikamp if (!offset) { 1937470decc6SDave Kleikamp if (may_free && try_to_free_buffers(page)) 1938470decc6SDave Kleikamp J_ASSERT(!page_has_buffers(page)); 1939470decc6SDave Kleikamp } 1940470decc6SDave Kleikamp } 1941470decc6SDave Kleikamp 1942470decc6SDave Kleikamp /* 1943470decc6SDave Kleikamp * File 
a buffer on the given transaction list. 1944470decc6SDave Kleikamp */ 1945f7f4bccbSMingming Cao void __jbd2_journal_file_buffer(struct journal_head *jh, 1946470decc6SDave Kleikamp transaction_t *transaction, int jlist) 1947470decc6SDave Kleikamp { 1948470decc6SDave Kleikamp struct journal_head **list = NULL; 1949470decc6SDave Kleikamp int was_dirty = 0; 1950470decc6SDave Kleikamp struct buffer_head *bh = jh2bh(jh); 1951470decc6SDave Kleikamp 1952470decc6SDave Kleikamp J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 1953470decc6SDave Kleikamp assert_spin_locked(&transaction->t_journal->j_list_lock); 1954470decc6SDave Kleikamp 1955470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 1956470decc6SDave Kleikamp J_ASSERT_JH(jh, jh->b_transaction == transaction || 1957470decc6SDave Kleikamp jh->b_transaction == 0); 1958470decc6SDave Kleikamp 1959470decc6SDave Kleikamp if (jh->b_transaction && jh->b_jlist == jlist) 1960470decc6SDave Kleikamp return; 1961470decc6SDave Kleikamp 1962470decc6SDave Kleikamp /* The following list of buffer states needs to be consistent 1963470decc6SDave Kleikamp * with __jbd_unexpected_dirty_buffer()'s handling of dirty 1964470decc6SDave Kleikamp * state. 
*/ 1965470decc6SDave Kleikamp 1966470decc6SDave Kleikamp if (jlist == BJ_Metadata || jlist == BJ_Reserved || 1967470decc6SDave Kleikamp jlist == BJ_Shadow || jlist == BJ_Forget) { 1968470decc6SDave Kleikamp if (test_clear_buffer_dirty(bh) || 1969470decc6SDave Kleikamp test_clear_buffer_jbddirty(bh)) 1970470decc6SDave Kleikamp was_dirty = 1; 1971470decc6SDave Kleikamp } 1972470decc6SDave Kleikamp 1973470decc6SDave Kleikamp if (jh->b_transaction) 1974f7f4bccbSMingming Cao __jbd2_journal_temp_unlink_buffer(jh); 1975470decc6SDave Kleikamp jh->b_transaction = transaction; 1976470decc6SDave Kleikamp 1977470decc6SDave Kleikamp switch (jlist) { 1978470decc6SDave Kleikamp case BJ_None: 1979470decc6SDave Kleikamp J_ASSERT_JH(jh, !jh->b_committed_data); 1980470decc6SDave Kleikamp J_ASSERT_JH(jh, !jh->b_frozen_data); 1981470decc6SDave Kleikamp return; 1982470decc6SDave Kleikamp case BJ_SyncData: 1983470decc6SDave Kleikamp list = &transaction->t_sync_datalist; 1984470decc6SDave Kleikamp break; 1985470decc6SDave Kleikamp case BJ_Metadata: 1986470decc6SDave Kleikamp transaction->t_nr_buffers++; 1987470decc6SDave Kleikamp list = &transaction->t_buffers; 1988470decc6SDave Kleikamp break; 1989470decc6SDave Kleikamp case BJ_Forget: 1990470decc6SDave Kleikamp list = &transaction->t_forget; 1991470decc6SDave Kleikamp break; 1992470decc6SDave Kleikamp case BJ_IO: 1993470decc6SDave Kleikamp list = &transaction->t_iobuf_list; 1994470decc6SDave Kleikamp break; 1995470decc6SDave Kleikamp case BJ_Shadow: 1996470decc6SDave Kleikamp list = &transaction->t_shadow_list; 1997470decc6SDave Kleikamp break; 1998470decc6SDave Kleikamp case BJ_LogCtl: 1999470decc6SDave Kleikamp list = &transaction->t_log_list; 2000470decc6SDave Kleikamp break; 2001470decc6SDave Kleikamp case BJ_Reserved: 2002470decc6SDave Kleikamp list = &transaction->t_reserved_list; 2003470decc6SDave Kleikamp break; 2004470decc6SDave Kleikamp case BJ_Locked: 2005470decc6SDave Kleikamp list = &transaction->t_locked_list; 
2006470decc6SDave Kleikamp break; 2007470decc6SDave Kleikamp } 2008470decc6SDave Kleikamp 2009470decc6SDave Kleikamp __blist_add_buffer(list, jh); 2010470decc6SDave Kleikamp jh->b_jlist = jlist; 2011470decc6SDave Kleikamp 2012470decc6SDave Kleikamp if (was_dirty) 2013470decc6SDave Kleikamp set_buffer_jbddirty(bh); 2014470decc6SDave Kleikamp } 2015470decc6SDave Kleikamp 2016f7f4bccbSMingming Cao void jbd2_journal_file_buffer(struct journal_head *jh, 2017470decc6SDave Kleikamp transaction_t *transaction, int jlist) 2018470decc6SDave Kleikamp { 2019470decc6SDave Kleikamp jbd_lock_bh_state(jh2bh(jh)); 2020470decc6SDave Kleikamp spin_lock(&transaction->t_journal->j_list_lock); 2021f7f4bccbSMingming Cao __jbd2_journal_file_buffer(jh, transaction, jlist); 2022470decc6SDave Kleikamp spin_unlock(&transaction->t_journal->j_list_lock); 2023470decc6SDave Kleikamp jbd_unlock_bh_state(jh2bh(jh)); 2024470decc6SDave Kleikamp } 2025470decc6SDave Kleikamp 2026470decc6SDave Kleikamp /* 2027470decc6SDave Kleikamp * Remove a buffer from its current buffer list in preparation for 2028470decc6SDave Kleikamp * dropping it from its current transaction entirely. If the buffer has 2029470decc6SDave Kleikamp * already started to be used by a subsequent transaction, refile the 2030470decc6SDave Kleikamp * buffer on that transaction's metadata list. 
 *
 * Called under journal->j_list_lock
 *
 * Called under jbd_lock_bh_state(jh2bh(jh))
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	/* Clear jbddirty across the move so the list-filing code does not
	 * see a dirty buffer on the wrong list; restored below. */
	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = jh->b_next_transaction;
	jh->b_next_transaction = NULL;
	/* A clean buffer goes to BJ_Reserved; it becomes metadata only
	 * once actually dirtied in the new transaction. */
	__jbd2_journal_file_buffer(jh, jh->b_transaction,
				was_dirty ? BJ_Metadata : BJ_Reserved);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
}

/*
 * For the unlocked version of this call, also make sure that any
 * hanging journal_head is cleaned up if necessary.
 *
 * __jbd2_journal_refile_buffer is usually called as part of a single locked
 * operation on a buffer_head, in which the caller is probably going to
 * be hooking the journal_head onto other lists.  In that case it is up
 * to the caller to remove the journal_head if necessary.  For the
 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
 * doing anything else to the buffer so we need to do the cleanup
 * ourselves to avoid a jh leak.
 *
 * *** The journal_head may be freed by this call! ***
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	__jbd2_journal_refile_buffer(jh);
	jbd_unlock_bh_state(bh);
	/* May free jh if its refcount drops to zero; still under
	 * j_list_lock, per the lock ordering used throughout this file. */
	jbd2_journal_remove_journal_head(bh);

	spin_unlock(&journal->j_list_lock);
	/* Drop the bh reference held by the (now removed) journal_head. */
	__brelse(bh);
}