// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
	J_ASSERT(!transaction_cache);
	transaction_cache = kmem_cache_create("jbd2_transaction_s",
					sizeof(transaction_t),
					0,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
					NULL);
	if (!transaction_cache) {
		pr_emerg("JBD2: failed to create transaction cache\n");
		return -ENOMEM;
	}
	return 0;
}

void jbd2_journal_destroy_transaction_cache(void)
{
	kmem_cache_destroy(transaction_cache);
	transaction_cache = NULL;
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
	if (unlikely(ZERO_OR_NULL_PTR(transaction)))
		return;
	kmem_cache_free(transaction_cache, transaction);
}

/*
 * Base amount of descriptor blocks we reserve for each transaction.
 */
static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
{
	int tag_space = journal->j_blocksize - sizeof(journal_header_t);
	int tags_per_block;

	/* Subtract UUID */
	tag_space -= 16;
	if (jbd2_journal_has_csum_v2or3(journal))
		tag_space -= sizeof(struct jbd2_journal_block_tail);
	/* Commit code leaves a slack space of 16 bytes at the end of block */
	tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
	/*
	 * Revoke descriptors are accounted separately so we need to reserve
	 * space for commit block and normal transaction descriptor blocks.
	 */
	return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
				tags_per_block);
}
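
/*
 * A worked example of the arithmetic above (illustrative numbers, not
 * taken from this source; journal_tag_bytes() depends on the journal
 * features): with a 4096-byte journal block, csum v2/v3 off and 16-byte
 * tags, tag_space = 4096 - 12 (journal_header_t) - 16 (UUID) = 4068,
 * so tags_per_block = (4068 - 16) / 16 = 253. With
 * j_max_transaction_buffers == 8192 we then reserve
 * 1 + DIV_ROUND_UP(8192, 253) = 34 blocks: one commit block plus 33
 * descriptor blocks.
 */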

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply initialise a new transaction: set it up in RUNNING state and
 * add it to the current journal (which should not have an existing
 * running transaction: we only make a new transaction once we have
 * started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 */

static void jbd2_get_transaction(journal_t *journal,
				transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	atomic_set(&transaction->t_updates, 0);
	atomic_set(&transaction->t_outstanding_credits,
		   jbd2_descriptor_blocks_per_trans(journal) +
		   atomic_read(&journal->j_reserved_credits));
	atomic_set(&transaction->t_outstanding_revokes, 0);
	atomic_set(&transaction->t_handle_count, 0);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;
	transaction->t_requested = 0;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * t_max_wait is carefully updated here with use of atomic compare exchange.
 * Note that there could be multiple threads trying to do this simultaneously,
 * hence we use cmpxchg to avoid any locks in this case.
 * With this, t_max_wait can be updated w/o enabling jbd2_journal_enable_debug.
 */
static inline void update_t_max_wait(transaction_t *transaction,
				     unsigned long ts)
{
	unsigned long oldts, newts;

	if (time_after(transaction->t_start, ts)) {
		newts = jbd2_time_diff(ts, transaction->t_start);
		oldts = READ_ONCE(transaction->t_max_wait);
		while (oldts < newts)
			oldts = cmpxchg(&transaction->t_max_wait, oldts, newts);
	}
}
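
/*
 * A sketch of how the cmpxchg() loop above converges (hypothetical
 * values, not from a real trace): suppose t_max_wait == 10 and two
 * threads race in with newts == 20 and newts == 30. cmpxchg() only
 * stores if the word still holds the expected oldts and always returns
 * the value it found, so a successful swap is followed by one more pass
 * that reads back the value just written and exits the loop, while a
 * failed swap retries against whatever larger value the other thread
 * installed. Under any interleaving t_max_wait ends up at 30 and never
 * moves backwards.
 */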

/*
 * Wait until running transaction passes to T_FLUSH state and new transaction
 * can thus be started. Also starts the commit if needed. The function expects
 * running transaction to exist and releases j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);
	int need_to_start;
	tid_t tid = journal->j_running_transaction->t_tid;

	prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	jbd2_might_wait_for_commit(journal);
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

/*
 * Wait until running transaction transitions from T_SWITCH to T_FLUSH
 * state and new transaction can thus be started. The function releases
 * j_state_lock.
 */
static void wait_transaction_switching(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);

	if (WARN_ON(!journal->j_running_transaction ||
		    journal->j_running_transaction->t_state != T_SWITCH)) {
		read_unlock(&journal->j_state_lock);
		return;
	}
	prepare_to_wait_exclusive(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	read_unlock(&journal->j_state_lock);
	/*
	 * We don't call jbd2_might_wait_for_commit() here as there's no
	 * waiting for outstanding handles happening anymore in T_SWITCH state
	 * and handling of reserved handles actually relies on that for
	 * correctness.
	 */
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

static void sub_reserved_credits(journal_t *journal, int blocks)
{
	atomic_sub(blocks, &journal->j_reserved_credits);
	wake_up(&journal->j_wait_reserved);
}

/*
 * Wait until we can add credits for handle to the running transaction.  Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
 * caller must retry.
 *
 * Note: because j_state_lock may be dropped depending on the return
 * value, we need to fake out sparse so it doesn't complain about a
 * locking imbalance.  Callers of add_transaction_credits will need to
 * make a similar accommodation.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
				   int rsv_blocks)
__must_hold(&journal->j_state_lock)
{
	transaction_t *t = journal->j_running_transaction;
	int needed;
	int total = blocks + rsv_blocks;

	/*
	 * If the current transaction is locked down for commit, wait
	 * for the lock to be released.
	 */
	if (t->t_state != T_RUNNING) {
		WARN_ON_ONCE(t->t_state >= T_FLUSH);
		wait_transaction_locked(journal);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/*
	 * If there is not enough space left in the log to write all
	 * potential buffers requested by this operation, we need to
	 * stall pending a log checkpoint to free some more log space.
	 */
	needed = atomic_add_return(total, &t->t_outstanding_credits);
	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large,
		 * then start to commit it: we can then go back and
		 * attach this handle to a new transaction.
		 */
		atomic_sub(total, &t->t_outstanding_credits);

		/*
		 * Is the number of reserved credits in the current transaction too
		 * big to fit this handle? Wait until reserved credits are freed.
		 */
		if (atomic_read(&journal->j_reserved_credits) + total >
		    journal->j_max_transaction_buffers) {
			read_unlock(&journal->j_state_lock);
			jbd2_might_wait_for_commit(journal);
			wait_event(journal->j_wait_reserved,
				   atomic_read(&journal->j_reserved_credits) + total <=
				   journal->j_max_transaction_buffers);
			__acquire(&journal->j_state_lock); /* fake out sparse */
			return 1;
		}

		wait_transaction_locked(journal);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 */
	if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		write_lock(&journal->j_state_lock);
		if (jbd2_log_space_left(journal) <
					journal->j_max_transaction_buffers)
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}

	/* No reservation? We are done... */
	if (!rsv_blocks)
		return 0;

	needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
	/* We allow at most half of a transaction to be reserved */
	if (needed > journal->j_max_transaction_buffers / 2) {
		sub_reserved_credits(journal, rsv_blocks);
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		wait_event(journal->j_wait_reserved,
			 atomic_read(&journal->j_reserved_credits) + rsv_blocks
			 <= journal->j_max_transaction_buffers / 2);
		__acquire(&journal->j_state_lock); /* fake out sparse */
		return 1;
	}
	return 0;
}
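
/*
 * The caller contract in sketch form (this is exactly the dance
 * start_this_handle() below performs): a non-zero return means
 * j_state_lock was dropped and the world may have changed, so the
 * caller retakes the lock and retries from scratch:
 *
 *	repeat:
 *		read_lock(&journal->j_state_lock);
 *		...
 *		if (add_transaction_credits(journal, blocks, rsv_blocks)) {
 *			__release(&journal->j_state_lock);
 *			goto repeat;
 *		}
 */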

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
			     gfp_t gfp_mask)
{
	transaction_t	*transaction, *new_transaction = NULL;
	int		blocks = handle->h_total_credits;
	int		rsv_blocks = 0;
	unsigned long ts = jiffies;

	if (handle->h_rsv_handle)
		rsv_blocks = handle->h_rsv_handle->h_total_credits;

	/*
	 * Limit the number of reserved credits to 1/2 of maximum transaction
	 * size and limit the number of total credits to not exceed maximum
	 * transaction size per operation.
	 */
	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
		printk(KERN_ERR "JBD2: %s wants too many credits "
		       "credits:%d rsv_credits:%d max:%d\n",
		       current->comm, blocks, rsv_blocks,
		       journal->j_max_transaction_buffers);
		WARN_ON(1);
		return -ENOSPC;
	}

alloc_transaction:
	/*
	 * This check is racy but it is just an optimization: allocate a new
	 * transaction early if there are high chances we'll need it. If we
	 * guess wrong, we'll retry or free the unused transaction.
	 */
	if (!data_race(journal->j_running_transaction)) {
		/*
		 * If __GFP_FS is not present, then we may be being called from
		 * inside the fs writeback layer, so we MUST NOT fail.
		 */
		if ((gfp_mask & __GFP_FS) == 0)
			gfp_mask |= __GFP_NOFAIL;
		new_transaction = kmem_cache_zalloc(transaction_cache,
						    gfp_mask);
		if (!new_transaction)
			return -ENOMEM;
	}

	jbd2_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		jbd2_journal_free_transaction(new_transaction);
		return -EROFS;
	}

	/*
	 * Wait on the journal's transaction barrier if necessary. Specifically
	 * we allow reserved handles to proceed because otherwise commit could
	 * deadlock on page writeback not being able to complete.
	 */
	if (!handle->h_reserved && journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction &&
		    (handle->h_reserved || !journal->j_barrier_count)) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	if (!handle->h_reserved) {
		/* We may have dropped j_state_lock - restart in that case */
		if (add_transaction_credits(journal, blocks, rsv_blocks)) {
			/*
			 * add_transaction_credits releases
			 * j_state_lock on a non-zero return
			 */
			__release(&journal->j_state_lock);
			goto repeat;
		}
	} else {
		/*
		 * We have handle reserved so we are allowed to join T_LOCKED
		 * transaction and we don't have to check for transaction size
		 * and journal space. But we still have to wait while running
		 * transaction is being switched to a committing one as it
		 * won't wait for any handles anymore.
		 */
		if (transaction->t_state == T_SWITCH) {
			wait_transaction_switching(journal);
			goto repeat;
		}
		sub_reserved_credits(journal, blocks);
		handle->h_reserved = 0;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction.
	 */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	handle->h_requested_credits = blocks;
	handle->h_revoke_credits_requested = handle->h_revoke_credits;
	handle->h_start_jiffies = jiffies;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd2_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
		  handle, blocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);
	current->journal_info = handle;

	rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
	jbd2_journal_free_transaction(new_transaction);
	/*
	 * Ensure that no allocations done while the transaction is open are
	 * going to recurse back to the fs layer.
	 */
	handle->saved_alloc_context = memalloc_nofs_save();
	return 0;
}

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	handle->h_total_credits = nblocks;
	handle->h_ref = 1;

	return handle;
}

handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
			      int revoke_records, gfp_t gfp_mask,
			      unsigned int type, unsigned int line_no)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	nblocks += DIV_ROUND_UP(revoke_records,
				journal->j_revoke_records_per_block);
	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	if (rsv_blocks) {
		handle_t *rsv_handle;

		rsv_handle = new_handle(rsv_blocks);
		if (!rsv_handle) {
			jbd2_free_handle(handle);
			return ERR_PTR(-ENOMEM);
		}
		rsv_handle->h_reserved = 1;
		rsv_handle->h_journal = journal;
		handle->h_rsv_handle = rsv_handle;
	}
	handle->h_revoke_credits = revoke_records;

	err = start_this_handle(journal, handle, gfp_mask);
	if (err < 0) {
		if (handle->h_rsv_handle)
			jbd2_free_handle(handle->h_rsv_handle);
		jbd2_free_handle(handle);
		return ERR_PTR(err);
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, nblocks);

	return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


/**
 * jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0 (passed via
 * jbd2__journal_start()), we also create another handle with rsv_blocks
 * reserved blocks in the journal. This handle is stored in h_rsv_handle.
 * It is not attached to any particular transaction and thus doesn't
 * block transaction commit. If the caller uses this reserved handle, it
 * has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop() on
 * the parent handle will dispose of the reserved one. A reserved handle
 * has to be converted to a normal handle using
 * jbd2_journal_start_reserved() before it can be used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
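
/*
 * A minimal caller sketch (the journal, the buffer and the credit count
 * are assumed to come from the filesystem; jbd2_journal_stop() is
 * defined later in this file):
 *
 *	handle_t *handle = jbd2_journal_start(journal, 2);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	jbd2_journal_stop(handle);
 *	return err;
 */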

static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
	journal_t *journal = handle->h_journal;

	WARN_ON(!handle->h_reserved);
	sub_reserved_credits(journal, handle->h_total_credits);
	if (t)
		atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}

void jbd2_journal_free_reserved(handle_t *handle)
{
	journal_t *journal = handle->h_journal;

	/* Get j_state_lock to pin running transaction if it exists */
	read_lock(&journal->j_state_lock);
	__jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
	read_unlock(&journal->j_state_lock);
	jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);

/**
 * jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start a handle that has been previously reserved (see the rsv_blocks
 * argument of jbd2__journal_start()). This attaches @handle to the
 * running transaction (or creates one if there's no transaction
 * running). Unlike jbd2_journal_start() this function cannot block on
 * journal commit, checkpointing, or similar stuff. It can block on
 * memory allocation or on a frozen journal, though.
 *
 * Return 0 on success, non-zero on error - handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
				unsigned int line_no)
{
	journal_t *journal = handle->h_journal;
	int ret = -EIO;

	if (WARN_ON(!handle->h_reserved)) {
		/* Someone passed in a normal handle? Just stop it. */
		jbd2_journal_stop(handle);
		return ret;
	}
	/*
	 * Mixing reserved and unreserved handles is of questionable
	 * usefulness. So far nobody seems to need it so just error out.
	 */
	if (WARN_ON(current->journal_info)) {
		jbd2_journal_free_reserved(handle);
		return ret;
	}

	handle->h_journal = NULL;
	/*
	 * GFP_NOFS is here because callers are likely from writeback or
	 * similarly constrained call sites
	 */
	ret = start_this_handle(journal, handle, GFP_NOFS);
	if (ret < 0) {
		handle->h_journal = journal;
		jbd2_journal_free_reserved(handle);
		return ret;
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, handle->h_total_credits);
	return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
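
/*
 * A sketch of the reserved-handle lifecycle described above (assumed
 * caller code with illustrative credit counts):
 *
 *	handle = jbd2__journal_start(journal, 8, 4, 0, GFP_NOFS, 0, 0);
 *	rsv = handle->h_rsv_handle;
 *	handle->h_rsv_handle = NULL;	(keep rsv past jbd2_journal_stop())
 *	...
 *	err = jbd2_journal_start_reserved(rsv, 0, 0);	(now a live handle)
 *
 * or, if the reservation turns out to be unneeded:
 *
 *	jbd2_journal_free_reserved(rsv);
 */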

/**
 * jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 * @revoke_records: number of revoke records to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation - this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure:
 * a return code < 0 implies an error;
 * a return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int result;
	int wanted;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	result = 1;

	read_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (transaction->t_state != T_RUNNING) {
		jbd2_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	nblocks += DIV_ROUND_UP(
			handle->h_revoke_credits_requested + revoke_records,
			journal->j_revoke_records_per_block) -
		DIV_ROUND_UP(
			handle->h_revoke_credits_requested,
			journal->j_revoke_records_per_block);
	wanted = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (wanted > journal->j_max_transaction_buffers) {
		jbd2_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		goto error_out;
	}

	trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
				 transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_total_credits,
				 nblocks);

	handle->h_total_credits += nblocks;
	handle->h_requested_credits += nblocks;
	handle->h_revoke_credits += revoke_records;
	handle->h_revoke_credits_requested += revoke_records;
	result = 0;

	jbd2_debug(3, "extended handle %p by %d\n", handle, nblocks);
error_out:
	read_unlock(&journal->j_state_lock);
	return result;
}

static void stop_this_handle(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int revokes;

	J_ASSERT(journal_current_handle() == handle);
	J_ASSERT(atomic_read(&transaction->t_updates) > 0);
	current->journal_info = NULL;
	/*
	 * Subtract necessary revoke descriptor blocks from handle credits. We
	 * take care to account only for revoke descriptor blocks the
	 * transaction will really need as large sequences of transactions with
	 * small numbers of revokes are relatively common.
	 */
	revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
	if (revokes) {
		int t_revokes, revoke_descriptors;
		int rr_per_blk = journal->j_revoke_records_per_block;

		WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
				> handle->h_total_credits);
		t_revokes = atomic_add_return(revokes,
				&transaction->t_outstanding_revokes);
		revoke_descriptors =
			DIV_ROUND_UP(t_revokes, rr_per_blk) -
			DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
		handle->h_total_credits -= revoke_descriptors;
	}
	atomic_sub(handle->h_total_credits,
		   &transaction->t_outstanding_credits);
	if (handle->h_rsv_handle)
		__jbd2_journal_unreserve_handle(handle->h_rsv_handle,
						transaction);
	if (atomic_dec_and_test(&transaction->t_updates))
		wake_up(&journal->j_wait_updates);

	rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
	/*
	 * Scope of the GFP_NOFS context is over here and so we can restore the
	 * original alloc context.
	 */
	memalloc_nofs_restore(handle->saved_alloc_context);
}

/**
 * jbd2__journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 * @revoke_records: number of revoke record credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve the reserved handle if there's one attached to the
 * passed-in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
			  gfp_t gfp_mask)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	tid_t		tid;
	int		need_to_start;
	int		ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;
	journal = transaction->t_journal;
	tid = transaction->t_tid;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	jbd2_debug(2, "restarting handle %p\n", handle);
	stop_this_handle(handle);
	handle->h_transaction = NULL;

	/*
	 * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
	 * get rid of pointless j_state_lock traffic like this.
	 */
	read_lock(&journal->j_state_lock);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	handle->h_total_credits = nblocks +
		DIV_ROUND_UP(revoke_records,
			     journal->j_revoke_records_per_block);
	handle->h_revoke_credits = revoke_records;
	ret = start_this_handle(journal, handle, gfp_mask);
	trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
				 ret ? 0 : handle->h_transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_total_credits);
	return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
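
/*
 * The extend-or-restart pattern the two calls above enable (a common
 * caller sketch, not tied to any particular filesystem):
 *
 *	err = jbd2_journal_extend(handle, needed, 0);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, needed);
 *	if (err)
 *		goto fail;
 *
 * If extending is denied because the running transaction is full,
 * restarting detaches the handle, starts the commit of the old
 * transaction and reattaches the handle to a new one with the
 * requested credits.
 */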

/*
 * Waits for any outstanding t_updates to finish.
 * This is called with write j_state_lock held.
 */
void jbd2_journal_wait_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	while (1) {
		/*
		 * Note that the running transaction can get freed under us if
		 * this transaction is getting committed in
		 * jbd2_journal_commit_transaction() ->
		 * jbd2_journal_free_transaction(). This can only happen when we
		 * release j_state_lock -> schedule() -> acquire j_state_lock.
		 * Hence we must re-read the j_running_transaction value every
		 * time (after each j_state_lock release/acquire cycle), else
		 * it may lead to a use-after-free of the old, freed
		 * transaction.
		 */
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&transaction->t_updates)) {
			finish_wait(&journal->j_wait_updates, &wait);
			break;
		}
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		write_lock(&journal->j_state_lock);
	}
}

/**
 * jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	jbd2_might_wait_for_commit(journal);

	write_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no reserved handles */
	if (atomic_read(&journal->j_reserved_credits)) {
		write_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) == 0);
		write_lock(&journal->j_state_lock);
	}

	/* Wait until there are no running t_updates */
	jbd2_journal_wait_updates(journal);

	write_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * jbd2_journal_unlock_updates () - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	write_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	write_unlock(&journal->j_state_lock);
	wake_up_all(&journal->j_wait_transaction_locked);
}
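
/*
 * Typical barrier usage, in sketch form: bracket an operation that must
 * not race with any running handle, e.g.
 *
 *	jbd2_journal_lock_updates(journal);
 *	... journal is quiescent; perform the special operation ...
 *	jbd2_journal_unlock_updates(journal);
 */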

static void warn_dirty_buffer(struct buffer_head *bh)
{
	printk(KERN_WARNING
	       "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bh->b_bdev, (unsigned long long)bh->b_blocknr);
}

/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
	struct page *page;
	int offset;
	char *source;
	struct buffer_head *bh = jh2bh(jh);

	J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
	page = bh->b_page;
	offset = offset_in_page(bh->b_data);
	source = kmap_atomic(page);
	/* Fire data frozen trigger just before we copy the data */
	jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
	memcpy(jh->b_frozen_data, source + offset, bh->b_size);
	kunmap_atomic(source);

	/*
	 * Now that the frozen data is saved off, we need to store any matching
	 * triggers.
	 */
	jh->b_frozen_triggers = jh->b_triggers;
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	unsigned long start_lock, time_lock;

	journal = transaction->t_journal;

	jbd2_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */

	start_lock = jiffies;
	lock_buffer(bh);
	spin_lock(&jh->b_state_lock);

	/* If it takes too long to lock the buffer, trace it */
	time_lock = jbd2_time_diff(start_lock, jiffies);
	if (time_lock > HZ/10)
		trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
			jiffies_to_msecs(time_lock));

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh) && jh->b_transaction) {
		warn_dirty_buffer(bh);
		/*
		 * We need to clean the dirty flag and we must do it under the
		 * buffer lock to be sure we don't race with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		/*
		 * The buffer is going to be added to BJ_Reserved list now and
		 * nothing guarantees jbd2_journal_dirty_metadata() will be
		 * ever called for it. So we need to set jbddirty bit here to
		 * make sure the buffer is dirtied and written out when the
		 * journaling machinery is done with it.
		 */
		set_buffer_jbddirty(bh);
	}

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		spin_unlock(&jh->b_state_lock);
		unlock_buffer(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction) {
		unlock_buffer(bh);
		goto done;
	}

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If the buffer is not journaled right now, we need to make sure it
	 * doesn't get written to disk before the caller actually commits the
	 * new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		/*
		 * Make sure all stores to jh (b_modified, b_frozen_data) are
		 * visible before attaching it to the running transaction.
		 * Paired with barrier in jbd2_write_access_granted()
		 */
		smp_wmb();
		spin_lock(&journal->j_list_lock);
		if (test_clear_buffer_dirty(bh)) {
			/*
			 * Execute buffer dirty clearing and jh->b_transaction
			 * assignment under journal->j_list_lock locked to
			 * prevent bh being removed from checkpoint list if
			 * the buffer is in an intermediate state (not dirty
			 * and jh->b_transaction is NULL).
			 */
			JBUFFER_TRACE(jh, "Journalling dirty buffer");
			set_buffer_jbddirty(bh);
		}
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
		unlock_buffer(bh);
		goto done;
	}
	unlock_buffer(bh);

	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		goto attach_next;
	}

	JBUFFER_TRACE(jh, "owned by older transaction");
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

	/*
	 * There is one case we have to be very careful about.  If the
	 * committing transaction is currently writing this buffer out to disk
	 * and has NOT made a copy-out, then we cannot modify the buffer
	 * contents at all right now.  The essence of copy-out is that it is
	 * the extra copy, not the primary copy, which gets journaled.  If the
	 * primary copy is already going to disk then we cannot do copy-out
	 * here.
	 */
	if (buffer_shadow(bh)) {
		JBUFFER_TRACE(jh, "on shadow: sleep");
		spin_unlock(&jh->b_state_lock);
		wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
		goto repeat;
	}

	/*
	 * Only do the copy if the currently-owning transaction still needs it.
	 * If buffer isn't on BJ_Metadata list, the committing transaction is
	 * past that stage (here we use the fact that BH_Shadow is set under
	 * bh_state lock together with refiling to BJ_Shadow list and at this
	 * point we know the buffer doesn't have BH_Shadow set).
	 *
	 * Subtle point, though: if this is a get_undo_access, then we will be
	 * relying on the frozen_data to contain the new value of the
	 * committed_data record after the transaction, so we HAVE to force the
	 * frozen_data copy in that case.
	 */
	if (jh->b_jlist == BJ_Metadata || force_copy) {
		JBUFFER_TRACE(jh, "generate frozen data");
		if (!frozen_buffer) {
			JBUFFER_TRACE(jh, "allocate memory for buffer");
			spin_unlock(&jh->b_state_lock);
			frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			goto repeat;
		}
		jh->b_frozen_data = frozen_buffer;
		frozen_buffer = NULL;
		jbd2_freeze_jh_data(jh);
	}
attach_next:
	/*
	 * Make sure all stores to jh (b_modified, b_frozen_data) are visible
	 * before attaching it to the running transaction. Paired with barrier
	 * in jbd2_write_access_granted()
	 */
	smp_wmb();
	jh->b_next_transaction = transaction;

done:
	spin_unlock(&jh->b_state_lock);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
							bool undo)
{
	struct journal_head *jh;
	bool ret = false;

	/* Dirty buffers require special handling... */
	if (buffer_dirty(bh))
		return false;

	/*
	 * RCU protects us from dereferencing freed pages. So the checks we do
	 * are guaranteed not to oops. However the jh slab object can get freed
	 * & reallocated while we work with it. So we have to be careful. When
	 * we see jh attached to the running transaction, we know it must stay
	 * so until the transaction is committed. Thus jh won't be freed and
	 * will be attached to the same bh while we run.  However it can
	 * happen that jh gets freed, reallocated, and attached to the
	 * transaction just after we get the pointer to it from bh. So we have
	 * to be careful and recheck jh still belongs to our bh before we
	 * return success.
	 */
	rcu_read_lock();
	if (!buffer_jbd(bh))
		goto out;
	/* This should be bh2jh() but that doesn't work with inline functions */
	jh = READ_ONCE(bh->b_private);
	if (!jh)
		goto out;
	/* For undo access buffer must have data copied */
	if (undo && !jh->b_committed_data)
		goto out;
	if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
	    READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
		goto out;
	/*
	 * There are two reasons for the barrier here:
	 * 1) Make sure to fetch b_bh after we did previous checks so that we
	 * detect when jh went through free, realloc, attach to transaction
	 * while we were checking. Paired with implicit barrier in that path.
	 * 2) So that access to bh done after jbd2_write_access_granted()
	 * doesn't get reordered and see inconsistent state of concurrent
	 * do_get_write_access().
	 */
	smp_mb();
	if (unlikely(jh->b_bh != bh))
		goto out;
	ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/**
 * jbd2_journal_get_write_access() - notify intent to modify a buffer
 *				     for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh;
	int rc;

	if (is_handle_aborted(handle))
		return -EROFS;

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd2_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	journal = transaction->t_journal;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	spin_lock(&jh->b_state_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous jbd2_journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		spin_lock(&journal->j_list_lock);
		jh->b_next_transaction = transaction;
		spin_unlock(&journal->j_list_lock);
	}
	spin_unlock(&jh->b_state_lock);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and then reallocating it as data - this would cause a second revoke,
	 * which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
out:
	jbd2_journal_put_journal_head(jh);
	return err;
}
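
/*
 * A caller sketch for the newly-created-buffer path (assumed helper
 * calls; 'sb', 'blocknr' and 'handle' come from the filesystem):
 *
 *	bh = sb_getblk(sb, blocknr);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	err = jbd2_journal_get_create_access(handle, bh);
 *	if (!err) {
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 *	if (!err)
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 */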

/**
 * jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space: we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of: buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh;
	char *committed_data = NULL;

	if (is_handle_aborted(handle))
		return -EROFS;

	if (jbd2_write_access_granted(handle, bh, true))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data)
		committed_data = jbd2_alloc(jh2bh(jh)->b_size,
					    GFP_NOFS|__GFP_NOFAIL);

	spin_lock(&jh->b_state_lock);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			spin_unlock(&jh->b_state_lock);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	spin_unlock(&jh->b_state_lock);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}
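
/*
 * A sketch of the intended use, modelled on the bitmap-deallocation
 * case described above (names are illustrative):
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		goto out;
 *	... clear bits in bitmap_bh->b_data; the allocator meanwhile
 *	consults jh->b_committed_data so blocks freed in this
 *	transaction are not handed out again before the free commits ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */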

/**
 * jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

	if (WARN_ON_ONCE(!jh))
		return;
	jh->b_triggers = type;
	jbd2_journal_put_journal_head(jh);
}

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
				struct jbd2_buffer_trigger_type *triggers)
{
	struct buffer_head *bh = jh2bh(jh);

	if (!triggers || !triggers->t_frozen)
		return;

	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
			       struct jbd2_buffer_trigger_type *triggers)
{
	if (!triggers || !triggers->t_abort)
		return;

	triggers->t_abort(triggers, jh2bh(jh));
}
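
/*
 * A sketch of wiring up commit triggers (the t_frozen signature matches
 * the call above; the checksum recomputation is an assumed example of
 * what a filesystem might do while the frozen copy is still mapped):
 *
 *	static void my_frozen(struct jbd2_buffer_trigger_type *type,
 *			      struct buffer_head *bh, void *mapped_data,
 *			      size_t size)
 *	{
 *		recompute_checksum(mapped_data, size);	(hypothetical helper)
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */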
1466 
1467 /**
1468  * jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
1469  * @handle: transaction to add buffer to.
1470  * @bh: buffer to mark
1471  *
1472  * mark dirty metadata which needs to be journaled as part of the current
1473  * transaction.
1474  *
1475  * The buffer must have previously had jbd2_journal_get_write_access()
1476  * called so that it has a valid journal_head attached to the buffer
1477  * head.
1478  *
1479  * The buffer is placed on the transaction's metadata list and is marked
1480  * as belonging to the transaction.
1481  *
1482  * Returns error number or 0 on success.
1483  *
1484  * Special care needs to be taken if the buffer already belongs to the
1485  * current committing transaction (in which case we should have frozen
1486  * data present for that commit).  In that case, we don't relink the
1487  * buffer: that only gets done when the old transaction finally
1488  * completes its commit.
1489  */
1490 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1491 {
1492 	transaction_t *transaction = handle->h_transaction;
1493 	journal_t *journal;
1494 	struct journal_head *jh;
1495 	int ret = 0;
1496 
1497 	if (!buffer_jbd(bh))
1498 		return -EUCLEAN;
1499 
1500 	/*
1501 	 * We don't grab jh reference here since the buffer must be part
1502 	 * of the running transaction.
1503 	 */
1504 	jh = bh2jh(bh);
1505 	jbd2_debug(5, "journal_head %p\n", jh);
1506 	JBUFFER_TRACE(jh, "entry");
1507 
1508 	/*
1509 	 * This and the following assertions are unreliable since we may see jh
1510 	 * in inconsistent state unless we grab bh_state lock. But this is
1511 	 * crucial to catch bugs so let's do a reliable check until the
1512 	 * lockless handling is fully proven.
1513 	 */
1514 	if (data_race(jh->b_transaction != transaction &&
1515 	    jh->b_next_transaction != transaction)) {
1516 		spin_lock(&jh->b_state_lock);
1517 		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
1518 				jh->b_next_transaction == transaction);
1519 		spin_unlock(&jh->b_state_lock);
1520 	}
1521 	if (jh->b_modified == 1) {
1522 		/* If it's in our transaction it must be in BJ_Metadata list. */
1523 		if (data_race(jh->b_transaction == transaction &&
1524 		    jh->b_jlist != BJ_Metadata)) {
1525 			spin_lock(&jh->b_state_lock);
1526 			if (jh->b_transaction == transaction &&
1527 			    jh->b_jlist != BJ_Metadata)
1528 				pr_err("JBD2: assertion failure: h_type=%u "
1529 				       "h_line_no=%u block_no=%llu jlist=%u\n",
1530 				       handle->h_type, handle->h_line_no,
1531 				       (unsigned long long) bh->b_blocknr,
1532 				       jh->b_jlist);
1533 			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
1534 					jh->b_jlist == BJ_Metadata);
1535 			spin_unlock(&jh->b_state_lock);
1536 		}
1537 		goto out;
1538 	}
1539 
1540 	journal = transaction->t_journal;
1541 	spin_lock(&jh->b_state_lock);
1542 
1543 	if (is_handle_aborted(handle)) {
1544 		/*
1545 		 * Check journal aborting with @jh->b_state_lock locked,
1546 		 * since 'jh->b_transaction' could be replaced with
1547 		 * 'jh->b_next_transaction' during old transaction
1548 		 * committing if journal aborted, which may fail
1549 		 * assertion on 'jh->b_frozen_data == NULL'.
1550 		 */
1551 		ret = -EROFS;
1552 		goto out_unlock_bh;
1553 	}
1554 
1555 	if (jh->b_modified == 0) {
1556 		/*
1557 		 * This buffer's got modified and becoming part
1558 		 * of the transaction. This needs to be done
1559 		 * once a transaction -bzzz
1560 		 */
1561 		if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
1562 			ret = -ENOSPC;
1563 			goto out_unlock_bh;
1564 		}
1565 		jh->b_modified = 1;
1566 		handle->h_total_credits--;
1567 	}
1568 
1569 	/*
 1570 	 * Fastpath, to avoid expensive locking.  If this buffer is already
1571 	 * on the running transaction's metadata list there is nothing to do.
1572 	 * Nobody can take it off again because there is a handle open.
1573 	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
1574 	 * result in this test being false, so we go in and take the locks.
1575 	 */
1576 	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
1577 		JBUFFER_TRACE(jh, "fastpath");
1578 		if (unlikely(jh->b_transaction !=
1579 			     journal->j_running_transaction)) {
1580 			printk(KERN_ERR "JBD2: %s: "
1581 			       "jh->b_transaction (%llu, %p, %u) != "
1582 			       "journal->j_running_transaction (%p, %u)\n",
1583 			       journal->j_devname,
1584 			       (unsigned long long) bh->b_blocknr,
1585 			       jh->b_transaction,
1586 			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
1587 			       journal->j_running_transaction,
1588 			       journal->j_running_transaction ?
1589 			       journal->j_running_transaction->t_tid : 0);
1590 			ret = -EINVAL;
1591 		}
1592 		goto out_unlock_bh;
1593 	}
1594 
1595 	set_buffer_jbddirty(bh);
1596 
1597 	/*
1598 	 * Metadata already on the current transaction list doesn't
1599 	 * need to be filed.  Metadata on another transaction's list must
1600 	 * be committing, and will be refiled once the commit completes:
1601 	 * leave it alone for now.
1602 	 */
1603 	if (jh->b_transaction != transaction) {
1604 		JBUFFER_TRACE(jh, "already on other transaction");
1605 		if (unlikely(((jh->b_transaction !=
1606 			       journal->j_committing_transaction)) ||
1607 			     (jh->b_next_transaction != transaction))) {
1608 			printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
1609 			       "bad jh for block %llu: "
1610 			       "transaction (%p, %u), "
1611 			       "jh->b_transaction (%p, %u), "
1612 			       "jh->b_next_transaction (%p, %u), jlist %u\n",
1613 			       journal->j_devname,
1614 			       (unsigned long long) bh->b_blocknr,
1615 			       transaction, transaction->t_tid,
1616 			       jh->b_transaction,
1617 			       jh->b_transaction ?
1618 			       jh->b_transaction->t_tid : 0,
1619 			       jh->b_next_transaction,
1620 			       jh->b_next_transaction ?
1621 			       jh->b_next_transaction->t_tid : 0,
1622 			       jh->b_jlist);
1623 			WARN_ON(1);
1624 			ret = -EINVAL;
1625 		}
1626 		/* And this case is illegal: we can't reuse another
1627 		 * transaction's data buffer, ever. */
1628 		goto out_unlock_bh;
1629 	}
1630 
1631 	/* That test should have eliminated the following case: */
1632 	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
1633 
1634 	JBUFFER_TRACE(jh, "file as BJ_Metadata");
1635 	spin_lock(&journal->j_list_lock);
1636 	__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
1637 	spin_unlock(&journal->j_list_lock);
1638 out_unlock_bh:
1639 	spin_unlock(&jh->b_state_lock);
1640 out:
1641 	JBUFFER_TRACE(jh, "exit");
1642 	return ret;
1643 }
1644 
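/*
 * Editorial example (not part of the original source): a filesystem
 * modifying a metadata block under a running handle pairs this call
 * with jbd2_journal_get_write_access(), roughly:
 *
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (err)
 *		goto out;
 *	... modify bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 *
 * The write access call is what attaches the journal_head and, if the
 * buffer is part of the committing transaction, freezes the old data;
 * only then is it valid to file the buffer as metadata here.
 */
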
1645 /**
1646  * jbd2_journal_forget() - bforget() for potentially-journaled buffers.
1647  * @handle: transaction handle
1648  * @bh:     bh to 'forget'
1649  *
1650  * We can only do the bforget if there are no commits pending against the
1651  * buffer.  If the buffer is dirty in the current running transaction we
1652  * can safely unlink it.
1653  *
1654  * bh may not be a journalled buffer at all - it may be a non-JBD
1655  * buffer which came off the hashtable.  Check for this.
1656  *
1657  * Decrements bh->b_count by one.
1658  *
1659  * Allow this call even if the handle has aborted --- it may be part of
1660  * the caller's cleanup after an abort.
1661  */
1662 int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
1663 {
1664 	transaction_t *transaction = handle->h_transaction;
1665 	journal_t *journal;
1666 	struct journal_head *jh;
1667 	int drop_reserve = 0;
1668 	int err = 0;
1669 	int was_modified = 0;
1670 
1671 	if (is_handle_aborted(handle))
1672 		return -EROFS;
1673 	journal = transaction->t_journal;
1674 
1675 	BUFFER_TRACE(bh, "entry");
1676 
1677 	jh = jbd2_journal_grab_journal_head(bh);
1678 	if (!jh) {
1679 		__bforget(bh);
1680 		return 0;
1681 	}
1682 
1683 	spin_lock(&jh->b_state_lock);
1684 
1685 	/* Critical error: attempting to delete a bitmap buffer, maybe?
1686 	 * Don't do any jbd operations, and return an error. */
1687 	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
1688 			 "inconsistent data on disk")) {
1689 		err = -EIO;
1690 		goto drop;
1691 	}
1692 
1693 	/* keep track of whether or not this transaction modified us */
1694 	was_modified = jh->b_modified;
1695 
1696 	/*
 1697 	 * The buffer is being removed from the transaction, so we must
 1698 	 * drop all references to it -bzzz
1699 	 */
1700 	jh->b_modified = 0;
1701 
1702 	if (jh->b_transaction == transaction) {
1703 		J_ASSERT_JH(jh, !jh->b_frozen_data);
1704 
1705 		/* If we are forgetting a buffer which is already part
1706 		 * of this transaction, then we can just drop it from
1707 		 * the transaction immediately. */
1708 		clear_buffer_dirty(bh);
1709 		clear_buffer_jbddirty(bh);
1710 
1711 		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
1712 
1713 		/*
1714 		 * we only want to drop a reference if this transaction
1715 		 * modified the buffer
1716 		 */
1717 		if (was_modified)
1718 			drop_reserve = 1;
1719 
1720 		/*
1721 		 * We are no longer going to journal this buffer.
1722 		 * However, the commit of this transaction is still
1723 		 * important to the buffer: the delete that we are now
1724 		 * processing might obsolete an old log entry, so by
1725 		 * committing, we can satisfy the buffer's checkpoint.
1726 		 *
1727 		 * So, if we have a checkpoint on the buffer, we should
1728 		 * now refile the buffer on our BJ_Forget list so that
1729 		 * we know to remove the checkpoint after we commit.
1730 		 */
1731 
1732 		spin_lock(&journal->j_list_lock);
1733 		if (jh->b_cp_transaction) {
1734 			__jbd2_journal_temp_unlink_buffer(jh);
1735 			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1736 		} else {
1737 			__jbd2_journal_unfile_buffer(jh);
1738 			jbd2_journal_put_journal_head(jh);
1739 		}
1740 		spin_unlock(&journal->j_list_lock);
1741 	} else if (jh->b_transaction) {
1742 		J_ASSERT_JH(jh, (jh->b_transaction ==
1743 				 journal->j_committing_transaction));
1744 		/* However, if the buffer is still owned by a prior
1745 		 * (committing) transaction, we can't drop it yet... */
1746 		JBUFFER_TRACE(jh, "belongs to older transaction");
 1747 		/* ... but we CAN drop it from the new transaction by
 1748 		 * marking the buffer as freed and setting b_next_transaction
 1749 		 * to the new transaction, so that not only does the commit
 1750 		 * code know it should clear dirty bits when it is done with
 1751 		 * the buffer, but also the buffer can be checkpointed only
1752 		 * after the new transaction commits. */
1753 
1754 		set_buffer_freed(bh);
1755 
1756 		if (!jh->b_next_transaction) {
1757 			spin_lock(&journal->j_list_lock);
1758 			jh->b_next_transaction = transaction;
1759 			spin_unlock(&journal->j_list_lock);
1760 		} else {
1761 			J_ASSERT(jh->b_next_transaction == transaction);
1762 
1763 			/*
1764 			 * only drop a reference if this transaction modified
1765 			 * the buffer
1766 			 */
1767 			if (was_modified)
1768 				drop_reserve = 1;
1769 		}
1770 	} else {
1771 		/*
 1772 		 * Finally, if the buffer does not belong to any
1773 		 * transaction, we can just drop it now if it has no
1774 		 * checkpoint.
1775 		 */
1776 		spin_lock(&journal->j_list_lock);
1777 		if (!jh->b_cp_transaction) {
 1778 			JBUFFER_TRACE(jh, "belongs to no transaction");
1779 			spin_unlock(&journal->j_list_lock);
1780 			goto drop;
1781 		}
1782 
1783 		/*
1784 		 * Otherwise, if the buffer has been written to disk,
1785 		 * it is safe to remove the checkpoint and drop it.
1786 		 */
1787 		if (!buffer_dirty(bh)) {
1788 			__jbd2_journal_remove_checkpoint(jh);
1789 			spin_unlock(&journal->j_list_lock);
1790 			goto drop;
1791 		}
1792 
1793 		/*
 1794 		 * The buffer has not yet been written to disk; we should
 1795 		 * attach this buffer to the current transaction so that the
1796 		 * buffer can be checkpointed only after the current
1797 		 * transaction commits.
1798 		 */
1799 		clear_buffer_dirty(bh);
1800 		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1801 		spin_unlock(&journal->j_list_lock);
1802 	}
1803 drop:
1804 	__brelse(bh);
1805 	spin_unlock(&jh->b_state_lock);
1806 	jbd2_journal_put_journal_head(jh);
1807 	if (drop_reserve) {
1808 		/* no need to reserve log space for this block -bzzz */
1809 		handle->h_total_credits++;
1810 	}
1811 	return err;
1812 }
1813 
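/*
 * Editorial example (not part of the original source): a caller
 * releasing a just-freed metadata block might look roughly like:
 *
 *	bh = sb_find_get_block(sb, blocknr);
 *	if (bh) {
 *		BUFFER_TRACE(bh, "freed metadata block, forget it");
 *		err = jbd2_journal_forget(handle, bh);
 *	}
 *
 * Since jbd2_journal_forget() consumes the b_count reference taken by
 * sb_find_get_block(), no separate brelse() is needed afterwards.
 */
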
1814 /**
1815  * jbd2_journal_stop() - complete a transaction
1816  * @handle: transaction to complete.
1817  *
1818  * All done for a particular handle.
1819  *
1820  * There is not much action needed here.  We just return any remaining
1821  * buffer credits to the transaction and remove the handle.  The only
1822  * complication is that we need to start a commit operation if the
1823  * filesystem is marked for synchronous update.
1824  *
1825  * jbd2_journal_stop itself will not usually return an error, but it may
1826  * do so in unusual circumstances.  In particular, expect it to
1827  * return -EIO if a jbd2_journal_abort has been executed since the
1828  * transaction began.
1829  */
1830 int jbd2_journal_stop(handle_t *handle)
1831 {
1832 	transaction_t *transaction = handle->h_transaction;
1833 	journal_t *journal;
1834 	int err = 0, wait_for_commit = 0;
1835 	tid_t tid;
1836 	pid_t pid;
1837 
1838 	if (--handle->h_ref > 0) {
1839 		jbd2_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
1840 						 handle->h_ref);
1841 		if (is_handle_aborted(handle))
1842 			return -EIO;
1843 		return 0;
1844 	}
1845 	if (!transaction) {
1846 		/*
1847 		 * Handle is already detached from the transaction so there is
1848 		 * nothing to do other than free the handle.
1849 		 */
1850 		memalloc_nofs_restore(handle->saved_alloc_context);
1851 		goto free_and_exit;
1852 	}
1853 	journal = transaction->t_journal;
1854 	tid = transaction->t_tid;
1855 
1856 	if (is_handle_aborted(handle))
1857 		err = -EIO;
1858 
1859 	jbd2_debug(4, "Handle %p going down\n", handle);
1860 	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
1861 				tid, handle->h_type, handle->h_line_no,
1862 				jiffies - handle->h_start_jiffies,
1863 				handle->h_sync, handle->h_requested_credits,
1864 				(handle->h_requested_credits -
1865 				 handle->h_total_credits));
1866 
1867 	/*
1868 	 * Implement synchronous transaction batching.  If the handle
1869 	 * was synchronous, don't force a commit immediately.  Let's
1870 	 * yield and let another thread piggyback onto this
1871 	 * transaction.  Keep doing that while new threads continue to
1872 	 * arrive.  It doesn't cost much - we're about to run a commit
1873 	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
1874 	 * operations by 30x or more...
1875 	 *
 1876 	 * We try to optimize the sleep time against what the
 1877 	 * underlying disk can do, instead of having a static sleep
 1878 	 * time.  This is useful for the case where our storage is so
 1879 	 * fast that it is better to go ahead and force a flush
1880 	 * and wait for the transaction to be committed than it is to
1881 	 * wait for an arbitrary amount of time for new writers to
1882 	 * join the transaction.  We achieve this by measuring how
1883 	 * long it takes to commit a transaction, and compare it with
1884 	 * how long this transaction has been running, and if run time
1885 	 * < commit time then we sleep for the delta and commit.  This
1886 	 * greatly helps super fast disks that would see slowdowns as
1887 	 * more threads started doing fsyncs.
1888 	 *
1889 	 * But don't do this if this process was the most recent one
1890 	 * to perform a synchronous write.  We do this to detect the
1891 	 * case where a single process is doing a stream of sync
1892 	 * writes.  No point in waiting for joiners in that case.
1893 	 *
1894 	 * Setting max_batch_time to 0 disables this completely.
1895 	 */
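	/*
	 * Editorial note: with jbd2's usual defaults (j_min_batch_time
	 * of 0, j_max_batch_time of 15000us), a measured average commit
	 * time of e.g. 5ms falls between the two bounds and is used
	 * as-is below.  A sync handle whose transaction has only been
	 * running for 2ms then sleeps roughly one commit time so that
	 * further writers can join before the commit is forced.
	 */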
1896 	pid = current->pid;
1897 	if (handle->h_sync && journal->j_last_sync_writer != pid &&
1898 	    journal->j_max_batch_time) {
1899 		u64 commit_time, trans_time;
1900 
1901 		journal->j_last_sync_writer = pid;
1902 
1903 		read_lock(&journal->j_state_lock);
1904 		commit_time = journal->j_average_commit_time;
1905 		read_unlock(&journal->j_state_lock);
1906 
1907 		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1908 						   transaction->t_start_time));
1909 
1910 		commit_time = max_t(u64, commit_time,
1911 				    1000*journal->j_min_batch_time);
1912 		commit_time = min_t(u64, commit_time,
1913 				    1000*journal->j_max_batch_time);
1914 
1915 		if (trans_time < commit_time) {
1916 			ktime_t expires = ktime_add_ns(ktime_get(),
1917 						       commit_time);
1918 			set_current_state(TASK_UNINTERRUPTIBLE);
1919 			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1920 		}
1921 	}
1922 
1923 	if (handle->h_sync)
1924 		transaction->t_synchronous_commit = 1;
1925 
1926 	/*
1927 	 * If the handle is marked SYNC, we need to set another commit
1928 	 * going!  We also want to force a commit if the transaction is too
1929 	 * old now.
1930 	 */
1931 	if (handle->h_sync ||
1932 	    time_after_eq(jiffies, transaction->t_expires)) {
1933 		/* Do this even for aborted journals: an abort still
 1934 		 * completes the commit thread; it just doesn't write
1935 		 * anything to disk. */
1936 
1937 		jbd2_debug(2, "transaction too old, requesting commit for "
1938 					"handle %p\n", handle);
1939 		/* This is non-blocking */
1940 		jbd2_log_start_commit(journal, tid);
1941 
1942 		/*
1943 		 * Special case: JBD2_SYNC synchronous updates require us
1944 		 * to wait for the commit to complete.
1945 		 */
1946 		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1947 			wait_for_commit = 1;
1948 	}
1949 
1950 	/*
1951 	 * Once stop_this_handle() drops t_updates, the transaction could start
1952 	 * committing on us and eventually disappear.  So we must not
1953 	 * dereference transaction pointer again after calling
1954 	 * stop_this_handle().
1955 	 */
1956 	stop_this_handle(handle);
1957 
1958 	if (wait_for_commit)
1959 		err = jbd2_log_wait_commit(journal, tid);
1960 
1961 free_and_exit:
1962 	if (handle->h_rsv_handle)
1963 		jbd2_free_handle(handle->h_rsv_handle);
1964 	jbd2_free_handle(handle);
1965 	return err;
1966 }
1967 
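/*
 * Editorial example (not part of the original source): the usual
 * lifetime of a handle, as seen from a filesystem, is roughly:
 *
 *	handle_t *handle = jbd2_journal_start(journal, credits);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... get write access, modify and dirty buffers ...
 *	err = jbd2_journal_stop(handle);
 *
 * jbd2_journal_stop() must be called once for every successful
 * jbd2_journal_start(), even if the handle was aborted in between.
 */
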
1968 /*
1969  *
1970  * List management code snippets: various functions for manipulating the
1971  * transaction buffer lists.
1972  *
1973  */
1974 
1975 /*
1976  * Append a buffer to a transaction list, given the transaction's list head
1977  * pointer.
1978  *
1979  * j_list_lock is held.
1980  *
1981  * jh->b_state_lock is held.
1982  */
1983 
1984 static inline void
1985 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1986 {
1987 	if (!*list) {
1988 		jh->b_tnext = jh->b_tprev = jh;
1989 		*list = jh;
1990 	} else {
1991 		/* Insert at the tail of the list to preserve order */
1992 		struct journal_head *first = *list, *last = first->b_tprev;
1993 		jh->b_tprev = last;
1994 		jh->b_tnext = first;
1995 		last->b_tnext = first->b_tprev = jh;
1996 	}
1997 }
1998 
1999 /*
2000  * Remove a buffer from a transaction list, given the transaction's list
2001  * head pointer.
2002  *
2003  * Called with j_list_lock held, and the journal may not be locked.
2004  *
2005  * jh->b_state_lock is held.
2006  */
2007 
2008 static inline void
2009 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
2010 {
2011 	if (*list == jh) {
2012 		*list = jh->b_tnext;
2013 		if (*list == jh)
2014 			*list = NULL;
2015 	}
2016 	jh->b_tprev->b_tnext = jh->b_tnext;
2017 	jh->b_tnext->b_tprev = jh->b_tprev;
2018 }
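
/*
 * Editorial note: the b_tnext/b_tprev lists manipulated above are
 * circular and doubly linked, so a single-element list is its own
 * neighbour in both directions:
 *
 *	jh->b_tnext == jh->b_tprev == jh == *list
 *
 * which is exactly the state __blist_add_buffer() establishes when it
 * files a buffer onto an empty list.
 */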
2019 
2020 /*
2021  * Remove a buffer from the appropriate transaction list.
2022  *
2023  * Note that this function can *change* the value of
 2024  * jh->b_transaction->t_buffers, t_forget, t_shadow_list or
2025  * t_reserved_list.  If the caller is holding onto a copy of one of these
2026  * pointers, it could go bad.  Generally the caller needs to re-read the
2027  * pointer from the transaction_t.
2028  *
2029  * Called under j_list_lock.
2030  */
2031 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
2032 {
2033 	struct journal_head **list = NULL;
2034 	transaction_t *transaction;
2035 	struct buffer_head *bh = jh2bh(jh);
2036 
2037 	lockdep_assert_held(&jh->b_state_lock);
2038 	transaction = jh->b_transaction;
2039 	if (transaction)
2040 		assert_spin_locked(&transaction->t_journal->j_list_lock);
2041 
2042 	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
2043 	if (jh->b_jlist != BJ_None)
2044 		J_ASSERT_JH(jh, transaction != NULL);
2045 
2046 	switch (jh->b_jlist) {
2047 	case BJ_None:
2048 		return;
2049 	case BJ_Metadata:
2050 		transaction->t_nr_buffers--;
2051 		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
2052 		list = &transaction->t_buffers;
2053 		break;
2054 	case BJ_Forget:
2055 		list = &transaction->t_forget;
2056 		break;
2057 	case BJ_Shadow:
2058 		list = &transaction->t_shadow_list;
2059 		break;
2060 	case BJ_Reserved:
2061 		list = &transaction->t_reserved_list;
2062 		break;
2063 	}
2064 
2065 	__blist_del_buffer(list, jh);
2066 	jh->b_jlist = BJ_None;
2067 	if (transaction && is_journal_aborted(transaction->t_journal))
2068 		clear_buffer_jbddirty(bh);
2069 	else if (test_clear_buffer_jbddirty(bh))
2070 		mark_buffer_dirty(bh);	/* Expose it to the VM */
2071 }
2072 
2073 /*
2074  * Remove buffer from all transactions. The caller is responsible for dropping
2075  * the jh reference that belonged to the transaction.
2076  *
2077  * Called with bh_state lock and j_list_lock
2078  */
2079 static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
2080 {
2081 	J_ASSERT_JH(jh, jh->b_transaction != NULL);
2082 	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
2083 
2084 	__jbd2_journal_temp_unlink_buffer(jh);
2085 	jh->b_transaction = NULL;
2086 }
2087 
2088 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
2089 {
2090 	struct buffer_head *bh = jh2bh(jh);
2091 
2092 	/* Get reference so that buffer cannot be freed before we unlock it */
2093 	get_bh(bh);
2094 	spin_lock(&jh->b_state_lock);
2095 	spin_lock(&journal->j_list_lock);
2096 	__jbd2_journal_unfile_buffer(jh);
2097 	spin_unlock(&journal->j_list_lock);
2098 	spin_unlock(&jh->b_state_lock);
2099 	jbd2_journal_put_journal_head(jh);
2100 	__brelse(bh);
2101 }
2102 
2103 /*
2104  * Called from jbd2_journal_try_to_free_buffers().
2105  *
2106  * Called under jh->b_state_lock
2107  */
2108 static void
2109 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
2110 {
2111 	struct journal_head *jh;
2112 
2113 	jh = bh2jh(bh);
2114 
2115 	if (buffer_locked(bh) || buffer_dirty(bh))
2116 		goto out;
2117 
2118 	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
2119 		goto out;
2120 
2121 	spin_lock(&journal->j_list_lock);
2122 	if (jh->b_cp_transaction != NULL) {
2123 		/* written-back checkpointed metadata buffer */
2124 		JBUFFER_TRACE(jh, "remove from checkpoint list");
2125 		__jbd2_journal_remove_checkpoint(jh);
2126 	}
2127 	spin_unlock(&journal->j_list_lock);
2128 out:
2129 	return;
2130 }
2131 
2132 /**
2133  * jbd2_journal_try_to_free_buffers() - try to free page buffers.
2134  * @journal: journal for operation
2135  * @folio: Folio to detach data from.
2136  *
2137  * For all the buffers on this page,
2138  * if they are fully written out ordered data, move them onto BUF_CLEAN
2139  * so try_to_free_buffers() can reap them.
2140  *
 2141  * This function returns true if we wish try_to_free_buffers()
2142  * to be called. We do this if the page is releasable by try_to_free_buffers().
2143  * We also do it if the page has locked or dirty buffers and the caller wants
2144  * us to perform sync or async writeout.
2145  *
2146  * This complicates JBD locking somewhat.  We aren't protected by the
2147  * BKL here.  We wish to remove the buffer from its committing or
2148  * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
2149  *
2150  * This may *change* the value of transaction_t->t_datalist, so anyone
2151  * who looks at t_datalist needs to lock against this function.
2152  *
2153  * Even worse, someone may be doing a jbd2_journal_dirty_data on this
2154  * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
2155  * will come out of the lock with the buffer dirty, which makes it
2156  * ineligible for release here.
2157  *
2158  * Who else is affected by this?  hmm...  Really the only contender
2159  * is do_get_write_access() - it could be looking at the buffer while
2160  * journal_try_to_free_buffer() is changing its state.  But that
2161  * cannot happen because we never reallocate freed data as metadata
2162  * while the data is part of a transaction.  Yes?
2163  *
2164  * Return false on failure, true on success
2165  */
2166 bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio)
2167 {
2168 	struct buffer_head *head;
2169 	struct buffer_head *bh;
2170 	bool ret = false;
2171 
2172 	J_ASSERT(folio_test_locked(folio));
2173 
2174 	head = folio_buffers(folio);
2175 	bh = head;
2176 	do {
2177 		struct journal_head *jh;
2178 
2179 		/*
2180 		 * We take our own ref against the journal_head here to avoid
2181 		 * having to add tons of locking around each instance of
2182 		 * jbd2_journal_put_journal_head().
2183 		 */
2184 		jh = jbd2_journal_grab_journal_head(bh);
2185 		if (!jh)
2186 			continue;
2187 
2188 		spin_lock(&jh->b_state_lock);
2189 		__journal_try_to_free_buffer(journal, bh);
2190 		spin_unlock(&jh->b_state_lock);
2191 		jbd2_journal_put_journal_head(jh);
2192 		if (buffer_jbd(bh))
2193 			goto busy;
2194 	} while ((bh = bh->b_this_page) != head);
2195 
2196 	ret = try_to_free_buffers(folio);
2197 busy:
2198 	return ret;
2199 }
2200 
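/*
 * Editorial example (not part of the original source): a journalling
 * filesystem typically forwards its ->release_folio() here, along the
 * lines of what ext4 does:
 *
 *	static bool example_release_folio(struct folio *folio, gfp_t wait)
 *	{
 *		journal_t *journal = ...;	// fs-specific lookup
 *
 *		if (!journal)
 *			return try_to_free_buffers(folio);
 *		return jbd2_journal_try_to_free_buffers(journal, folio);
 *	}
 */
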
2201 /*
2202  * This buffer is no longer needed.  If it is on an older transaction's
2203  * checkpoint list we need to record it on this transaction's forget list
2204  * to pin this buffer (and hence its checkpointing transaction) down until
2205  * this transaction commits.  If the buffer isn't on a checkpoint list, we
2206  * release it.
2207  * Returns non-zero if JBD no longer has an interest in the buffer.
2208  *
2209  * Called under j_list_lock.
2210  *
2211  * Called under jh->b_state_lock.
2212  */
2213 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
2214 {
2215 	int may_free = 1;
2216 	struct buffer_head *bh = jh2bh(jh);
2217 
2218 	if (jh->b_cp_transaction) {
2219 		JBUFFER_TRACE(jh, "on running+cp transaction");
2220 		__jbd2_journal_temp_unlink_buffer(jh);
2221 		/*
 2222 		 * We don't want to write the buffer anymore; clear the
 2223 		 * dirty bit so that we don't confuse checks in
 2224 		 * __jbd2_journal_file_buffer()
2225 		 */
2226 		clear_buffer_dirty(bh);
2227 		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
2228 		may_free = 0;
2229 	} else {
2230 		JBUFFER_TRACE(jh, "on running transaction");
2231 		__jbd2_journal_unfile_buffer(jh);
2232 		jbd2_journal_put_journal_head(jh);
2233 	}
2234 	return may_free;
2235 }
2236 
2237 /*
2238  * jbd2_journal_invalidate_folio
2239  *
2240  * This code is tricky.  It has a number of cases to deal with.
2241  *
2242  * There are two invariants which this code relies on:
2243  *
2244  * i_size must be updated on disk before we start calling invalidate_folio
2245  * on the data.
2246  *
2247  *  This is done in ext3 by defining an ext3_setattr method which
2248  *  updates i_size before truncate gets going.  By maintaining this
2249  *  invariant, we can be sure that it is safe to throw away any buffers
2250  *  attached to the current transaction: once the transaction commits,
2251  *  we know that the data will not be needed.
2252  *
2253  *  Note however that we can *not* throw away data belonging to the
2254  *  previous, committing transaction!
2255  *
2256  * Any disk blocks which *are* part of the previous, committing
2257  * transaction (and which therefore cannot be discarded immediately) are
 2258  * not going to be reused in the new running transaction.
2259  *
2260  *  The bitmap committed_data images guarantee this: any block which is
2261  *  allocated in one transaction and removed in the next will be marked
2262  *  as in-use in the committed_data bitmap, so cannot be reused until
2263  *  the next transaction to delete the block commits.  This means that
2264  *  leaving committing buffers dirty is quite safe: the disk blocks
2265  *  cannot be reallocated to a different file and so buffer aliasing is
2266  *  not possible.
2267  *
2268  *
2269  * The above applies mainly to ordered data mode.  In writeback mode we
2270  * don't make guarantees about the order in which data hits disk --- in
2271  * particular we don't guarantee that new dirty data is flushed before
2272  * transaction commit --- so it is always safe just to discard data
2273  * immediately in that mode.  --sct
2274  */
2275 
2276 /*
2277  * The journal_unmap_buffer helper function returns zero if the buffer
2278  * concerned remains pinned as an anonymous buffer belonging to an older
2279  * transaction.
2280  *
2281  * We're outside-transaction here.  Either or both of j_running_transaction
2282  * and j_committing_transaction may be NULL.
2283  */
2284 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
2285 				int partial_page)
2286 {
2287 	transaction_t *transaction;
2288 	struct journal_head *jh;
2289 	int may_free = 1;
2290 
2291 	BUFFER_TRACE(bh, "entry");
2292 
2293 	/*
2294 	 * It is safe to proceed here without the j_list_lock because the
2295 	 * buffers cannot be stolen by try_to_free_buffers as long as we are
2296 	 * holding the page lock. --sct
2297 	 */
2298 
2299 	jh = jbd2_journal_grab_journal_head(bh);
2300 	if (!jh)
2301 		goto zap_buffer_unlocked;
2302 
 2303 	/* OK, we have a data buffer in journaled mode */
2304 	write_lock(&journal->j_state_lock);
2305 	spin_lock(&jh->b_state_lock);
2306 	spin_lock(&journal->j_list_lock);
2307 
2308 	/*
2309 	 * We cannot remove the buffer from checkpoint lists until the
2310 	 * transaction adding inode to orphan list (let's call it T)
2311 	 * is committed.  Otherwise if the transaction changing the
2312 	 * buffer would be cleaned from the journal before T is
 2313 	 * committed, a crash would cause the correct contents of
 2314 	 * the buffer to be lost.  On the other hand we have to
2315 	 * clear the buffer dirty bit at latest at the moment when the
2316 	 * transaction marking the buffer as freed in the filesystem
2317 	 * structures is committed because from that moment on the
2318 	 * block can be reallocated and used by a different page.
2319 	 * Since the block hasn't been freed yet but the inode has
2320 	 * already been added to orphan list, it is safe for us to add
2321 	 * the buffer to BJ_Forget list of the newest transaction.
2322 	 *
2323 	 * Also we have to clear buffer_mapped flag of a truncated buffer
2324 	 * because the buffer_head may be attached to the page straddling
2325 	 * i_size (can happen only when blocksize < pagesize) and thus the
2326 	 * buffer_head can be reused when the file is extended again. So we end
2327 	 * up keeping around invalidated buffers attached to transactions'
2328 	 * BJ_Forget list just to stop checkpointing code from cleaning up
2329 	 * the transaction this buffer was modified in.
2330 	 */
2331 	transaction = jh->b_transaction;
2332 	if (transaction == NULL) {
2333 		/* First case: not on any transaction.  If it
2334 		 * has no checkpoint link, then we can zap it:
2335 		 * it's a writeback-mode buffer so we don't care
2336 		 * if it hits disk safely. */
2337 		if (!jh->b_cp_transaction) {
2338 			JBUFFER_TRACE(jh, "not on any transaction: zap");
2339 			goto zap_buffer;
2340 		}
2341 
2342 		if (!buffer_dirty(bh)) {
2343 			/* bdflush has written it.  We can drop it now */
2344 			__jbd2_journal_remove_checkpoint(jh);
2345 			goto zap_buffer;
2346 		}
2347 
2348 		/* OK, it must be in the journal but still not
2349 		 * written fully to disk: it's metadata or
2350 		 * journaled data... */
2351 
2352 		if (journal->j_running_transaction) {
2353 			/* ... and once the current transaction has
2354 			 * committed, the buffer won't be needed any
2355 			 * longer. */
2356 			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
2357 			may_free = __dispose_buffer(jh,
2358 					journal->j_running_transaction);
2359 			goto zap_buffer;
2360 		} else {
2361 			/* There is no currently-running transaction. So the
2362 			 * orphan record which we wrote for this file must have
2363 			 * passed into commit.  We must attach this buffer to
2364 			 * the committing transaction, if it exists. */
2365 			if (journal->j_committing_transaction) {
2366 				JBUFFER_TRACE(jh, "give to committing trans");
2367 				may_free = __dispose_buffer(jh,
2368 					journal->j_committing_transaction);
2369 				goto zap_buffer;
2370 			} else {
2371 				/* The orphan record's transaction has
2372 				 * committed.  We can cleanse this buffer */
2373 				clear_buffer_jbddirty(bh);
2374 				__jbd2_journal_remove_checkpoint(jh);
2375 				goto zap_buffer;
2376 			}
2377 		}
2378 	} else if (transaction == journal->j_committing_transaction) {
2379 		JBUFFER_TRACE(jh, "on committing transaction");
2380 		/*
2381 		 * The buffer is committing, we simply cannot touch
2382 		 * it. If the page is straddling i_size we have to wait
2383 		 * for commit and try again.
2384 		 */
2385 		if (partial_page) {
2386 			spin_unlock(&journal->j_list_lock);
2387 			spin_unlock(&jh->b_state_lock);
2388 			write_unlock(&journal->j_state_lock);
2389 			jbd2_journal_put_journal_head(jh);
2390 			/* Already zapped buffer? Nothing to do... */
2391 			if (!bh->b_bdev)
2392 				return 0;
2393 			return -EBUSY;
2394 		}
2395 		/*
2396 		 * OK, buffer won't be reachable after truncate. We just clear
2397 		 * b_modified to not confuse transaction credit accounting, and
2398 		 * set j_next_transaction to the running transaction (if there
2399 		 * is one) and mark buffer as freed so that commit code knows
2400 		 * it should clear dirty bits when it is done with the buffer.
2401 		 */
2402 		set_buffer_freed(bh);
2403 		if (journal->j_running_transaction && buffer_jbddirty(bh))
2404 			jh->b_next_transaction = journal->j_running_transaction;
2405 		jh->b_modified = 0;
2406 		spin_unlock(&journal->j_list_lock);
2407 		spin_unlock(&jh->b_state_lock);
2408 		write_unlock(&journal->j_state_lock);
2409 		jbd2_journal_put_journal_head(jh);
2410 		return 0;
2411 	} else {
2412 		/* Good, the buffer belongs to the running transaction.
2413 		 * We are writing our own transaction's data, not any
2414 		 * previous one's, so it is safe to throw it away
2415 		 * (remember that we expect the filesystem to have set
2416 		 * i_size already for this truncate so recovery will not
2417 		 * expose the disk blocks we are discarding here.) */
2418 		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
2419 		JBUFFER_TRACE(jh, "on running transaction");
2420 		may_free = __dispose_buffer(jh, transaction);
2421 	}
2422 
2423 zap_buffer:
2424 	/*
2425 	 * This is tricky. Although the buffer is truncated, it may be reused
2426 	 * if blocksize < pagesize and it is attached to the page straddling
2427 	 * EOF. Since the buffer might have been added to BJ_Forget list of the
2428 	 * running transaction, journal_get_write_access() won't clear
2429 	 * b_modified and credit accounting gets confused. So clear b_modified
2430 	 * here.
2431 	 */
2432 	jh->b_modified = 0;
2433 	spin_unlock(&journal->j_list_lock);
2434 	spin_unlock(&jh->b_state_lock);
2435 	write_unlock(&journal->j_state_lock);
2436 	jbd2_journal_put_journal_head(jh);
2437 zap_buffer_unlocked:
2438 	clear_buffer_dirty(bh);
2439 	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
2440 	clear_buffer_mapped(bh);
2441 	clear_buffer_req(bh);
2442 	clear_buffer_new(bh);
2443 	clear_buffer_delay(bh);
2444 	clear_buffer_unwritten(bh);
2445 	bh->b_bdev = NULL;
2446 	return may_free;
2447 }
2448 
2449 /**
2450  * jbd2_journal_invalidate_folio()
2451  * @journal: journal to use for flush...
2452  * @folio:    folio to flush
2453  * @offset:  start of the range to invalidate
2454  * @length:  length of the range to invalidate
2455  *
 2456  * Reap page buffers containing data in the specified range of the folio.
2457  * Can return -EBUSY if buffers are part of the committing transaction and
2458  * the page is straddling i_size. Caller then has to wait for current commit
2459  * and try again.
2460  */
2461 int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
2462 				size_t offset, size_t length)
2463 {
2464 	struct buffer_head *head, *bh, *next;
2465 	unsigned int stop = offset + length;
2466 	unsigned int curr_off = 0;
2467 	int partial_page = (offset || length < folio_size(folio));
2468 	int may_free = 1;
2469 	int ret = 0;
2470 
2471 	if (!folio_test_locked(folio))
2472 		BUG();
2473 	head = folio_buffers(folio);
2474 	if (!head)
2475 		return 0;
2476 
2477 	BUG_ON(stop > folio_size(folio) || stop < length);
2478 
2479 	/* We will potentially be playing with lists other than just the
2480 	 * data lists (especially for journaled data mode), so be
2481 	 * cautious in our locking. */
2482 
2483 	bh = head;
2484 	do {
2485 		unsigned int next_off = curr_off + bh->b_size;
2486 		next = bh->b_this_page;
2487 
2488 		if (next_off > stop)
2489 			return 0;
2490 
2491 		if (offset <= curr_off) {
2492 			/* This block is wholly outside the truncation point */
2493 			lock_buffer(bh);
2494 			ret = journal_unmap_buffer(journal, bh, partial_page);
2495 			unlock_buffer(bh);
2496 			if (ret < 0)
2497 				return ret;
2498 			may_free &= ret;
2499 		}
2500 		curr_off = next_off;
2501 		bh = next;
2502 
2503 	} while (bh != head);
2504 
2505 	if (!partial_page) {
2506 		if (may_free && try_to_free_buffers(folio))
2507 			J_ASSERT(!folio_buffers(folio));
2508 	}
2509 	return 0;
2510 }
2511 
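/*
 * Editorial example (not part of the original source): a caller in a
 * filesystem's ->invalidate_folio() must be prepared for -EBUSY when
 * the folio straddles i_size; a sketch:
 *
 *	ret = jbd2_journal_invalidate_folio(journal, folio,
 *					    offset, length);
 *	if (ret == -EBUSY) {
 *		... wait for the committing transaction, then retry ...
 *	}
 */
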
2512 /*
2513  * File a buffer on the given transaction list.
2514  */
2515 void __jbd2_journal_file_buffer(struct journal_head *jh,
2516 			transaction_t *transaction, int jlist)
2517 {
2518 	struct journal_head **list = NULL;
2519 	int was_dirty = 0;
2520 	struct buffer_head *bh = jh2bh(jh);
2521 
2522 	lockdep_assert_held(&jh->b_state_lock);
2523 	assert_spin_locked(&transaction->t_journal->j_list_lock);
2524 
2525 	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
2526 	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
2527 				jh->b_transaction == NULL);
2528 
2529 	if (jh->b_transaction && jh->b_jlist == jlist)
2530 		return;
2531 
2532 	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
2533 	    jlist == BJ_Shadow || jlist == BJ_Forget) {
2534 		/*
2535 		 * For metadata buffers, we track dirty bit in buffer_jbddirty
2536 		 * instead of buffer_dirty. We should not see a dirty bit set
 2537 		 * here because we clear it in do_get_write_access(), but e.g.
 2538 		 * tune2fs can modify the sb and set the dirty bit at any time,
 2539 		 * so we try to handle that gracefully.
2540 		 */
2541 		if (buffer_dirty(bh))
2542 			warn_dirty_buffer(bh);
2543 		if (test_clear_buffer_dirty(bh) ||
2544 		    test_clear_buffer_jbddirty(bh))
2545 			was_dirty = 1;
2546 	}
2547 
2548 	if (jh->b_transaction)
2549 		__jbd2_journal_temp_unlink_buffer(jh);
2550 	else
2551 		jbd2_journal_grab_journal_head(bh);
2552 	jh->b_transaction = transaction;
2553 
2554 	switch (jlist) {
2555 	case BJ_None:
2556 		J_ASSERT_JH(jh, !jh->b_committed_data);
2557 		J_ASSERT_JH(jh, !jh->b_frozen_data);
2558 		return;
2559 	case BJ_Metadata:
2560 		transaction->t_nr_buffers++;
2561 		list = &transaction->t_buffers;
2562 		break;
2563 	case BJ_Forget:
2564 		list = &transaction->t_forget;
2565 		break;
2566 	case BJ_Shadow:
2567 		list = &transaction->t_shadow_list;
2568 		break;
2569 	case BJ_Reserved:
2570 		list = &transaction->t_reserved_list;
2571 		break;
2572 	}
2573 
2574 	__blist_add_buffer(list, jh);
2575 	jh->b_jlist = jlist;
2576 
2577 	if (was_dirty)
2578 		set_buffer_jbddirty(bh);
2579 }
2580 
2581 void jbd2_journal_file_buffer(struct journal_head *jh,
2582 				transaction_t *transaction, int jlist)
2583 {
2584 	spin_lock(&jh->b_state_lock);
2585 	spin_lock(&transaction->t_journal->j_list_lock);
2586 	__jbd2_journal_file_buffer(jh, transaction, jlist);
2587 	spin_unlock(&transaction->t_journal->j_list_lock);
2588 	spin_unlock(&jh->b_state_lock);
2589 }
2590 
2591 /*
2592  * Remove a buffer from its current buffer list in preparation for
2593  * dropping it from its current transaction entirely.  If the buffer has
2594  * already started to be used by a subsequent transaction, refile the
2595  * buffer on that transaction's metadata list.
2596  *
2597  * Called under j_list_lock
2598  * Called under jh->b_state_lock
2599  *
2600  * When this function returns true, there's no next transaction to refile to
2601  * and the caller has to drop jh reference through
2602  * jbd2_journal_put_journal_head().
2603  */
2604 bool __jbd2_journal_refile_buffer(struct journal_head *jh)
2605 {
2606 	int was_dirty, jlist;
2607 	struct buffer_head *bh = jh2bh(jh);
2608 
2609 	lockdep_assert_held(&jh->b_state_lock);
2610 	if (jh->b_transaction)
2611 		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
2612 
2613 	/* If the buffer is now unused, just drop it. */
2614 	if (jh->b_next_transaction == NULL) {
2615 		__jbd2_journal_unfile_buffer(jh);
2616 		return true;
2617 	}
2618 
2619 	/*
2620 	 * It has been modified by a later transaction: add it to the new
2621 	 * transaction's metadata list.
2622 	 */
2623 
2624 	was_dirty = test_clear_buffer_jbddirty(bh);
2625 	__jbd2_journal_temp_unlink_buffer(jh);
2626 
2627 	/*
2628 	 * b_transaction must be set, otherwise the new b_transaction won't
 2629 	 * be holding a jh reference
2630 	 */
2631 	J_ASSERT_JH(jh, jh->b_transaction != NULL);
2632 
2633 	/*
2634 	 * We set b_transaction here because b_next_transaction will inherit
2635 	 * our jh reference and thus __jbd2_journal_file_buffer() must not
2636 	 * take a new one.
2637 	 */
2638 	WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
2639 	WRITE_ONCE(jh->b_next_transaction, NULL);
2640 	if (buffer_freed(bh))
2641 		jlist = BJ_Forget;
2642 	else if (jh->b_modified)
2643 		jlist = BJ_Metadata;
2644 	else
2645 		jlist = BJ_Reserved;
2646 	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
2647 	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2648 
2649 	if (was_dirty)
2650 		set_buffer_jbddirty(bh);
2651 	return false;
2652 }
2653 
2654 /*
2655  * __jbd2_journal_refile_buffer() with necessary locking added. We take our
2656  * bh reference so that we can safely unlock bh.
2657  *
2658  * The jh and bh may be freed by this call.
2659  */
2660 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2661 {
2662 	bool drop;
2663 
2664 	spin_lock(&jh->b_state_lock);
2665 	spin_lock(&journal->j_list_lock);
2666 	drop = __jbd2_journal_refile_buffer(jh);
2667 	spin_unlock(&jh->b_state_lock);
2668 	spin_unlock(&journal->j_list_lock);
2669 	if (drop)
2670 		jbd2_journal_put_journal_head(jh);
2671 }
2672 
2673 /*
2674  * File inode in the inode list of the handle's transaction
2675  */
2676 static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
2677 		unsigned long flags, loff_t start_byte, loff_t end_byte)
2678 {
2679 	transaction_t *transaction = handle->h_transaction;
2680 	journal_t *journal;
2681 
2682 	if (is_handle_aborted(handle))
2683 		return -EROFS;
2684 	journal = transaction->t_journal;
2685 
2686 	jbd2_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
2687 			transaction->t_tid);
2688 
2689 	spin_lock(&journal->j_list_lock);
2690 	jinode->i_flags |= flags;
2691 
2692 	if (jinode->i_dirty_end) {
2693 		jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
2694 		jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
2695 	} else {
2696 		jinode->i_dirty_start = start_byte;
2697 		jinode->i_dirty_end = end_byte;
2698 	}
2699 
2700 	/* Is inode already attached where we need it? */
2701 	if (jinode->i_transaction == transaction ||
2702 	    jinode->i_next_transaction == transaction)
2703 		goto done;
2704 
2705 	/*
2706 	 * We only ever set this variable to 1 so the test is safe. Since
2707 	 * t_need_data_flush is likely to be set, we do the test to save some
 2708 	 * cacheline bouncing.
2709 	 */
2710 	if (!transaction->t_need_data_flush)
2711 		transaction->t_need_data_flush = 1;
2712 	/* On some different transaction's list - should be
2713 	 * the committing one */
2714 	if (jinode->i_transaction) {
2715 		J_ASSERT(jinode->i_next_transaction == NULL);
2716 		J_ASSERT(jinode->i_transaction ==
2717 					journal->j_committing_transaction);
2718 		jinode->i_next_transaction = transaction;
2719 		goto done;
2720 	}
2721 	/* Not on any transaction list... */
2722 	J_ASSERT(!jinode->i_next_transaction);
2723 	jinode->i_transaction = transaction;
2724 	list_add(&jinode->i_list, &transaction->t_inode_list);
2725 done:
2726 	spin_unlock(&journal->j_list_lock);
2727 
2728 	return 0;
2729 }
2730 
2731 int jbd2_journal_inode_ranged_write(handle_t *handle,
2732 		struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
2733 {
2734 	return jbd2_journal_file_inode(handle, jinode,
2735 			JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
2736 			start_byte + length - 1);
2737 }
2738 
2739 int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
2740 		loff_t start_byte, loff_t length)
2741 {
2742 	return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
2743 			start_byte, start_byte + length - 1);
2744 }
2745 
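/*
 * Editorial example (not part of the original source): an ordered-mode
 * filesystem records the range a handle is about to dirty so that
 * commit writes it back first; for a buffered write of 'len' bytes at
 * 'pos' ('jinode' being the fs-private struct jbd2_inode), roughly:
 *
 *	err = jbd2_journal_inode_ranged_write(handle, jinode, pos, len);
 *	if (err)
 *		goto out_stop;
 */
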
2746 /*
2747  * File truncate and transaction commit interact with each other in a
2748  * non-trivial way.  If a transaction writing data block A is
2749  * committing, we cannot discard the data by truncate until we have
2750  * written them.  Otherwise if we crashed after the transaction with
2751  * write has committed but before the transaction with truncate has
2752  * committed, we could see stale data in block A.  This function is a
2753  * helper to solve this problem.  It starts writeout of the truncated
2754  * part in case it is in the committing transaction.
2755  *
2756  * Filesystem code must call this function when inode is journaled in
2757  * ordered mode before truncation happens and after the inode has been
2758  * placed on orphan list with the new inode size. The second condition
2759  * avoids the race that someone writes new data and we start
2760  * committing the transaction after this function has been called but
2761  * before a transaction for truncate is started (and furthermore it
2762  * allows us to optimize the case where the addition to orphan list
2763  * happens in the same transaction as write --- we don't have to write
2764  * any data in such case).
2765  */
2766 int jbd2_journal_begin_ordered_truncate(journal_t *journal,
2767 					struct jbd2_inode *jinode,
2768 					loff_t new_size)
2769 {
2770 	transaction_t *inode_trans, *commit_trans;
2771 	int ret = 0;
2772 
2773 	/* This is a quick check to avoid locking if not necessary */
2774 	if (!jinode->i_transaction)
2775 		goto out;
 2776 	/* Locks are here just to force reading of recent values; it is
 2777 	 * enough that the transaction was not committing before we started
 2778 	 * a transaction adding the inode to the orphan list */
2779 	read_lock(&journal->j_state_lock);
2780 	commit_trans = journal->j_committing_transaction;
2781 	read_unlock(&journal->j_state_lock);
2782 	spin_lock(&journal->j_list_lock);
2783 	inode_trans = jinode->i_transaction;
2784 	spin_unlock(&journal->j_list_lock);
2785 	if (inode_trans == commit_trans) {
2786 		ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
2787 			new_size, LLONG_MAX);
2788 		if (ret)
2789 			jbd2_journal_abort(journal, ret);
2790 	}
2791 out:
2792 	return ret;
2793 }
 2794 }
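
/*
 * Editorial example (not part of the original source): an ordered-mode
 * truncate path would call the helper above after the inode has been
 * put on the orphan list with its new size and before starting the
 * truncating transaction; a sketch:
 *
 *	err = jbd2_journal_begin_ordered_truncate(journal, jinode,
 *						  attr->ia_size);
 *	if (err)
 *		goto err_out;
 */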