// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
#include "trans.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
	unsigned int blks;
	unsigned int first, second;

	/* The initial struct gfs2_log_descriptor block */
	blks = 1;
	first = sdp->sd_ldptrs;

	if (nstruct > first) {
		/* Subsequent struct gfs2_meta_header blocks */
		second = sdp->sd_inptrs;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
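
/*
 * Worked example (the values are illustrative assumptions; the real
 * sd_ldptrs/sd_inptrs depend on the block size): with sd_ldptrs = 480 and
 * sd_inptrs = 500, gfs2_struct2blk(sdp, 1000) places the first 480
 * structures in the initial descriptor block and needs
 * DIV_ROUND_UP(1000 - 480, 500) = 2 continuation blocks, i.e. 3 blocks total.
 */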

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function.
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction to start I/O on
 * @plug: The block plug currently active
 *
 * Returns: 0 on success, -EBUSY if the scan must be restarted, or an error
 * from generic_writepages()
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr, struct blk_plug *plug)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		if (gfs2_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		ret = generic_writepages(mapping, wbc);
		if (need_resched()) {
			blk_finish_plug(plug);
			cond_resched();
			blk_start_plug(plug);
		}
		spin_lock(&sdp->sd_ail_lock);
		if (ret == -ENODATA) /* if a jdata write into a new hole */
			ret = 0; /* ignore it */
		if (ret || wbc->nr_to_write <= 0)
			break;
		return -EBUSY;
	}

	return ret;
}
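
/*
 * Commentary (not from the original source): the ail lock is dropped around
 * generic_writepages() above, so the list may change before the lock is
 * retaken.  After starting writeback for one glock's buffers, the function
 * returns -EBUSY so that gfs2_ail1_flush() below restarts its scan of
 * sd_ail1_list from the top instead of trusting a stale list cursor.
 */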

static void dump_ail_list(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
					    bd_ail_st_list) {
			bh = bd->bd_bh;
			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
			       (unsigned long long)bd->bd_blkno, bh);
			if (!bh) {
				fs_err(sdp, "\n");
				continue;
			}
			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
			       "map:%d new:%d ar:%d aw:%d delay:%d "
			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
			       (unsigned long long)bh->b_blocknr,
			       buffer_uptodate(bh), buffer_dirty(bh),
			       buffer_locked(bh), buffer_req(bh),
			       buffer_mapped(bh), buffer_new(bh),
			       buffer_async_read(bh), buffer_async_write(bh),
			       buffer_delay(bh), buffer_write_io_error(bh),
			       buffer_unwritten(bh),
			       buffer_defer_completion(bh),
			       buffer_pinned(bh), buffer_escaped(bh));
		}
	}
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret;
	unsigned long flush_start = jiffies;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	ret = 0;
	if (time_after(jiffies, flush_start + (HZ * 600))) {
		fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
		       __func__, current->journal_info ? 1 : 0);
		dump_ail_list(sdp);
		goto out;
	}
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
		if (ret) {
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
out:
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (ret) {
		gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
			"returned: %d\n", ret);
		gfs2_withdraw(sdp);
	}
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
{
	unsigned int new_flush_tail = sdp->sd_log_head;
	struct gfs2_trans *tr;

	if (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_last_entry(&sdp->sd_ail1_list,
				     struct gfs2_trans, tr_list);
		new_flush_tail = tr->tr_first;
	}
	sdp->sd_log_flush_tail = new_flush_tail;
}

static void gfs2_log_update_head(struct gfs2_sbd *sdp)
{
	unsigned int new_head = sdp->sd_log_flush_head;

	if (sdp->sd_log_flush_tail == sdp->sd_log_head)
		sdp->sd_log_flush_tail = new_head;
	sdp->sd_log_head = new_head;
}

/**
 * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
 * @sdp: the filesystem
 * @tr: the transaction
 * @head: the ail list to empty (tr_ail1_list or tr_ail2_list)
 */

static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
 *
 * Returns: the transaction's count of remaining active items
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int active_count = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/*
		 * If another process flagged an io error, e.g. writing to the
		 * journal, error all other bhs and move them off the ail1 to
		 * prevent a tight loop when unmount tries to flush ail1,
		 * regardless of whether they're still busy. If no outside
		 * errors were found and the buffer is busy, move to the next.
		 * If the ail buffer is not busy and caught an error, flag it
		 * for others.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh)) {
			active_count++;
			continue;
		}
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}
		/*
		 * If we have space for revokes and the bd is no longer on any
		 * buf list, we can just add a revoke for it immediately and
		 * avoid having to put it on the ail2 list, where it would need
		 * to be revoked later.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
	return active_count;
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * @max_revokes: If non-zero, add revokes where appropriate
 *
 * Tries to empty the ail1 lists, starting with the oldest first.
 *
 * Returns: 1 if the ail1 list is now empty, 0 otherwise
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	gfs2_log_update_flush_tail(sdp);
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
		gfs2_lm(sdp, "fatal: I/O error(s)\n");
		gfs2_withdraw(sdp);
	}

	return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
	list_del(&tr->tr_list);
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
	gfs2_trans_free(sdp, tr);
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct list_head *ail2_list = &sdp->sd_ail2_list;
	unsigned int old_tail = sdp->sd_log_tail;
	struct gfs2_trans *tr, *safe;

	spin_lock(&sdp->sd_ail_lock);
	if (old_tail <= new_tail) {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	} else {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
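
/*
 * Worked example of the range tests above (numbers are illustrative): in an
 * 8192-block journal with old_tail = 8000 and new_tail = 200, the freed
 * region wraps past the end of the journal, so a transaction is emptied when
 * tr_first >= 8000 || tr_first < 200.  Without wraparound (old_tail = 100,
 * new_tail = 300), the usual half-open test 100 <= tr_first < 300 applies.
 */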

/**
 * gfs2_log_is_empty - Check if the log is empty
 * @sdp: The GFS2 superblock
 */

bool gfs2_log_is_empty(struct gfs2_sbd *sdp)
{
	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
}

static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
{
	unsigned int available;

	available = atomic_read(&sdp->sd_log_revokes_available);
	while (available >= revokes) {
		if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
				       &available, available - revokes))
			return true;
	}
	return false;
}
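
/*
 * Sketch of the lock-free reservation pattern used above (comment only, an
 * illustration rather than code from this file).  atomic_try_cmpxchg()
 * re-reads the counter into 'available' whenever it fails, so the bound is
 * re-checked on every retry and the counter can never go below zero:
 *
 *	unsigned int available = atomic_read(&counter);
 *	while (available >= wanted) {
 *		if (atomic_try_cmpxchg(&counter, &available,
 *				       available - wanted))
 *			return true;	// reserved
 *	}
 *	return false;			// not enough left
 */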

/**
 * gfs2_log_release_revokes - Release a given number of revokes
 * @sdp: The GFS2 superblock
 * @revokes: The number of revokes to release
 *
 * sdp->sd_log_flush_lock must be held.
 */
void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
{
	if (revokes)
		atomic_add(revokes, &sdp->sd_log_revokes_available);
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	if (atomic_read(&sdp->sd_log_blks_needed))
		wake_up(&sdp->sd_log_waitq);
}

/**
 * __gfs2_log_try_reserve - Try to make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 * @taboo_blks: The number of blocks to leave free
 *
 * Try to do the same as __gfs2_log_reserve(), but fail if no more log
 * space is immediately available.
 */
static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
				   unsigned int taboo_blks)
{
	unsigned wanted = blks + taboo_blks;
	unsigned int free_blocks;

	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	while (free_blocks >= wanted) {
		if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
				       free_blocks - blks)) {
			trace_gfs2_log_blocks(sdp, -blks);
			return true;
		}
	}
	return false;
}

/**
 * __gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 * @taboo_blks: The number of blocks to leave free
 *
 * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS
 * for all other processes.  This ensures that when the log is almost full,
 * logd will still be able to call gfs2_log_flush one more time without
 * blocking, which will advance the tail and make some more log space
 * available.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 */

static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
			       unsigned int taboo_blks)
{
	unsigned wanted = blks + taboo_blks;
	unsigned int free_blocks;

	atomic_add(blks, &sdp->sd_log_blks_needed);
	for (;;) {
		if (current != sdp->sd_logd_process)
			wake_up(&sdp->sd_logd_waitq);
		io_wait_event(sdp->sd_log_waitq,
			(free_blocks = atomic_read(&sdp->sd_log_blks_free),
			 free_blocks >= wanted));
		do {
			if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
					       &free_blocks,
					       free_blocks - blks))
				goto reserved;
		} while (free_blocks >= wanted);
	}

reserved:
	trace_gfs2_log_blocks(sdp, -blks);
	if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
		wake_up(&sdp->sd_log_waitq);
}
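
/*
 * Worked example (illustrative): a caller needing blks = 10 with
 * taboo_blks = GFS2_LOG_FLUSH_MIN_BLOCKS waits until sd_log_blks_free
 * reaches at least 10 + GFS2_LOG_FLUSH_MIN_BLOCKS but subtracts only 10 on
 * success.  The taboo blocks are never handed out here; they remain free so
 * that logd can always perform one more gfs2_log_flush without blocking.
 */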

/**
 * gfs2_log_try_reserve - Try to make a log reservation
 * @sdp: The GFS2 superblock
 * @tr: The transaction
 * @extra_revokes: The number of additional revokes reserved (output)
 *
 * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
 * held for correct revoke accounting.
 */

bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			  unsigned int *extra_revokes)
{
	unsigned int blks = tr->tr_reserved;
	unsigned int revokes = tr->tr_revokes;
	unsigned int revoke_blks = 0;

	*extra_revokes = 0;
	if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
		blks += revoke_blks;
	}
	if (!blks)
		return true;
	if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
		return true;
	if (!revoke_blks)
		gfs2_log_release_revokes(sdp, revokes);
	return false;
}
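
/*
 * Worked example of the revoke arithmetic above (sd_inptrs = 500 is an
 * assumption): for tr->tr_revokes = 600, revoke_blks = DIV_ROUND_UP(600, 500)
 * = 2 and *extra_revokes = 2 * 500 - 600 = 400, i.e. rounding up to whole
 * revoke blocks leaves room for 400 additional revokes that the caller gets
 * for free.
 */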

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @tr: The transaction
 * @extra_revokes: The number of additional revokes reserved (output)
 *
 * sdp->sd_log_flush_lock must not be held.
 */

void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
		      unsigned int *extra_revokes)
{
	unsigned int blks = tr->tr_reserved;
	unsigned int revokes = tr->tr_revokes;
	unsigned int revoke_blks = 0;

	*extra_revokes = 0;
	if (revokes) {
		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
		blks += revoke_blks;
	}
	__gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
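
/*
 * Worked example (journal size is illustrative): with jd_blocks = 8192,
 * log_distance(sdp, 10, 8000) yields 10 - 8000 = -7990, which is negative,
 * so jd_blocks is added for a distance of 202 blocks; the journal behaves as
 * a circular buffer.
 */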

/**
 * calc_reserved - Calculate the number of blocks to keep reserved
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data blocks for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, corruption will follow.
 *
 * We can have metadata blocks and jdata blocks in the same journal.  Each
 * type gets its own log descriptor, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one log descriptor
 * in cases where we have more blocks than will fit in a log descriptor.
 * Metadata journal entries take up half the space of journaled buffer entries.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
	unsigned int blocks;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
		blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
	}
	return reserved;
}
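
/*
 * Worked example (buf_limit(sdp) = 500 is an assumption): a transaction with
 * 600 new metadata buffers and no journaled data reserves
 * GFS2_LOG_FLUSH_MIN_BLOCKS + 600 + DIV_ROUND_UP(600, 500), i.e. one block
 * per buffer plus two log descriptor blocks on top of the minimum flush
 * overhead.
 */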

static void log_pull_tail(struct gfs2_sbd *sdp)
{
	unsigned int new_tail = sdp->sd_log_flush_tail;
	unsigned int dist;

	if (new_tail == sdp->sd_log_tail)
		return;
	dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
	ail2_empty(sdp, new_tail);
	gfs2_log_release(sdp, dist);
	sdp->sd_log_tail = new_tail;
}

void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}

static void __ordered_del_inode(struct gfs2_inode *ip)
{
	if (!list_empty(&ip->i_ordered))
		list_del_init(&ip->i_ordered);
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			__ordered_del_inode(ip);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		__ordered_del_inode(ip);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	__ordered_del_inode(ip);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}

/**
 * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
 * that we'll eventually overwrite the journal, which will make those revokes
 * go away.  This changes when we flush the log: at that point, there will
 * likely be some left-over space in the last revoke block of that transaction.
 * We can fill that space with additional revokes for blocks that have already
 * been written back.  This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
void gfs2_flush_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);

	gfs2_log_lock(sdp);
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	if (gfs2_withdrawn(sdp))
		return;

	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(jd, lblock);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
}
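
/*
 * Checksum layout, as implied by the code above: lh_hash is a crc32 over the
 * first LH_V1_SIZE bytes (the GFS1-compatible part of the header).  lh_crc
 * lives at offset LH_V1_SIZE (enforced by the BUILD_BUG_ON), and the crc32c
 * skips 4 bytes for the lh_crc field itself, covering
 * [LH_V1_SIZE + 4, blocksize) so the checksum does not include itself.
 */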

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
			      sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
			      flags, op_flags);
	gfs2_log_incr_head(sdp);
	log_flush_wait(sdp);
	log_pull_tail(sdp);
	gfs2_log_update_head(sdp);
}

/**
 * ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 */
static void ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * For transactions on the sd_ail1_list we need to drain both the
	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
	 * before revokes are sent for that block. Items on the sd_ail2_list
	 * should have already gotten beyond that point, so no need.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * empty_ail1_list - try to start IO and empty the ail1 list
 * @sdp: Pointer to GFS2 superblock
 */
static void empty_ail1_list(struct gfs2_sbd *sdp)
{
	unsigned long start = jiffies;

	for (;;) {
		if (time_after(jiffies, start + (HZ * 600))) {
			fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
			       __func__, current->journal_info ? 1 : 0);
			dump_ail_list(sdp);
			return;
		}
		gfs2_ail1_start(sdp);
		gfs2_ail1_wait(sdp);
		if (gfs2_ail1_empty(sdp, 0))
			return;
	}
}

/**
 * trans_drain - drain the buf and databuf queue for a failed transaction
 * @tr: the transaction to drain
 *
 * When this is called, we're taking an error exit for a log write that failed,
 * and since we bypassed the after_commit functions, we need to remove the
 * items from the buf and databuf queue.
 */
static void trans_drain(struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd;
	struct list_head *head;

	if (!tr)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		if (!list_empty(&bd->bd_ail_st_list))
			gfs2_remove_from_ail(bd);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		if (!list_empty(&bd->bd_ail_st_list))
			gfs2_remove_from_ail(bd);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr = NULL;
	unsigned int reserved_blocks = 0, used_blocks = 0;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
	unsigned int first_log_head;
	unsigned int reserved_revokes = 0;

	down_write(&sdp->sd_log_flush_lock);
	trace_gfs2_log_flush(sdp, 1, flags);

repeat:
	/*
	 * Do this check while holding the log_flush_lock to prevent new
	 * buffers from being added to the ail via gfs2_pin()
	 */
	if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		goto out;

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
		goto out;

	first_log_head = sdp->sd_log_head;
	sdp->sd_log_flush_head = first_log_head;

	tr = sdp->sd_log_tr;
	if (tr || sdp->sd_log_num_revoke) {
		if (reserved_blocks)
			gfs2_log_release(sdp, reserved_blocks);
		reserved_blocks = sdp->sd_log_blks_reserved;
		reserved_revokes = sdp->sd_log_num_revoke;
		if (tr) {
			sdp->sd_log_tr = NULL;
			tr->tr_first = first_log_head;
			if (unlikely(state == SFS_FROZEN)) {
				if (gfs2_assert_withdraw_delayed(sdp,
				       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
					goto out_withdraw;
			}
		}
	} else if (!reserved_blocks) {
		unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;

		reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
		if (current == sdp->sd_logd_process)
			taboo_blocks = 0;

		if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
			up_write(&sdp->sd_log_flush_lock);
			__gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
			down_write(&sdp->sd_log_flush_lock);
			goto repeat;
		}
		BUG_ON(sdp->sd_log_num_revoke);
	}

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (unlikely(state == SFS_FROZEN))
		if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
			goto out_withdraw;

	gfs2_ordered_write(sdp);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_before_commit(sdp, tr);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
		log_write_header(sdp, flags);
	}
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_reserved = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			empty_ail1_list(sdp);
			if (gfs2_withdrawn(sdp))
				goto out_withdraw;
			log_write_header(sdp, flags);
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

out_end:
	used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
	reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
	gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs == sdp->sd_ldptrs);
	if (reserved_revokes > sdp->sd_ldptrs)
		reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
out:
	if (used_blocks != reserved_blocks) {
		gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
		gfs2_log_release(sdp, reserved_blocks - used_blocks);
	}
	up_write(&sdp->sd_log_flush_lock);
	gfs2_trans_free(sdp, tr);
	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
	trace_gfs2_log_flush(sdp, 0, flags);
	return;

out_withdraw:
	trans_drain(tr);
	/*
	 * If the tr_list is empty, we're withdrawing during a log
	 * flush that targets a transaction, but the transaction was
	 * never queued onto any of the ail lists. Here we add it to
	 * ail1 just so that ail_drain() will find and free it.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (tr && list_empty(&tr->tr_list))
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);
	ail_drain(sdp); /* frees all transactions */
	tr = NULL;
	goto out_end;
}

/**
 * gfs2_merge_trans - Merge a new transaction into the cached transaction
 * @sdp: the filesystem
 * @new: New transaction to be merged into the one cached in sd_log_tr
 */

static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
	struct gfs2_trans *old = sdp->sd_log_tr;

	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new	+= new->tr_num_buf_new;
	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
	old->tr_revokes		+= new->tr_revokes;
	old->tr_num_revoke	+= new->tr_num_revoke;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);

	spin_lock(&sdp->sd_ail_lock);
	list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
	list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	if (unused)
		gfs2_log_release(sdp, unused);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}
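
/*
 * Worked example of the thresholds (an 8192-block journal is assumed): with
 * the mount-time defaults described above, thresh1 = 2/5 * 8192 ~= 3276 and
 * thresh2 = 4/5 * 8192 ~= 6553, so logd is woken once more than ~3276 blocks
 * are pinned, or once fewer than ~1639 journal blocks remain free.
 */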

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
	log_pull_tail(sdp);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp)) {
			msleep_interruptible(HZ);
			continue;
		}
		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm(sdp,
				"GFS2: fsid=%s: error %d: "
				"withdrawing the file system to "
				"prevent further damage.\n",
				sdp->sd_fsname, sdp->sd_log_error);
			gfs2_withdraw(sdp);
			continue;
		}

		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_JFLUSH_REQD);
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
		}

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while (t && !gfs2_ail_flush_reqd(sdp) &&
			 !gfs2_jrnl_flush_reqd(sdp) &&
			 !kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}
1349