1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #include <linux/sched.h>
8 #include <linux/slab.h>
9 #include <linux/spinlock.h>
10 #include <linux/completion.h>
11 #include <linux/buffer_head.h>
12 #include <linux/gfs2_ondisk.h>
13 #include <linux/crc32.h>
14 #include <linux/crc32c.h>
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/freezer.h>
18 #include <linux/bio.h>
19 #include <linux/blkdev.h>
20 #include <linux/writeback.h>
21 #include <linux/list_sort.h>
22 
23 #include "gfs2.h"
24 #include "incore.h"
25 #include "bmap.h"
26 #include "glock.h"
27 #include "log.h"
28 #include "lops.h"
29 #include "meta_io.h"
30 #include "util.h"
31 #include "dir.h"
32 #include "trace_gfs2.h"
33 #include "trans.h"
34 
35 static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
36 
37 /**
38  * gfs2_struct2blk - compute the number of log blocks needed for structures
39  * @sdp: the filesystem
40  * @nstruct: the number of structures
41  *
42  * Compute the number of log descriptor blocks needed to hold a certain number
43  * of structures of a certain size.
44  *
45  * Returns: the number of blocks needed (minimum is always 1)
46  */
47 
48 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
49 {
50 	unsigned int blks;
51 	unsigned int first, second;
52 
53 	/* The initial struct gfs2_log_descriptor block */
54 	blks = 1;
55 	first = sdp->sd_ldptrs;
56 
57 	if (nstruct > first) {
58 		/* Subsequent struct gfs2_meta_header blocks */
59 		second = sdp->sd_inptrs;
60 		blks += DIV_ROUND_UP(nstruct - first, second);
61 	}
62 
63 	return blks;
64 }
65 
66 /**
67  * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
68  * @bd: The gfs2_bufdata to remove
69  *
70  * The ail lock _must_ be held when calling this function
71  *
72  */
73 
74 void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
75 {
76 	bd->bd_tr = NULL;
77 	list_del_init(&bd->bd_ail_st_list);
78 	list_del_init(&bd->bd_ail_gl_list);
79 	atomic_dec(&bd->bd_gl->gl_ail_count);
80 	brelse(bd->bd_bh);
81 }
82 
83 /**
84  * gfs2_ail1_start_one - Start I/O on a transaction
85  * @sdp: The superblock
86  * @wbc: The writeback control structure
87  * @tr: The transaction to start I/O on
88  * @plug: The block plug currently active
89  */
90 
91 static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
92 			       struct writeback_control *wbc,
93 			       struct gfs2_trans *tr, struct blk_plug *plug)
94 __releases(&sdp->sd_ail_lock)
95 __acquires(&sdp->sd_ail_lock)
96 {
97 	struct gfs2_glock *gl = NULL;
98 	struct address_space *mapping;
99 	struct gfs2_bufdata *bd, *s;
100 	struct buffer_head *bh;
101 	int ret = 0;
102 
103 	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
104 		bh = bd->bd_bh;
105 
106 		gfs2_assert(sdp, bd->bd_tr == tr);
107 
108 		if (!buffer_busy(bh)) {
109 			if (buffer_uptodate(bh)) {
110 				list_move(&bd->bd_ail_st_list,
111 					  &tr->tr_ail2_list);
112 				continue;
113 			}
114 			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
115 				gfs2_io_error_bh(sdp, bh);
116 				gfs2_withdraw_delayed(sdp);
117 			}
118 		}
119 
120 		if (gfs2_withdrawn(sdp)) {
121 			gfs2_remove_from_ail(bd);
122 			continue;
123 		}
124 		if (!buffer_dirty(bh))
125 			continue;
126 		if (gl == bd->bd_gl)
127 			continue;
128 		gl = bd->bd_gl;
129 		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
130 		mapping = bh->b_page->mapping;
131 		if (!mapping)
132 			continue;
133 		spin_unlock(&sdp->sd_ail_lock);
134 		ret = filemap_fdatawrite_wbc(mapping, wbc);
135 		if (need_resched()) {
136 			blk_finish_plug(plug);
137 			cond_resched();
138 			blk_start_plug(plug);
139 		}
140 		spin_lock(&sdp->sd_ail_lock);
141 		if (ret == -ENODATA) /* if a jdata write into a new hole */
142 			ret = 0; /* ignore it */
143 		if (ret || wbc->nr_to_write <= 0)
144 			break;
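		/*
		 * We dropped and retook sd_ail_lock above, so the ail1 list
		 * may have changed under us; tell the caller to restart its
		 * scan from the beginning.
		 */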
145 		return -EBUSY;
146 	}
147 
148 	return ret;
149 }
150 
151 static void dump_ail_list(struct gfs2_sbd *sdp)
152 {
153 	struct gfs2_trans *tr;
154 	struct gfs2_bufdata *bd;
155 	struct buffer_head *bh;
156 
157 	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
158 		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
159 					    bd_ail_st_list) {
160 			bh = bd->bd_bh;
161 			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
162 			       (unsigned long long)bd->bd_blkno, bh);
163 			if (!bh) {
164 				fs_err(sdp, "\n");
165 				continue;
166 			}
167 			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
168 			       "map:%d new:%d ar:%d aw:%d delay:%d "
169 			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
170 			       (unsigned long long)bh->b_blocknr,
171 			       buffer_uptodate(bh), buffer_dirty(bh),
172 			       buffer_locked(bh), buffer_req(bh),
173 			       buffer_mapped(bh), buffer_new(bh),
174 			       buffer_async_read(bh), buffer_async_write(bh),
175 			       buffer_delay(bh), buffer_write_io_error(bh),
176 			       buffer_unwritten(bh),
177 			       buffer_defer_completion(bh),
178 			       buffer_pinned(bh), buffer_escaped(bh));
179 		}
180 	}
181 }
182 
183 /**
184  * gfs2_ail1_flush - start writeback of some ail1 entries
185  * @sdp: The super block
186  * @wbc: The writeback control structure
187  *
188  * Writes back some ail1 entries, according to the limits in the
189  * writeback control structure
190  */
191 
192 void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
193 {
194 	struct list_head *head = &sdp->sd_ail1_list;
195 	struct gfs2_trans *tr;
196 	struct blk_plug plug;
197 	int ret;
198 	unsigned long flush_start = jiffies;
199 
200 	trace_gfs2_ail_flush(sdp, wbc, 1);
201 	blk_start_plug(&plug);
202 	spin_lock(&sdp->sd_ail_lock);
203 restart:
204 	ret = 0;
205 	if (time_after(jiffies, flush_start + (HZ * 600))) {
206 		fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
207 		       __func__, current->journal_info ? 1 : 0);
208 		dump_ail_list(sdp);
209 		goto out;
210 	}
211 	list_for_each_entry_reverse(tr, head, tr_list) {
212 		if (wbc->nr_to_write <= 0)
213 			break;
214 		ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
215 		if (ret) {
216 			if (ret == -EBUSY)
217 				goto restart;
218 			break;
219 		}
220 	}
221 out:
222 	spin_unlock(&sdp->sd_ail_lock);
223 	blk_finish_plug(&plug);
224 	if (ret) {
225 		gfs2_lm(sdp, "gfs2_ail1_start_one returned: %d\n", ret);
226 		gfs2_withdraw(sdp);
227 	}
228 	trace_gfs2_ail_flush(sdp, wbc, 0);
229 }
230 
231 /**
232  * gfs2_ail1_start - start writeback of all ail1 entries
233  * @sdp: The superblock
234  */
235 
236 static void gfs2_ail1_start(struct gfs2_sbd *sdp)
237 {
238 	struct writeback_control wbc = {
239 		.sync_mode = WB_SYNC_NONE,
240 		.nr_to_write = LONG_MAX,
241 		.range_start = 0,
242 		.range_end = LLONG_MAX,
243 	};
244 
245 	return gfs2_ail1_flush(sdp, &wbc);
246 }
247 
248 static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
249 {
250 	unsigned int new_flush_tail = sdp->sd_log_head;
251 	struct gfs2_trans *tr;
252 
253 	if (!list_empty(&sdp->sd_ail1_list)) {
254 		tr = list_last_entry(&sdp->sd_ail1_list,
255 				     struct gfs2_trans, tr_list);
256 		new_flush_tail = tr->tr_first;
257 	}
258 	sdp->sd_log_flush_tail = new_flush_tail;
259 }
260 
261 static void gfs2_log_update_head(struct gfs2_sbd *sdp)
262 {
263 	unsigned int new_head = sdp->sd_log_flush_head;
264 
265 	if (sdp->sd_log_flush_tail == sdp->sd_log_head)
266 		sdp->sd_log_flush_tail = new_head;
267 	sdp->sd_log_head = new_head;
268 }
269 
270 /*
271  * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
272  */
273 
274 static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
275 			      struct list_head *head)
276 {
277 	struct gfs2_bufdata *bd;
278 
279 	while (!list_empty(head)) {
280 		bd = list_first_entry(head, struct gfs2_bufdata,
281 				      bd_ail_st_list);
282 		gfs2_assert(sdp, bd->bd_tr == tr);
283 		gfs2_remove_from_ail(bd);
284 	}
285 }
286 
287 /**
288  * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
289  * @sdp: the filesystem
290  * @tr: the transaction
291  * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
292  *
293  * returns: the transaction's count of remaining active items
294  */
295 
296 static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
297 				int *max_revokes)
298 {
299 	struct gfs2_bufdata *bd, *s;
300 	struct buffer_head *bh;
301 	int active_count = 0;
302 
303 	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
304 					 bd_ail_st_list) {
305 		bh = bd->bd_bh;
306 		gfs2_assert(sdp, bd->bd_tr == tr);
307 		/*
308 		 * If another process flagged an io error, e.g. writing to the
309 		 * journal, error all other bhs and move them off the ail1 to
310 		 * prevent a tight loop when unmount tries to flush ail1,
311 		 * regardless of whether they're still busy. If no outside
312 		 * errors were found and the buffer is busy, move to the next.
313 		 * If the ail buffer is not busy and caught an error, flag it
314 		 * for others.
315 		 */
316 		if (!sdp->sd_log_error && buffer_busy(bh)) {
317 			active_count++;
318 			continue;
319 		}
320 		if (!buffer_uptodate(bh) &&
321 		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
322 			gfs2_io_error_bh(sdp, bh);
323 			gfs2_withdraw_delayed(sdp);
324 		}
325 		/*
326 		 * If we have space for revokes and the bd is no longer on any
327 		 * buf list, we can just add a revoke for it immediately and
328 		 * avoid having to put it on the ail2 list, where it would need
329 		 * to be revoked later.
330 		 */
331 		if (*max_revokes && list_empty(&bd->bd_list)) {
332 			gfs2_add_revoke(sdp, bd);
333 			(*max_revokes)--;
334 			continue;
335 		}
336 		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
337 	}
338 	return active_count;
339 }
340 
341 /**
342  * gfs2_ail1_empty - Try to empty the ail1 lists
343  * @sdp: The superblock
344  * @max_revokes: If non-zero, add revokes where appropriate
345  *
346  * Tries to empty the ail1 lists, starting with the oldest first
347  */
348 
349 static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
350 {
351 	struct gfs2_trans *tr, *s;
352 	int oldest_tr = 1;
353 	int ret;
354 
355 	spin_lock(&sdp->sd_ail_lock);
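	/*
	 * Scan the oldest transaction first.  Once a transaction still has
	 * active buffers, stop moving newer ones to ail2 even if they are
	 * done, so that the flush tail only ever advances contiguously.
	 */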
356 	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
357 		if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
358 			list_move(&tr->tr_list, &sdp->sd_ail2_list);
359 		else
360 			oldest_tr = 0;
361 	}
362 	gfs2_log_update_flush_tail(sdp);
363 	ret = list_empty(&sdp->sd_ail1_list);
364 	spin_unlock(&sdp->sd_ail_lock);
365 
366 	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
367 		gfs2_lm(sdp, "fatal: I/O error(s)\n");
368 		gfs2_withdraw(sdp);
369 	}
370 
371 	return ret;
372 }
373 
374 static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
375 {
376 	struct gfs2_trans *tr;
377 	struct gfs2_bufdata *bd;
378 	struct buffer_head *bh;
379 
380 	spin_lock(&sdp->sd_ail_lock);
381 	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
382 		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
383 			bh = bd->bd_bh;
384 			if (!buffer_locked(bh))
385 				continue;
386 			get_bh(bh);
387 			spin_unlock(&sdp->sd_ail_lock);
388 			wait_on_buffer(bh);
389 			brelse(bh);
390 			return;
391 		}
392 	}
393 	spin_unlock(&sdp->sd_ail_lock);
394 }
395 
396 static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
397 {
398 	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
399 	list_del(&tr->tr_list);
400 	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
401 	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
402 	gfs2_trans_free(sdp, tr);
403 }
404 
405 static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
406 {
407 	struct list_head *ail2_list = &sdp->sd_ail2_list;
408 	unsigned int old_tail = sdp->sd_log_tail;
409 	struct gfs2_trans *tr, *safe;
410 
411 	spin_lock(&sdp->sd_ail_lock);
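	/*
	 * The journal is circular, so when the tail has wrapped past the end
	 * of the journal the "between old and new tail" check must be split
	 * into two half-open ranges.
	 */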
412 	if (old_tail <= new_tail) {
413 		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
414 			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
415 				__ail2_empty(sdp, tr);
416 		}
417 	} else {
418 		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
419 			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
420 				__ail2_empty(sdp, tr);
421 		}
422 	}
423 	spin_unlock(&sdp->sd_ail_lock);
424 }
425 
426 /**
427  * gfs2_log_is_empty - Check if the log is empty
428  * @sdp: The GFS2 superblock
429  */
430 
431 bool gfs2_log_is_empty(struct gfs2_sbd *sdp)
{
432 	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
433 }
434 
435 static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
436 {
437 	unsigned int available;
438 
439 	available = atomic_read(&sdp->sd_log_revokes_available);
440 	while (available >= revokes) {
441 		if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
442 				       &available, available - revokes))
443 			return true;
444 	}
445 	return false;
446 }
447 
448 /**
449  * gfs2_log_release_revokes - Release a given number of revokes
450  * @sdp: The GFS2 superblock
451  * @revokes: The number of revokes to release
452  *
453  * sdp->sd_log_flush_lock must be held.
454  */
455 void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
456 {
457 	if (revokes)
458 		atomic_add(revokes, &sdp->sd_log_revokes_available);
459 }
460 
461 /**
462  * gfs2_log_release - Release a given number of log blocks
463  * @sdp: The GFS2 superblock
464  * @blks: The number of blocks
465  *
466  */
467 
468 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
469 {
470 	atomic_add(blks, &sdp->sd_log_blks_free);
471 	trace_gfs2_log_blocks(sdp, blks);
472 	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
473 				  sdp->sd_jdesc->jd_blocks);
474 	if (atomic_read(&sdp->sd_log_blks_needed))
475 		wake_up(&sdp->sd_log_waitq);
476 }
477 
478 /**
479  * __gfs2_log_try_reserve - Try to make a log reservation
480  * @sdp: The GFS2 superblock
481  * @blks: The number of blocks to reserve
482  * @taboo_blks: The number of blocks to leave free
483  *
484  * Try to do the same as __gfs2_log_reserve(), but fail if no more log
485  * space is immediately available.
486  */
487 static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
488 				   unsigned int taboo_blks)
489 {
490 	unsigned wanted = blks + taboo_blks;
491 	unsigned int free_blocks;
492 
493 	free_blocks = atomic_read(&sdp->sd_log_blks_free);
494 	while (free_blocks >= wanted) {
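	/* Only @blks are taken; @taboo_blks merely have to remain free. */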
495 		if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
496 				       free_blocks - blks)) {
497 			trace_gfs2_log_blocks(sdp, -blks);
498 			return true;
499 		}
500 	}
501 	return false;
502 }
503 
504 /**
505  * __gfs2_log_reserve - Make a log reservation
506  * @sdp: The GFS2 superblock
507  * @blks: The number of blocks to reserve
508  * @taboo_blks: The number of blocks to leave free
509  *
510  * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS
511  * for all other processes.  This ensures that when the log is almost full,
512  * logd will still be able to call gfs2_log_flush one more time without
513  * blocking, which will advance the tail and make some more log space
514  * available.
515  *
516  * We no longer flush the log here, instead we wake up logd to do that
517  * for us. To avoid the thundering herd and to ensure that we deal fairly
518  * with queued waiters, we use an exclusive wait. This means that when we
519  * get woken with enough journal space to get our reservation, we need to
520  * wake the next waiter on the list.
521  */
522 
523 static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
524 			       unsigned int taboo_blks)
525 {
526 	unsigned wanted = blks + taboo_blks;
527 	unsigned int free_blocks;
528 
529 	atomic_add(blks, &sdp->sd_log_blks_needed);
530 	for (;;) {
531 		if (current != sdp->sd_logd_process)
532 			wake_up(&sdp->sd_logd_waitq);
533 		io_wait_event(sdp->sd_log_waitq,
534 			(free_blocks = atomic_read(&sdp->sd_log_blks_free),
535 			 free_blocks >= wanted));
536 		do {
537 			if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
538 					       &free_blocks,
539 					       free_blocks - blks))
540 				goto reserved;
541 		} while (free_blocks >= wanted);
542 	}
543 
544 reserved:
545 	trace_gfs2_log_blocks(sdp, -blks);
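	/* Exclusive wait: if others still need blocks, pass the wakeup on. */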
546 	if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
547 		wake_up(&sdp->sd_log_waitq);
548 }
549 
550 /**
551  * gfs2_log_try_reserve - Try to make a log reservation
552  * @sdp: The GFS2 superblock
553  * @tr: The transaction
554  * @extra_revokes: The number of additional revokes reserved (output)
555  *
556  * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
557  * held for correct revoke accounting.
558  */
559 
560 bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
561 			  unsigned int *extra_revokes)
562 {
563 	unsigned int blks = tr->tr_reserved;
564 	unsigned int revokes = tr->tr_revokes;
565 	unsigned int revoke_blks = 0;
566 
567 	*extra_revokes = 0;
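	/*
	 * Revokes are reserved in whole blocks; any unused slots in the last
	 * revoke block are handed back to the caller via @extra_revokes.
	 */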
568 	if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
569 		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
570 		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
571 		blks += revoke_blks;
572 	}
573 	if (!blks)
574 		return true;
575 	if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
576 		return true;
577 	if (!revoke_blks)
578 		gfs2_log_release_revokes(sdp, revokes);
579 	return false;
580 }
581 
582 /**
583  * gfs2_log_reserve - Make a log reservation
584  * @sdp: The GFS2 superblock
585  * @tr: The transaction
586  * @extra_revokes: The number of additional revokes reserved (output)
587  *
588  * sdp->sd_log_flush_lock must not be held.
589  */
590 
591 void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
592 		      unsigned int *extra_revokes)
593 {
594 	unsigned int blks = tr->tr_reserved;
595 	unsigned int revokes = tr->tr_revokes;
596 	unsigned int revoke_blks;
597 
598 	*extra_revokes = 0;
599 	if (revokes) {
600 		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
601 		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
602 		blks += revoke_blks;
603 	}
604 	__gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
605 }
606 
607 /**
608  * log_distance - Compute distance between two journal blocks
609  * @sdp: The GFS2 superblock
610  * @newer: The most recent journal block of the pair
611  * @older: The older journal block of the pair
612  *
613  *   Compute the distance (in the journal direction) between two
614  *   blocks in the journal
615  *
616  * Returns: the distance in blocks
617  */
618 
619 static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
620 					unsigned int older)
621 {
622 	int dist;
623 
624 	dist = newer - older;
625 	if (dist < 0)
626 		dist += sdp->sd_jdesc->jd_blocks;
627 
628 	return dist;
629 }
630 
631 /**
632  * calc_reserved - Calculate the number of blocks to keep reserved
633  * @sdp: The GFS2 superblock
634  *
635  * This is complex.  We need to reserve room for all our currently used
636  * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
637  * all our journaled data blocks for journaled files (e.g. files in the
638  * meta_fs like rindex, or files for which chattr +j was done.)
639  * If we don't reserve enough space, corruption will follow.
640  *
641  * We can have metadata blocks and jdata blocks in the same journal.  Each
642  * type gets its own log descriptor, for which we need to reserve a block.
643  * In fact, each type has the potential for needing more than one log descriptor
644  * in cases where we have more blocks than will fit in a log descriptor.
645  * Metadata journal entries take up half the space of journaled buffer entries.
646  *
647  * Also, we need to reserve blocks for revoke journal entries and one for an
648  * overall header for the lot.
649  *
650  * Returns: the number of blocks reserved
651  */
652 static unsigned int calc_reserved(struct gfs2_sbd *sdp)
653 {
654 	unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
655 	unsigned int blocks;
656 	struct gfs2_trans *tr = sdp->sd_log_tr;
657 
658 	if (tr) {
659 		blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
660 		reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
661 		blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
662 		reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
663 	}
664 	return reserved;
665 }
666 
667 static void log_pull_tail(struct gfs2_sbd *sdp)
668 {
669 	unsigned int new_tail = sdp->sd_log_flush_tail;
670 	unsigned int dist;
671 
672 	if (new_tail == sdp->sd_log_tail)
673 		return;
674 	dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
675 	ail2_empty(sdp, new_tail);
676 	gfs2_log_release(sdp, dist);
677 	sdp->sd_log_tail = new_tail;
678 }
679 
681 void log_flush_wait(struct gfs2_sbd *sdp)
682 {
683 	DEFINE_WAIT(wait);
684 
685 	if (atomic_read(&sdp->sd_log_in_flight)) {
686 		do {
687 			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
688 					TASK_UNINTERRUPTIBLE);
689 			if (atomic_read(&sdp->sd_log_in_flight))
690 				io_schedule();
691 		} while(atomic_read(&sdp->sd_log_in_flight));
692 		finish_wait(&sdp->sd_log_flush_wait, &wait);
693 	}
694 }
695 
696 static int ip_cmp(void *priv, const struct list_head *a, const struct list_head *b)
697 {
698 	struct gfs2_inode *ipa, *ipb;
699 
700 	ipa = list_entry(a, struct gfs2_inode, i_ordered);
701 	ipb = list_entry(b, struct gfs2_inode, i_ordered);
702 
703 	if (ipa->i_no_addr < ipb->i_no_addr)
704 		return -1;
705 	if (ipa->i_no_addr > ipb->i_no_addr)
706 		return 1;
707 	return 0;
708 }
709 
710 static void __ordered_del_inode(struct gfs2_inode *ip)
711 {
712 	if (!list_empty(&ip->i_ordered))
713 		list_del_init(&ip->i_ordered);
714 }
715 
716 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
717 {
718 	struct gfs2_inode *ip;
719 	LIST_HEAD(written);
720 
721 	spin_lock(&sdp->sd_ordered_lock);
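	/*
	 * Inodes we start writeback on are moved to a private list so that
	 * sd_ordered_lock can be dropped across filemap_fdatawrite() without
	 * losing our position; they are spliced back once we are done.
	 */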
722 	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
723 	while (!list_empty(&sdp->sd_log_ordered)) {
724 		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
725 		if (ip->i_inode.i_mapping->nrpages == 0) {
726 			__ordered_del_inode(ip);
727 			continue;
728 		}
729 		list_move(&ip->i_ordered, &written);
730 		spin_unlock(&sdp->sd_ordered_lock);
731 		filemap_fdatawrite(ip->i_inode.i_mapping);
732 		spin_lock(&sdp->sd_ordered_lock);
733 	}
734 	list_splice(&written, &sdp->sd_log_ordered);
735 	spin_unlock(&sdp->sd_ordered_lock);
736 }
737 
738 static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
739 {
740 	struct gfs2_inode *ip;
741 
742 	spin_lock(&sdp->sd_ordered_lock);
743 	while (!list_empty(&sdp->sd_log_ordered)) {
744 		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
745 		__ordered_del_inode(ip);
746 		if (ip->i_inode.i_mapping->nrpages == 0)
747 			continue;
748 		spin_unlock(&sdp->sd_ordered_lock);
749 		filemap_fdatawait(ip->i_inode.i_mapping);
750 		spin_lock(&sdp->sd_ordered_lock);
751 	}
752 	spin_unlock(&sdp->sd_ordered_lock);
753 }
754 
755 void gfs2_ordered_del_inode(struct gfs2_inode *ip)
756 {
757 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
758 
759 	spin_lock(&sdp->sd_ordered_lock);
760 	__ordered_del_inode(ip);
761 	spin_unlock(&sdp->sd_ordered_lock);
762 }
763 
764 void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
765 {
766 	struct buffer_head *bh = bd->bd_bh;
767 	struct gfs2_glock *gl = bd->bd_gl;
768 
769 	sdp->sd_log_num_revoke++;
770 	if (atomic_inc_return(&gl->gl_revokes) == 1)
771 		gfs2_glock_hold(gl);
772 	bh->b_private = NULL;
773 	bd->bd_blkno = bh->b_blocknr;
774 	gfs2_remove_from_ail(bd); /* drops ref on bh */
775 	bd->bd_bh = NULL;
776 	set_bit(GLF_LFLUSH, &gl->gl_flags);
777 	list_add(&bd->bd_list, &sdp->sd_log_revokes);
778 }
779 
780 void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
781 {
782 	if (atomic_dec_return(&gl->gl_revokes) == 0) {
783 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
784 		gfs2_glock_queue_put(gl);
785 	}
786 }
787 
788 /**
789  * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
790  * @sdp: The GFS2 superblock
791  *
792  * Our usual strategy is to defer writing revokes as much as we can in the hope
793  * that we'll eventually overwrite the journal, which will make those revokes
794  * go away.  This changes when we flush the log: at that point, there will
795  * likely be some left-over space in the last revoke block of that transaction.
796  * We can fill that space with additional revokes for blocks that have already
797  * been written back.  This will basically come at no cost now, and will save
798  * us from having to keep track of those blocks on the AIL2 list later.
799  */
800 void gfs2_flush_revokes(struct gfs2_sbd *sdp)
801 {
802 	/* number of revokes we still have room for */
803 	unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
804 
805 	gfs2_log_lock(sdp);
806 	gfs2_ail1_empty(sdp, max_revokes);
807 	gfs2_log_unlock(sdp);
808 }
809 
810 /**
811  * gfs2_write_log_header - Write a journal log header buffer at lblock
812  * @sdp: The GFS2 superblock
813  * @jd: journal descriptor of the journal to which we are writing
814  * @seq: sequence number
815  * @tail: tail of the log
816  * @lblock: value for lh_blkno (block number relative to start of journal)
817  * @flags: log header flags GFS2_LOG_HEAD_*
818  * @op_flags: flags to pass to the bio
819  *
820  * Writes the header at @lblock of the journal and submits it as a bio.
821  */
822 
823 void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
824 			   u64 seq, u32 tail, u32 lblock, u32 flags,
825 			   blk_opf_t op_flags)
826 {
827 	struct gfs2_log_header *lh;
828 	u32 hash, crc;
829 	struct page *page;
830 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
831 	struct timespec64 tv;
832 	struct super_block *sb = sdp->sd_vfs;
833 	u64 dblock;
834 
835 	if (gfs2_withdrawn(sdp))
836 		return;
837 
838 	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
839 	lh = page_address(page);
840 	clear_page(lh);
841 
842 	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
843 	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
844 	lh->lh_header.__pad0 = cpu_to_be64(0);
845 	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
846 	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
847 	lh->lh_sequence = cpu_to_be64(seq);
848 	lh->lh_flags = cpu_to_be32(flags);
849 	lh->lh_tail = cpu_to_be32(tail);
850 	lh->lh_blkno = cpu_to_be32(lblock);
851 	hash = ~crc32(~0, lh, LH_V1_SIZE);
852 	lh->lh_hash = cpu_to_be32(hash);
853 
854 	ktime_get_coarse_real_ts64(&tv);
855 	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
856 	lh->lh_sec = cpu_to_be64(tv.tv_sec);
857 	if (!list_empty(&jd->extent_list))
858 		dblock = gfs2_log_bmap(jd, lblock);
859 	else {
860 		unsigned int extlen;
861 		int ret;
862 
863 		extlen = 1;
864 		ret = gfs2_get_extent(jd->jd_inode, lblock, &dblock, &extlen);
865 		if (gfs2_assert_withdraw(sdp, ret == 0))
866 			return;
867 	}
868 	lh->lh_addr = cpu_to_be64(dblock);
869 	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
870 
871 	/* We may only write local statfs, quota, etc., when writing to our
872 	   own journal. The values are left 0 when recovering a journal
873 	   different from our own. */
874 	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
875 		lh->lh_statfs_addr =
876 			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
877 		lh->lh_quota_addr =
878 			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
879 
880 		spin_lock(&sdp->sd_statfs_spin);
881 		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
882 		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
883 		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
884 		spin_unlock(&sdp->sd_statfs_spin);
885 	}
886 
887 	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);
888 
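	/*
	 * lh_crc covers everything after the v1 header except the 4-byte
	 * lh_crc field itself, hence the "+ 4" / "- 4" below.
	 */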
889 	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
890 		     sb->s_blocksize - LH_V1_SIZE - 4);
891 	lh->lh_crc = cpu_to_be32(crc);
892 
893 	gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
894 	gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
895 }
896 
897 /**
898  * log_write_header - Get and initialize a journal header buffer
899  * @sdp: The GFS2 superblock
900  * @flags: The log header flags, including log header origin
901  *
902  * Returns: the initialized log buffer descriptor
903  */
904 
905 static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
906 {
907 	blk_opf_t op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
908 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
909 
910 	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
911 
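	/*
	 * Without barrier support we cannot rely on REQ_PREFLUSH / REQ_FUA,
	 * so explicitly wait for ordered data and in-flight log I/O before
	 * the header itself is written.
	 */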
912 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
913 		gfs2_ordered_wait(sdp);
914 		log_flush_wait(sdp);
915 		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
916 	}
917 	sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
918 	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
919 			      sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
920 			      flags, op_flags);
921 	gfs2_log_incr_head(sdp);
922 	log_flush_wait(sdp);
923 	log_pull_tail(sdp);
924 	gfs2_log_update_head(sdp);
925 }
926 
927 /**
928  * gfs2_ail_drain - drain the ail lists after a withdraw
929  * @sdp: Pointer to GFS2 superblock
930  */
931 void gfs2_ail_drain(struct gfs2_sbd *sdp)
932 {
933 	struct gfs2_trans *tr;
934 
935 	spin_lock(&sdp->sd_ail_lock);
936 	/*
937 	 * For transactions on the sd_ail1_list we need to drain both the
938 	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
939 	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
940 	 * before revokes are sent for that block. Items on the sd_ail2_list
941 	 * should have already gotten beyond that point, so no need.
942 	 */
943 	while (!list_empty(&sdp->sd_ail1_list)) {
944 		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
945 				      tr_list);
946 		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
947 		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
948 		list_del(&tr->tr_list);
949 		gfs2_trans_free(sdp, tr);
950 	}
951 	while (!list_empty(&sdp->sd_ail2_list)) {
952 		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
953 				      tr_list);
954 		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
955 		list_del(&tr->tr_list);
956 		gfs2_trans_free(sdp, tr);
957 	}
958 	gfs2_drain_revokes(sdp);
959 	spin_unlock(&sdp->sd_ail_lock);
960 }
961 
962 /**
963  * empty_ail1_list - try to start IO and empty the ail1 list
964  * @sdp: Pointer to GFS2 superblock
965  */
966 static void empty_ail1_list(struct gfs2_sbd *sdp)
967 {
968 	unsigned long start = jiffies;
969 
970 	for (;;) {
971 		if (time_after(jiffies, start + (HZ * 600))) {
972 			fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
973 			       __func__, current->journal_info ? 1 : 0);
974 			dump_ail_list(sdp);
975 			return;
976 		}
977 		gfs2_ail1_start(sdp);
978 		gfs2_ail1_wait(sdp);
979 		if (gfs2_ail1_empty(sdp, 0))
980 			return;
981 	}
982 }
983 
984 /**
985  * trans_drain - drain the buf and databuf queue for a failed transaction
986  * @tr: the transaction to drain
987  *
988  * When this is called, we're taking an error exit for a log write that failed
989  * but since we bypassed the after_commit functions, we need to remove the
990  * items from the buf and databuf queue.
991  */
992 static void trans_drain(struct gfs2_trans *tr)
993 {
994 	struct gfs2_bufdata *bd;
995 	struct list_head *head;
996 
997 	if (!tr)
998 		return;
999 
1000 	head = &tr->tr_buf;
1001 	while (!list_empty(head)) {
1002 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
1003 		list_del_init(&bd->bd_list);
1004 		if (!list_empty(&bd->bd_ail_st_list))
1005 			gfs2_remove_from_ail(bd);
1006 		kmem_cache_free(gfs2_bufdata_cachep, bd);
1007 	}
1008 	head = &tr->tr_databuf;
1009 	while (!list_empty(head)) {
1010 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
1011 		list_del_init(&bd->bd_list);
1012 		if (!list_empty(&bd->bd_ail_st_list))
1013 			gfs2_remove_from_ail(bd);
1014 		kmem_cache_free(gfs2_bufdata_cachep, bd);
1015 	}
1016 }
1017 
1018 /**
1019  * gfs2_log_flush - flush incore transaction(s)
1020  * @sdp: The filesystem
1021  * @gl: The glock structure to flush.  If NULL, flush the whole incore log
1022  * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
1023  *
1024  */
1025 
1026 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
1027 {
1028 	struct gfs2_trans *tr = NULL;
1029 	unsigned int reserved_blocks = 0, used_blocks = 0;
1030 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
1031 	unsigned int first_log_head;
1032 	unsigned int reserved_revokes = 0;
1033 
1034 	down_write(&sdp->sd_log_flush_lock);
1035 	trace_gfs2_log_flush(sdp, 1, flags);
1036 
1037 repeat:
1038 	/*
1039 	 * Do this check while holding the log_flush_lock to prevent new
1040 	 * buffers from being added to the ail via gfs2_pin()
1041 	 */
1042 	if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
1043 		goto out;
1044 
1045 	/* Log might have been flushed while we waited for the flush lock */
1046 	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
1047 		goto out;
1048 
1049 	first_log_head = sdp->sd_log_head;
1050 	sdp->sd_log_flush_head = first_log_head;
1051 
1052 	tr = sdp->sd_log_tr;
1053 	if (tr || sdp->sd_log_num_revoke) {
1054 		if (reserved_blocks)
1055 			gfs2_log_release(sdp, reserved_blocks);
1056 		reserved_blocks = sdp->sd_log_blks_reserved;
1057 		reserved_revokes = sdp->sd_log_num_revoke;
1058 		if (tr) {
1059 			sdp->sd_log_tr = NULL;
1060 			tr->tr_first = first_log_head;
1061 			if (unlikely (state == SFS_FROZEN)) {
1062 				if (gfs2_assert_withdraw_delayed(sdp,
1063 				       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
1064 					goto out_withdraw;
1065 			}
1066 		}
1067 	} else if (!reserved_blocks) {
1068 		unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
1069 
1070 		reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
1071 		if (current == sdp->sd_logd_process)
1072 			taboo_blocks = 0;
1073 
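		/*
		 * No transaction to flush: reserve just enough blocks for the
		 * log header.  If that cannot be done without sleeping, drop
		 * the flush lock while blocking for the reservation and then
		 * retry from the top, since the log may have been flushed in
		 * the meantime.
		 */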
1074 		if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
1075 			up_write(&sdp->sd_log_flush_lock);
1076 			__gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
1077 			down_write(&sdp->sd_log_flush_lock);
1078 			goto repeat;
1079 		}
1080 		BUG_ON(sdp->sd_log_num_revoke);
1081 	}
1082 
1083 	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
1084 		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
1085 
1086 	if (unlikely(state == SFS_FROZEN))
1087 		if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
1088 			goto out_withdraw;
1089 
1090 	gfs2_ordered_write(sdp);
1091 	if (gfs2_withdrawn(sdp))
1092 		goto out_withdraw;
1093 	lops_before_commit(sdp, tr);
1094 	if (gfs2_withdrawn(sdp))
1095 		goto out_withdraw;
1096 	gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
1097 	if (gfs2_withdrawn(sdp))
1098 		goto out_withdraw;
1099 
1100 	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
1101 		log_write_header(sdp, flags);
1102 	} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
1103 		log_write_header(sdp, flags);
1104 	}
1105 	if (gfs2_withdrawn(sdp))
1106 		goto out_withdraw;
1107 	lops_after_commit(sdp, tr);
1108 
1109 	gfs2_log_lock(sdp);
1110 	sdp->sd_log_blks_reserved = 0;
1111 
1112 	spin_lock(&sdp->sd_ail_lock);
1113 	if (tr && !list_empty(&tr->tr_ail1_list)) {
1114 		list_add(&tr->tr_list, &sdp->sd_ail1_list);
1115 		tr = NULL;
1116 	}
1117 	spin_unlock(&sdp->sd_ail_lock);
1118 	gfs2_log_unlock(sdp);
1119 
1120 	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
1121 		if (!sdp->sd_log_idle) {
1122 			empty_ail1_list(sdp);
1123 			if (gfs2_withdrawn(sdp))
1124 				goto out_withdraw;
1125 			log_write_header(sdp, flags);
1126 		}
1127 		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
1128 			     GFS2_LOG_HEAD_FLUSH_FREEZE))
1129 			gfs2_log_shutdown(sdp);
1130 		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
1131 			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
1132 	}
1133 
1134 out_end:
1135 	used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
1136 	reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
1137 	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
1138 	gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs == sdp->sd_ldptrs);
1139 	if (reserved_revokes > sdp->sd_ldptrs)
1140 		reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
1141 out:
1142 	if (used_blocks != reserved_blocks) {
1143 		gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
1144 		gfs2_log_release(sdp, reserved_blocks - used_blocks);
1145 	}
1146 	up_write(&sdp->sd_log_flush_lock);
1147 	gfs2_trans_free(sdp, tr);
1148 	if (gfs2_withdrawing(sdp))
1149 		gfs2_withdraw(sdp);
1150 	trace_gfs2_log_flush(sdp, 0, flags);
1151 	return;
1152 
1153 out_withdraw:
1154 	trans_drain(tr);
1155 	/*
1156 	 * If the tr_list is empty, we're withdrawing during a log
1157 	 * flush that targets a transaction, but the transaction was
1158 	 * never queued onto any of the ail lists. Here we add it to
1159 	 * ail1 just so that ail_drain() will find and free it.
1160 	 */
1161 	spin_lock(&sdp->sd_ail_lock);
1162 	if (tr && list_empty(&tr->tr_list))
1163 		list_add(&tr->tr_list, &sdp->sd_ail1_list);
1164 	spin_unlock(&sdp->sd_ail_lock);
1165 	tr = NULL;
1166 	goto out_end;
1167 }
1168 
1169 /**
1170  * gfs2_merge_trans - Merge a new transaction into a cached transaction
1171  * @sdp: the filesystem
1172  * @new: New transaction to be merged
1173  */
1174 
1175 static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
1176 {
1177 	struct gfs2_trans *old = sdp->sd_log_tr;
1178 
1179 	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));
1180 
1181 	old->tr_num_buf_new	+= new->tr_num_buf_new;
1182 	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
1183 	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
1184 	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
1185 	old->tr_revokes		+= new->tr_revokes;
1186 	old->tr_num_revoke	+= new->tr_num_revoke;
1187 
1188 	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
1189 	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
1190 
1191 	spin_lock(&sdp->sd_ail_lock);
1192 	list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
1193 	list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
1194 	spin_unlock(&sdp->sd_ail_lock);
1195 }
1196 
1197 static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1198 {
1199 	unsigned int reserved;
1200 	unsigned int unused;
1201 	unsigned int maxres;
1202 
1203 	gfs2_log_lock(sdp);
1204 
1205 	if (sdp->sd_log_tr) {
1206 		gfs2_merge_trans(sdp, tr);
1207 	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
1208 		gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
1209 		sdp->sd_log_tr = tr;
1210 		set_bit(TR_ATTACHED, &tr->tr_flags);
1211 	}
1212 
1213 	reserved = calc_reserved(sdp);
1214 	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
1215 	gfs2_assert_withdraw(sdp, maxres >= reserved);
1216 	unused = maxres - reserved;
1217 	if (unused)
1218 		gfs2_log_release(sdp, unused);
1219 	sdp->sd_log_blks_reserved = reserved;
1220 
1221 	gfs2_log_unlock(sdp);
1222 }
1223 
1224 /**
1225  * gfs2_log_commit - Commit a transaction to the log
1226  * @sdp: the filesystem
1227  * @tr: the transaction
1228  *
1229  * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
1230  * or the total number of used blocks (pinned blocks plus AIL blocks)
1231  * is greater than thresh2.
1232  *
1233  * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
1234  * journal size.
1235  *
1238 
1239 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1240 {
1241 	log_refund(sdp, tr);
1242 
1243 	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
1244 	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
1245 	    atomic_read(&sdp->sd_log_thresh2)))
1246 		wake_up(&sdp->sd_logd_waitq);
1247 }
1248 
1249 /**
1250  * gfs2_log_shutdown - write a shutdown header into a journal
1251  * @sdp: the filesystem
1252  *
1253  */
1254 
1255 static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
1256 {
1257 	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
1258 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
1259 	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
1260 
1261 	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
1262 	log_pull_tail(sdp);
1263 
1264 	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
1265 	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
1266 }
1267 
1268 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
1269 {
1270 	return (atomic_read(&sdp->sd_log_pinned) +
1271 		atomic_read(&sdp->sd_log_blks_needed) >=
1272 		atomic_read(&sdp->sd_log_thresh1));
1273 }
1274 
1275 static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
1276 {
1277 	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
1278 
1279 	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
1280 		return 1;
1281 
1282 	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
1283 		atomic_read(&sdp->sd_log_thresh2);
1284 }
1285 
1286 /**
1287  * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
1288  * @data: Pointer to GFS2 superblock
1289  *
1290  * Also, periodically check to make sure that we're using the most recent
1291  * journal index.
1292  */
1293 
1294 int gfs2_logd(void *data)
1295 {
1296 	struct gfs2_sbd *sdp = data;
1297 	unsigned long t = 1;
1298 	DEFINE_WAIT(wait);
1299 
1300 	while (!kthread_should_stop()) {
1301 
1302 		if (gfs2_withdrawn(sdp)) {
1303 			msleep_interruptible(HZ);
1304 			continue;
1305 		}
1306 		/* Check for errors writing to the journal */
1307 		if (sdp->sd_log_error) {
1308 			gfs2_lm(sdp,
1309 				"GFS2: fsid=%s: error %d: "
1310 				"withdrawing the file system to "
1311 				"prevent further damage.\n",
1312 				sdp->sd_fsname, sdp->sd_log_error);
1313 			gfs2_withdraw(sdp);
1314 			continue;
1315 		}
1316 
1317 		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
1318 			gfs2_ail1_empty(sdp, 0);
1319 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1320 						  GFS2_LFC_LOGD_JFLUSH_REQD);
1321 		}
1322 
1323 		if (gfs2_ail_flush_reqd(sdp)) {
1324 			gfs2_ail1_start(sdp);
1325 			gfs2_ail1_wait(sdp);
1326 			gfs2_ail1_empty(sdp, 0);
1327 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1328 						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
1329 		}
1330 
1331 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
1332 
1333 		try_to_freeze();
1334 
1335 		do {
1336 			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
1337 					TASK_INTERRUPTIBLE);
1338 			if (!gfs2_ail_flush_reqd(sdp) &&
1339 			    !gfs2_jrnl_flush_reqd(sdp) &&
1340 			    !kthread_should_stop())
1341 				t = schedule_timeout(t);
1342 		} while(t && !gfs2_ail_flush_reqd(sdp) &&
1343 			!gfs2_jrnl_flush_reqd(sdp) &&
1344 			!kthread_should_stop());
1345 		finish_wait(&sdp->sd_logd_waitq, &wait);
1346 	}
1347 
1348 	return 0;
1349 }
1350 
1351