// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
#include "trans.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);

/**
 * gfs2_struct2blk - compute the number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
	unsigned int blks;
	unsigned int first, second;

	/* The initial struct gfs2_log_descriptor block */
	blks = 1;
	first = sdp->sd_ldptrs;

	if (nstruct > first) {
		/* Subsequent struct gfs2_meta_header blocks */
		second = sdp->sd_inptrs;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: the transaction to examine
 *
 * Returns: 0 on success, -EBUSY if the caller should restart the scan,
 * or an error code from writeback
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		if (gfs2_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		ret = generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (ret == -ENODATA) /* if a jdata write into a new hole */
			ret = 0; /* ignore it */
		if (ret || wbc->nr_to_write <= 0)
			break;
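		/*
		 * The ail lock was dropped while writing back this glock's
		 * pages, so the lists may have changed under us; have the
		 * caller restart its scan from the beginning.
		 */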
		return -EBUSY;
	}

	return ret;
}

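/**
 * dump_ail_list - print the state of every buffer on the ail1 lists
 * @sdp: the filesystem
 *
 * Used for debugging when an ail1 flush appears to be stuck.
 */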
static void dump_ail_list(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
					    bd_ail_st_list) {
			bh = bd->bd_bh;
			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
			       (unsigned long long)bd->bd_blkno, bh);
			if (!bh) {
				fs_err(sdp, "\n");
				continue;
			}
			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
			       "map:%d new:%d ar:%d aw:%d delay:%d "
			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
			       (unsigned long long)bh->b_blocknr,
			       buffer_uptodate(bh), buffer_dirty(bh),
			       buffer_locked(bh), buffer_req(bh),
			       buffer_mapped(bh), buffer_new(bh),
			       buffer_async_read(bh), buffer_async_write(bh),
			       buffer_delay(bh), buffer_write_io_error(bh),
			       buffer_unwritten(bh),
			       buffer_defer_completion(bh),
			       buffer_pinned(bh), buffer_escaped(bh));
		}
	}
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret;
	unsigned long flush_start = jiffies;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	ret = 0;
	if (time_after(jiffies, flush_start + (HZ * 600))) {
		fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
		       __func__, current->journal_info ? 1 : 0);
		dump_ail_list(sdp);
		goto out;
	}
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr);
		if (ret) {
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
out:
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (ret) {
		gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
			"returned: %d\n", ret);
		gfs2_withdraw(sdp);
	}
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
 *
 * Returns: the transaction's count of remaining active items
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int active_count = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/*
		 * If another process flagged an io error, e.g. writing to the
		 * journal, error all other bhs and move them off the ail1 to
		 * prevent a tight loop when unmount tries to flush ail1,
		 * regardless of whether they're still busy. If no outside
		 * errors were found and the buffer is busy, move to the next.
		 * If the ail buffer is not busy and caught an error, flag it
		 * for others.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh)) {
			active_count++;
			continue;
		}
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}
		/*
		 * If we have space for revokes and the bd is no longer on any
		 * buf list, we can just add a revoke for it immediately and
		 * avoid having to put it on the ail2 list, where it would need
		 * to be revoked later.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
	return active_count;
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * @max_revokes: If non-zero, add revokes where appropriate
 *
 * Tries to empty the ail1 lists, starting with the oldest first.
 *
 * Returns: nonzero if the ail1 list is now empty, zero otherwise
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
		gfs2_lm(sdp, "fatal: I/O error(s)\n");
		gfs2_withdraw(sdp);
	}

	return ret;
}

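/**
 * gfs2_ail1_wait - wait for in-flight ail1 I/O to complete
 * @sdp: The superblock
 *
 * Waits on the first locked buffer found on the ail1 lists, then returns
 * so that the caller can reexamine the lists.
 */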
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
 * @sdp: the filesystem
 * @tr: the transaction
 * @head: the ail list to empty (tr_ail1_list or tr_ail2_list)
 */

static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
	list_del(&tr->tr_list);
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
	gfs2_trans_free(sdp, tr);
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct list_head *ail2_list = &sdp->sd_ail2_list;
	unsigned int old_tail = sdp->sd_log_tail;
	struct gfs2_trans *tr, *safe;

	spin_lock(&sdp->sd_ail_lock);
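	/*
	 * The tail region [old_tail, new_tail) may wrap past the end of the
	 * journal, so handle the contiguous and the wrapped case separately.
	 */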
	if (old_tail <= new_tail) {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	} else {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_is_empty - Check if the log is empty
 * @sdp: The GFS2 superblock
 */

bool gfs2_log_is_empty(struct gfs2_sbd *sdp)
{
	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because there is a small number of header blocks associated with each
 * log flush. The exact number can't be known until flush time, so we
 * ensure that we have just enough free blocks at all times to avoid
 * running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 */

void gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while (free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
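	/*
	 * Claim our blocks with a compare-and-swap instead of a lock; if
	 * another reservation raced with us, reexamine the free block count.
	 */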
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
				free_blocks - blks) != free_blocks)
		goto retry;
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to keep reserved
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data blocks for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, corruption will follow.
 *
 * We can have metadata blocks and jdata blocks in the same journal.  Each
 * type gets its own log descriptor, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one log descriptor
 * in cases where we have more blocks than will fit in a log descriptor.
 * Metadata journal entries take up half the space of journaled buffer entries.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */

static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_committed_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

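/**
 * current_tail - find the current tail of the log
 * @sdp: The superblock
 *
 * Returns: the first journal block of the oldest transaction on the ail1
 * list, or the log head if the ail1 list is empty
 */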
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);
	gfs2_log_release(sdp, dist);
	sdp->sd_log_tail = new_tail;
}

void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}

static void __ordered_del_inode(struct gfs2_inode *ip)
{
	if (!list_empty(&ip->i_ordered))
		list_del_init(&ip->i_ordered);
}

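/**
 * gfs2_ordered_write - start writeback of the ordered data inodes
 * @sdp: the filesystem
 *
 * For ordered-mode data, dirty pages must be written out before the log
 * is committed, so that file data reaches disk before the metadata that
 * refers to it.
 */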
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			__ordered_del_inode(ip);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		__ordered_del_inode(ip);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	__ordered_del_inode(ip);
	spin_unlock(&sdp->sd_ordered_lock);
}

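/**
 * gfs2_add_revoke - queue a revoke for a buffer
 * @sdp: the filesystem
 * @bd: the bufdata to revoke
 *
 * Takes the bufdata off the ail lists (dropping its buffer reference) and
 * adds it to the list of pending revokes.
 */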
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}

/**
 * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
 * that we'll eventually overwrite the journal, which will make those revokes
 * go away.  This changes when we flush the log: at that point, there will
 * likely be some left-over space in the last revoke block of that transaction.
 * We can fill that space with additional revokes for blocks that have already
 * been written back.  This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */

void gfs2_flush_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	unsigned int max_revokes;

	gfs2_log_lock(sdp);
	max_revokes = sdp->sd_ldptrs;
	if (sdp->sd_log_num_revoke > sdp->sd_ldptrs)
		max_revokes += roundup(sdp->sd_log_num_revoke - sdp->sd_ldptrs,
				       sdp->sd_inptrs);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved) {
			atomic_dec(&sdp->sd_log_blks_free);
			trace_gfs2_log_blocks(sdp, -2);
		} else {
			trace_gfs2_log_blocks(sdp, -1);
		}
	}
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved) {
			atomic_inc(&sdp->sd_log_blks_free);
			trace_gfs2_log_blocks(sdp, 2);
		} else {
			trace_gfs2_log_blocks(sdp, 1);
		}
	}
}

/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

748 			   u64 seq, u32 tail, u32 lblock, u32 flags,
749 			   int op_flags)
750 {
751 	struct gfs2_log_header *lh;
752 	u32 hash, crc;
753 	struct page *page;
754 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
755 	struct timespec64 tv;
756 	struct super_block *sb = sdp->sd_vfs;
757 	u64 dblock;
758 
759 	if (gfs2_withdrawn(sdp))
760 		return;
761 
762 	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
763 	lh = page_address(page);
764 	clear_page(lh);
765 
766 	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
767 	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
768 	lh->lh_header.__pad0 = cpu_to_be64(0);
769 	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
770 	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
771 	lh->lh_sequence = cpu_to_be64(seq);
772 	lh->lh_flags = cpu_to_be32(flags);
773 	lh->lh_tail = cpu_to_be32(tail);
774 	lh->lh_blkno = cpu_to_be32(lblock);
775 	hash = ~crc32(~0, lh, LH_V1_SIZE);
776 	lh->lh_hash = cpu_to_be32(hash);
777 
778 	ktime_get_coarse_real_ts64(&tv);
779 	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
780 	lh->lh_sec = cpu_to_be64(tv.tv_sec);
781 	if (!list_empty(&jd->extent_list))
782 		dblock = gfs2_log_bmap(jd, lblock);
783 	else {
784 		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
785 		if (gfs2_assert_withdraw(sdp, ret == 0))
786 			return;
787 	}
788 	lh->lh_addr = cpu_to_be64(dblock);
789 	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
790 
791 	/* We may only write local statfs, quota, etc., when writing to our
792 	   own journal. The values are left 0 when recovering a journal
793 	   different from our own. */
794 	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
795 		lh->lh_statfs_addr =
796 			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
797 		lh->lh_quota_addr =
798 			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
799 
800 		spin_lock(&sdp->sd_statfs_spin);
801 		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
802 		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
803 		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
804 		spin_unlock(&sdp->sd_statfs_spin);
805 	}
806 
807 	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);
808 
809 	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
810 		     sb->s_blocksize - LH_V1_SIZE - 4);
811 	lh->lh_crc = cpu_to_be32(crc);
812 
813 	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
814 	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
815 }
816 
/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);
	gfs2_log_incr_head(sdp);
	log_flush_wait(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}

/**
 * ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 */

static void ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * For transactions on the sd_ail1_list we need to drain both the
	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
	 * before revokes are sent for that block. Items on the sd_ail2_list
	 * should have already gotten beyond that point, so no need.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * empty_ail1_list - try to start IO and empty the ail1 list
 * @sdp: Pointer to GFS2 superblock
 */

static void empty_ail1_list(struct gfs2_sbd *sdp)
{
	unsigned long start = jiffies;

	for (;;) {
		if (time_after(jiffies, start + (HZ * 600))) {
			fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
			       __func__, current->journal_info ? 1 : 0);
			dump_ail_list(sdp);
			return;
		}
		gfs2_ail1_start(sdp);
		gfs2_ail1_wait(sdp);
		if (gfs2_ail1_empty(sdp, 0))
			return;
	}
}

/**
 * trans_drain - drain the buf and databuf queue for a failed transaction
 * @tr: the transaction to drain
 *
 * When this is called, we're taking an error exit for a log write that
 * failed, but since we bypassed the after_commit functions, we need to
 * remove the items from the buf and databuf queue.
 */

static void trans_drain(struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd;
	struct list_head *head;

	if (!tr)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr = NULL;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/*
	 * Do this check while holding the log_flush_lock to prevent new
	 * buffers from being added to the ail via gfs2_pin()
	 */
	if (gfs2_withdrawn(sdp))
		goto out;

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
		goto out;
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely(state == SFS_FROZEN))
			if (gfs2_assert_withdraw_delayed(sdp,
			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
				goto out_withdraw;
	}

	if (unlikely(state == SFS_FROZEN))
		if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
			goto out_withdraw;
	if (gfs2_assert_withdraw_delayed(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
		goto out_withdraw;

	gfs2_ordered_write(sdp);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_before_commit(sdp, tr);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_committed_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			empty_ail1_list(sdp);
			if (gfs2_withdrawn(sdp))
				goto out_withdraw;
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

out_end:
	trace_gfs2_log_flush(sdp, 0, flags);
out:
	up_write(&sdp->sd_log_flush_lock);
	gfs2_trans_free(sdp, tr);
	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
	return;

out_withdraw:
	trans_drain(tr);
	/*
	 * If the tr_list is empty, we're withdrawing during a log
	 * flush that targets a transaction, but the transaction was
	 * never queued onto any of the ail lists. Here we add it to
	 * ail1 just so that ail_drain() will find and free it.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (tr && list_empty(&tr->tr_list))
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);
	ail_drain(sdp); /* frees all transactions */
	tr = NULL;
	goto out_end;
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @sdp: the filesystem
 * @new: New transaction to be merged into the cached transaction
 */

static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
	struct gfs2_trans *old = sdp->sd_log_tr;

	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new	+= new->tr_num_buf_new;
	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
	old->tr_num_revoke	+= new->tr_num_revoke;
	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);

	spin_lock(&sdp->sd_ail_lock);
	list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
	list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
}

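/**
 * log_refund - adjust the log reservation for a transaction
 * @sdp: the filesystem
 * @tr: the transaction being committed
 *
 * Recomputes how many blocks the committed state actually needs and
 * releases any blocks that were reserved but will not be used.
 */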
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	gfs2_log_release(sdp, unused);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}

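/*
 * Returns true when the number of pinned blocks, plus blocks still needed
 * by reservations, reaches the thresh1 watermark.
 */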
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

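/*
 * Returns true when enough of the journal is in use (the thresh2
 * watermark) that the ail must be flushed to advance the log tail,
 * or when an ail flush has been forced.
 */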
static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @data: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp)) {
			msleep_interruptible(HZ);
			continue;
		}
		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm(sdp,
				"GFS2: fsid=%s: error %d: "
				"withdrawing the file system to "
				"prevent further damage.\n",
				sdp->sd_fsname, sdp->sd_log_error);
			gfs2_withdraw(sdp);
			continue;
		}

		did_flush = false;
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while (t && !gfs2_ail_flush_reqd(sdp) &&
			 !gfs2_jrnl_flush_reqd(sdp) &&
			 !kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}