// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = sdp->sd_ldptrs;

	if (nstruct > first) {
		second = sdp->sd_inptrs;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

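/*
 * Worked example (numbers are illustrative, not taken from this file):
 * with a 4KiB block size, suppose sd_ldptrs is 503 (entries that fit in
 * the first log descriptor block) and sd_inptrs is 509 (entries per
 * continuation block).  Then 600 structures need
 * 1 + DIV_ROUND_UP(600 - 503, 509) = 2 blocks, while anything up to 503
 * structures fits in the single mandatory block.
 */
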
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list we are writing back
 *
 * Returns: 0 on success, -EBUSY if the scan must be restarted, or an
 * errno from generic_writepages()
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		if (gfs2_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		ret = generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (ret || wbc->nr_to_write <= 0)
			break;
		return -EBUSY;
	}

	return ret;
}

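/*
 * Note on the -EBUSY return above: gfs2_ail1_start_one() drops and
 * retakes sd_ail_lock around generic_writepages(), so the ail1 list may
 * have changed underneath it.  Returning -EBUSY after a successful write
 * tells the caller (gfs2_ail1_flush) to restart its scan from the top
 * rather than trust a possibly stale list position.
 */
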
static void dump_ail_list(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	fs_err(sdp, "Error: In gfs2_ail1_flush for ten minutes! t=%d\n",
	       current->journal_info ? 1 : 0);

	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
					    bd_ail_st_list) {
			bh = bd->bd_bh;
			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
			       (unsigned long long)bd->bd_blkno, bh);
			if (!bh) {
				fs_err(sdp, "\n");
				continue;
			}
			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
			       "map:%d new:%d ar:%d aw:%d delay:%d "
			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
			       (unsigned long long)bh->b_blocknr,
			       buffer_uptodate(bh), buffer_dirty(bh),
			       buffer_locked(bh), buffer_req(bh),
			       buffer_mapped(bh), buffer_new(bh),
			       buffer_async_read(bh), buffer_async_write(bh),
			       buffer_delay(bh), buffer_write_io_error(bh),
			       buffer_unwritten(bh),
			       buffer_defer_completion(bh),
			       buffer_pinned(bh), buffer_escaped(bh));
		}
	}
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret = 0;
	unsigned long flush_start = jiffies;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	if (time_after(jiffies, flush_start + (HZ * 600))) {
		dump_ail_list(sdp);
		goto out;
	}
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr);
		if (ret) {
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
out:
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (ret) {
		gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
			"returned: %d\n", ret);
		gfs2_withdraw(sdp);
	}
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/*
		 * If another process flagged an io error, e.g. writing to the
		 * journal, error all other bhs and move them off the ail1 to
		 * prevent a tight loop when unmount tries to flush ail1,
		 * regardless of whether they're still busy. If no outside
		 * errors were found and the buffer is busy, move to the next.
		 * If the ail buffer is not busy and caught an error, flag it
		 * for others.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}
		/*
		 * If we have space for revokes and the bd is no longer on any
		 * buf list, we can just add a revoke for it immediately and
		 * avoid having to put it on the ail2 list, where it would need
		 * to be revoked later.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * @max_revokes: If non-zero, add revokes where appropriate
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr, &max_revokes);
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
		gfs2_lm(sdp, "fatal: I/O error(s)\n");
		gfs2_withdraw(sdp);
	}

	return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail_empty_tr - empty one of the ail lists for a transaction
 * @sdp: the filesystem
 * @tr: the transaction
 * @head: the ail list to empty (tr_ail1_list or tr_ail2_list)
 */

static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}

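/*
 * Illustration of the wrap test in ail2_empty (numbers are made up): in
 * an 8192-block journal with old_tail == 7000 and new_tail == 100, wrap
 * is true, so a transaction is removed when tr_first >= 7000 OR
 * tr_first < 100, i.e. when it lies in the region the tail just moved
 * past, even though that region straddles the end of the journal.
 * Without a wrap (old_tail <= new_tail), both conditions must hold.
 */
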
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because there is a small number of header blocks associated with each
 * log flush. The exact number can't be known until flush time, so we
 * ensure that we have just enough free blocks at all times to avoid
 * running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
				free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}

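/*
 * Example of the reservation slack above (illustrative only): with a
 * 4096-byte filesystem block size, reserved_blks works out to
 * 7 * (4096 / 4096) = 7 blocks; with 1024-byte blocks it would be
 * 7 * (4096 / 1024) = 28 blocks.  A caller asking for blks == 10 thus
 * sleeps until at least 18 (or 39) blocks are free, which keeps a
 * cushion for the header blocks that a later log flush will consume.
 */
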
/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

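/*
 * Worked example (journal size is illustrative): in an 8192-block
 * journal, log_distance(sdp, 10, 8000) = 10 - 8000 + 8192 = 202 blocks,
 * reflecting a head that has wrapped past the end of the journal and
 * come back around to block 10.
 */
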
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_committed_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

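/*
 * Worked example using the limits quoted in the comment above (502
 * metadata entries or 251 journaled-data entries per descriptor block):
 * a transaction with mbuf = 600 and dbuf = 10 reserves
 * 600 + 10 (the buffers themselves)
 * + DIV_ROUND_UP(600, 502) = 2 (metadata log descriptor blocks)
 * + DIV_ROUND_UP(10, 251) = 1 (jdata log descriptor block)
 * + 1 (the overall log header), i.e. 614 blocks in total, plus revoke
 * blocks whenever sd_log_committed_revoke is positive.
 */
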
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}

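/*
 * Sorting the ordered-write list by disk address via ip_cmp appears to
 * be a locality optimisation: the list_sort() call in
 * gfs2_ordered_write() below then issues filemap_fdatawrite() calls in
 * roughly ascending disk order.  The sort order itself should not
 * affect correctness, only the I/O pattern.
 */
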
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
			list_del(&ip->i_ordered);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}

/**
 * gfs2_write_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
 * that we'll eventually overwrite the journal, which will make those revokes
 * go away.  This changes when we flush the log: at that point, there will
 * likely be some left-over space in the last revoke block of that transaction.
 * We can fill that space with additional revokes for blocks that have already
 * been written back.  This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_log_lock(sdp);
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}

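/*
 * Rough numbers for the sizing above (purely illustrative; the real
 * values depend on sb_bsize and the on-disk structure sizes): with 4KiB
 * blocks, the first revoke block holds on the order of 500 u64 block
 * numbers after its log descriptor, and each continuation block holds
 * slightly more after its smaller meta header.  The while loop grows
 * max_revokes one continuation block at a time until it covers the
 * revokes already queued; whatever is left over is the free space that
 * gfs2_ail1_empty() may fill with additional revokes.
 */
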
/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	if (gfs2_withdrawn(sdp))
		goto out;

	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(jd, lblock);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
out:
	log_flush_wait(sdp);
}

/**
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);
	gfs2_log_incr_head(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}

/**
 * ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 */
static void ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * For transactions on the sd_ail1_list we need to drain both the
	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
	 * before revokes are sent for that block. Items on the sd_ail2_list
	 * should have already gotten beyond that point, so no need.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		kfree(tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		kfree(tr);
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr = NULL;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/*
	 * Do this check while holding the log_flush_lock to prevent new
	 * buffers from being added to the ail via gfs2_pin()
	 */
	if (gfs2_withdrawn(sdp))
		goto out;

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely(state == SFS_FROZEN))
			if (gfs2_assert_withdraw_delayed(sdp,
			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
				goto out;
	}

	if (unlikely(state == SFS_FROZEN))
		if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
			goto out;
	if (gfs2_assert_withdraw_delayed(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
		goto out;

	gfs2_ordered_write(sdp);
	if (gfs2_withdrawn(sdp))
		goto out;
	lops_before_commit(sdp, tr);
	if (gfs2_withdrawn(sdp))
		goto out;
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
	if (gfs2_withdrawn(sdp))
		goto out;

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	if (gfs2_withdrawn(sdp))
		goto out;
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_committed_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp, 0))
					break;
			}
			if (gfs2_withdrawn(sdp))
				goto out;
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

out:
	if (gfs2_withdrawn(sdp)) {
		ail_drain(sdp); /* frees all transactions */
		tr = NULL;
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new	+= new->tr_num_buf_new;
	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
	old->tr_num_revoke	+= new->tr_num_revoke;
	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

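/*
 * Refund example (made-up numbers): if sd_log_blks_reserved was 100, the
 * incoming transaction reserved tr_reserved = 20, and calc_reserved()
 * now says 115 blocks suffice, then unused = (100 + 20) - 115 = 5 blocks
 * are returned to sd_log_blks_free above, which is why this path is
 * called a refund.
 */
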
/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}

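/*
 * Example with the mount-time defaults described at gfs2_log_commit()
 * (thresh1 = 1/3 and thresh2 = 2/3 of the journal) and an illustrative
 * 8192-block journal: logd is asked for a journal flush once pinned plus
 * needed blocks reach about 2730, and for AIL writeback once used plus
 * needed blocks reach about 5461.
 */
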
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm(sdp,
				"GFS2: fsid=%s: error %d: "
				"withdrawing the file system to "
				"prevent further damage.\n",
				sdp->sd_fsname, sdp->sd_log_error);
			gfs2_withdraw(sdp);
		}

		did_flush = false;
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}