xref: /openbmc/linux/fs/gfs2/log.c (revision 581701b7efd60ba13d8a7eed60cbdd7fefaf6696)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
/**
 * gfs2_struct2blk - compute the number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = sdp->sd_ldptrs;

	if (nstruct > first) {
		second = sdp->sd_inptrs;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
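
/*
 * Illustration of the computation above. The pointer counts depend on the
 * block size, so assume round numbers of sd_ldptrs = sd_inptrs = 500 (with
 * 4 KiB blocks the real values are in that neighborhood). For nstruct = 1000
 * structures:
 *
 *	blks = 1 + DIV_ROUND_UP(1000 - 500, 500) = 2
 *
 * i.e. one descriptor block plus one continuation block.
 */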

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list to start I/O on
 *
 * Returns: 0 on success, -EBUSY if the caller should restart its list scan,
 *          or an error from generic_writepages()
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		if (gfs2_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		ret = generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (ret || wbc->nr_to_write <= 0)
			break;
		return -EBUSY;
	}

	return ret;
}
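
/*
 * Note on the -EBUSY return above: sd_ail_lock is dropped around
 * generic_writepages(), so the ail1 list may have changed underneath us.
 * Returning -EBUSY tells gfs2_ail1_flush() to restart its scan from the
 * beginning rather than keep iterating over a possibly stale list.
 */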

static void dump_ail_list(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	fs_err(sdp, "Error: In gfs2_ail1_flush for ten minutes! t=%d\n",
	       current->journal_info ? 1 : 0);

	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
					    bd_ail_st_list) {
			bh = bd->bd_bh;
			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
			       (unsigned long long)bd->bd_blkno, bh);
			if (!bh) {
				fs_err(sdp, "\n");
				continue;
			}
			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
			       "map:%d new:%d ar:%d aw:%d delay:%d "
			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
			       (unsigned long long)bh->b_blocknr,
			       buffer_uptodate(bh), buffer_dirty(bh),
			       buffer_locked(bh), buffer_req(bh),
			       buffer_mapped(bh), buffer_new(bh),
			       buffer_async_read(bh), buffer_async_write(bh),
			       buffer_delay(bh), buffer_write_io_error(bh),
			       buffer_unwritten(bh),
			       buffer_defer_completion(bh),
			       buffer_pinned(bh), buffer_escaped(bh));
		}
	}
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret;
	unsigned long flush_start = jiffies;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	ret = 0;
	if (time_after(jiffies, flush_start + (HZ * 600))) {
		dump_ail_list(sdp);
		goto out;
	}
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr);
		if (ret) {
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
out:
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (ret) {
		gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
			"returned: %d\n", ret);
		gfs2_withdraw(sdp);
	}
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @max_revokes: If nonzero, issue revokes for the bd items for written buffers
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/*
		 * If another process flagged an io error, e.g. writing to the
		 * journal, error all other bhs and move them off the ail1 to
		 * prevent a tight loop when unmount tries to flush ail1,
		 * regardless of whether they're still busy. If no outside
		 * errors were found and the buffer is busy, move to the next.
		 * If the ail buffer is not busy and caught an error, flag it
		 * for others.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}
		/*
		 * If we have space for revokes and the bd is no longer on any
		 * buf list, we can just add a revoke for it immediately and
		 * avoid having to put it on the ail2 list, where it would need
		 * to be revoked later.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * @max_revokes: If non-zero, add revokes where appropriate
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr, &max_revokes);
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
		gfs2_lm(sdp, "fatal: I/O error(s)\n");
		gfs2_withdraw(sdp);
	}

	return ret;
}
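
/*
 * Note the oldest_tr flag above: transactions are scanned oldest first, and
 * only a contiguous run of fully written transactions at the tail of the log
 * may move to the ail2 list. Once one transaction is found with buffers
 * still outstanding, younger transactions must stay on ail1 even if they are
 * empty, so the log tail only ever advances past completely finished
 * transactions.
 */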

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
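
/*
 * gfs2_ail1_wait() blocks on at most one locked buffer per call: it cannot
 * continue the scan after wait_on_buffer() because sd_ail_lock was dropped.
 * Callers therefore invoke it in a loop, e.g. gfs2_ail1_start() followed by
 * gfs2_ail1_wait() and gfs2_ail1_empty(), repeated until ail1 is drained.
 */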

/**
 * gfs2_ail_empty_tr - empty one of the ail lists for a transaction
 * @sdp: the filesystem
 * @tr: the transaction
 * @head: the ail list to empty (tr_ail1_list or tr_ail2_list)
 */

static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
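
/*
 * Worked example for the wrap logic above, using a hypothetical journal of
 * 8192 blocks: if old_tail = 7000 and new_tail = 100, the tail has wrapped,
 * so any transaction starting in [7000, 8192) or [0, 100) is now stale and
 * gets removed. A transaction with tr_first = 7500 matches condition (a),
 * one with tr_first = 50 matches (b), and one with tr_first = 5000 matches
 * neither and is kept.
 */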

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because a small number of header blocks is associated with each log
 * flush. The exact number can't be known until flush time, so we ensure
 * that we always have just enough free blocks to avoid running out
 * during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
					TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while (free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
				free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
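
/*
 * Two notes on gfs2_log_reserve(): the holdback of
 * 7 * (4096 / s_blocksize) blocks scales with the block size, e.g. 7 blocks
 * on a 4 KiB-block filesystem but 28 blocks on a 1 KiB one. Also, a
 * successful reservation returns with sd_log_flush_lock held for reading.
 * A sketch of the expected caller pairing (the error handling here is
 * illustrative, not taken from a real caller):
 *
 *	int error = gfs2_log_reserve(sdp, blks);
 *	if (error)
 *		return error;
 *	... write log data ...
 *	gfs2_log_release(sdp, blks);	// also drops sd_log_flush_lock
 */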

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
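
/*
 * Example of the modular distance, again assuming a hypothetical journal of
 * jd_blocks = 8192: log_distance(sdp, 10, 8000) = 10 - 8000 + 8192 = 202,
 * i.e. the head at block 10 is 202 blocks past a tail that sat at block
 * 8000 before the log wrapped.
 */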

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_committed_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
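
/*
 * To make the arithmetic above concrete, using the 4 KiB-block limits of
 * buf_limit = 502 and databuf_limit = 251 quoted in the comment: a
 * transaction with 600 metadata buffers, 100 journaled data buffers, and no
 * revokes reserves 600 + 100 = 700 data blocks, plus
 * DIV_ROUND_UP(600, 502) = 2 metadata log descriptors, plus
 * DIV_ROUND_UP(100, 251) = 1 jdata descriptor, plus 1 overall header:
 * 704 blocks in total.
 */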

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}
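
/*
 * ip_cmp() keys on i_no_addr, the inode's disk address, so the list_sort()
 * call in gfs2_ordered_write() below submits ordered writeback in ascending
 * block order. On rotational storage this tends to keep the writeback pass
 * mostly sequential instead of seeking back and forth between inodes.
 */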

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
			list_del(&ip->i_ordered);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}
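
/*
 * gfs2_add_revoke() and gfs2_glock_remove_revoke() pair up around the glock
 * reference count: the first revoke queued against a glock takes an extra
 * reference (gfs2_glock_hold) and sets GLF_LFLUSH; when the last revoke is
 * removed, the flag is cleared and the reference is dropped again via
 * gfs2_glock_queue_put().
 */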

/**
 * gfs2_write_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
 * that we'll eventually overwrite the journal, which will make those revokes
 * go away.  This changes when we flush the log: at that point, there will
 * likely be some left-over space in the last revoke block of that transaction.
 * We can fill that space with additional revokes for blocks that have already
 * been written back.  This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_log_lock(sdp);
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
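
/*
 * Rough capacity math for the max_revokes computation above (the struct
 * sizes vary by on-disk format, so treat the numbers as approximate): with
 * 4 KiB blocks, the first revoke block starts with a log descriptor and
 * holds about (4096 - sizeof(struct gfs2_log_descriptor)) / sizeof(u64)
 * ~= 500 revokes; each additional block carries only a meta header and
 * holds slightly more, (4096 - sizeof(struct gfs2_meta_header)) / sizeof(u64).
 */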

/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */

void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	if (gfs2_withdrawn(sdp))
		goto out;

	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(jd, lblock);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left 0 when recovering a journal
	   different from our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
out:
	log_flush_wait(sdp);
}
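
/*
 * The header carries two checksums: lh_hash is a plain crc32 over the first
 * LH_V1_SIZE bytes (the fields of the original v1 header layout), while
 * lh_crc is a crc32c over everything after it. The "+ 4" / "- 4" above skip
 * the 4-byte lh_crc field itself, which sits exactly at offset LH_V1_SIZE
 * (the BUILD_BUG_ON enforces that layout), so the checksum never covers its
 * own storage.
 */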

/**
 * log_write_header - Initialize and write a journal header at the flush head
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);
	gfs2_log_incr_head(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}

/**
 * ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 */
static void ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * For transactions on the sd_ail1_list we need to drain both the
	 * ail1 and ail2 lists. That's because function gfs2_ail1_start_one
	 * (temporarily) moves items from its tr_ail1 list to tr_ail2 list
	 * before revokes are sent for that block. Items on the sd_ail2_list
	 * should have already gotten beyond that point, so no need.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		kfree(tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		kfree(tr);
	}
	spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr = NULL;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/*
	 * Do this check while holding the log_flush_lock to prevent new
	 * buffers from being added to the ail via gfs2_pin()
	 */
	if (gfs2_withdrawn(sdp))
		goto out;

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely(state == SFS_FROZEN))
			if (gfs2_assert_withdraw_delayed(sdp,
			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
				goto out;
	}

	if (unlikely(state == SFS_FROZEN))
		if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
			goto out;
	if (gfs2_assert_withdraw_delayed(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
		goto out;

	gfs2_ordered_write(sdp);
	if (gfs2_withdrawn(sdp))
		goto out;
	lops_before_commit(sdp, tr);
	if (gfs2_withdrawn(sdp))
		goto out;
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
	if (gfs2_withdrawn(sdp))
		goto out;

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	if (gfs2_withdrawn(sdp))
		goto out;
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_committed_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp, 0))
					break;
			}
			if (gfs2_withdrawn(sdp))
				goto out;
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

out:
	if (gfs2_withdrawn(sdp)) {
		ail_drain(sdp); /* frees all transactions */
		tr = NULL;
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new	+= new->tr_num_buf_new;
	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
	old->tr_num_revoke	+= new->tr_num_revoke;
	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}
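
/*
 * Putting numbers on the thresholds mentioned above: for a hypothetical
 * 8192-block journal, thresh1 would be about 8192 / 3 = 2730 blocks and
 * thresh2 about 2 * 8192 / 3 = 5461 blocks. A commit therefore pokes logd
 * once more than ~2730 blocks are pinned in the log, or once total journal
 * usage crosses ~5461 blocks.
 */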

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm(sdp,
				"GFS2: fsid=%s: error %d: "
				"withdrawing the file system to "
				"prevent further damage.\n",
				sdp->sd_fsname, sdp->sd_log_error);
			gfs2_withdraw(sdp);
		}

		did_flush = false;
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while (t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}
1184