// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
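
/*
 * Worked example (illustrative): if the packed chunk starts with
 * oh_clientid = 0x69 (XFS_TRANSACTION), the big-endian view of the
 * four bytes is 0x69xxxxxx, so be32_to_cpu() followed by a 24-bit
 * right shift recovers the leading byte, 0x69, as the client id.
 */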

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
};

#define XLOG_STATE_STRINGS \
	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }

/*
 * In core log flags
 */
#define XLOG_ICL_NEED_FLUSH	(1 << 0)	/* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA	(1 << 1)	/* iclog needs REQ_FUA */

#define XLOG_ICL_STRINGS \
	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }


/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	0x1	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity after an allocation
 * transaction and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 *  IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 *  NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 *  DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 *  NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *		dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *		dummy transaction, we move to IDLE.
 *
 *
 * With only one dummy transaction, the dummy can end up appended to
 * a file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
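
/*
 * Illustrative sketch (not the kernel implementation): how the covering
 * state machine above advances when an on-disk log write completes.
 * "was_dummy" says whether the record just written carried one of the
 * covering dummy transactions.  Per the rules above, any non-dummy
 * write returns us to NEED, while a dummy write moves DONE to NEED2
 * and DONE2 to IDLE.
 */
static inline int
xlog_covering_example_next_state(int cur_state, bool was_dummy)
{
	switch (cur_state) {
	case XLOG_STATE_COVER_DONE:
		return was_dummy ? XLOG_STATE_COVER_NEED2 :
				   XLOG_STATE_COVER_NEED;
	case XLOG_STATE_COVER_DONE2:
		return was_dummy ? XLOG_STATE_COVER_IDLE :
				   XLOG_STATE_COVER_NEED;
	default:
		return XLOG_STATE_COVER_NEED;
	}
}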

/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX	15

/*
 * Reservation region
 * As would be stored in xfs_log_iovec but without the i_addr which
 * we don't care about.
 */
typedef struct xlog_res {
	uint	r_len;	/* region length		:4 */
	uint	r_type;	/* region's transaction type	:4 */
} xlog_res_t;

typedef struct xlog_ticket {
	struct list_head   t_queue;	 /* reserve/write queue */
	struct task_struct *t_task;	 /* task that owns this ticket */
	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
	int		   t_curr_res;	 /* current reservation in bytes : 4  */
	int		   t_unit_res;	 /* unit reservation in bytes    : 4  */
	char		   t_ocnt;	 /* original count		 : 1  */
	char		   t_cnt;	 /* current count		 : 1  */
	char		   t_clientid;	 /* who does this belong to;	 : 1  */
	char		   t_flags;	 /* properties of reservation	 : 1  */

	/* reservation array fields */
	uint		   t_res_num;                    /* num in array : 4 */
	uint		   t_res_num_ophdrs;		 /* num op hdrs  : 4 */
	uint		   t_res_arr_sum;		 /* array sum    : 4 */
	uint		   t_res_o_flow;		 /* sum overflow : 4 */
	xlog_res_t	   t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */
} xlog_ticket_t;
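
/*
 * Illustrative sketch (an assumed helper, not part of this header): one
 * way a reservation region could be recorded in the ticket accounting
 * array above.  When the array fills, the running sum is folded into
 * t_res_o_flow so total usage can still be reported.
 */
static inline void
xlog_ticket_example_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* array full: fold the sum into the overflow counter */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}
	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}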

/*
 * - A log record header is 512 bytes.  There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to
 *	disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	unsigned int		ic_flags;
	char			*ic_datap;	/* pointer to iclog data */
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
} xlog_in_core_t;
/*
 * The CIL context is used to aggregate per-transaction details and is then
 * passed to the iclog for checkpoint post-commit processing.  After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_csn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			nvecs;		/* number of regions */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
	struct work_struct	push_work;
};
/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;
	struct workqueue_struct	*xc_push_wq;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_csn_t		xc_push_seq;
	bool			xc_push_commit_stable;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	wait_queue_head_t	xc_start_wait;
	xfs_csn_t		xc_current_sequence;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */
} ____cacheline_aligned_in_smp;
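
/*
 * Illustrative sketch (assumes the caller holds xc_push_lock): walking
 * xc_committing to find the commit record LSN for a checkpoint
 * sequence, as described above.  Returning 0 when the sequence is not
 * found or has not committed yet is a convention of this sketch only;
 * the real code can also block on xc_commit_wait until the LSN is
 * determined.
 */
static inline xfs_lsn_t
xlog_cil_example_committed_lsn(struct xfs_cil *cil, xfs_csn_t sequence)
{
	struct xfs_cil_ctx	*ctx;

	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence == sequence)
			return ctx->commit_lsn;
	}
	return 0;
}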

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but not so large that it
 * overwhelms the log or induces too much latency when writing out through the
 * iclogs. We track both space consumed and the number of vectors in the
 * checkpoint context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers.  The number of headers will vary based on the number
 * of io vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to track
 * and limit space consumed in the log rather than by the number of objects
 * being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that.  Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the CIL
 * to grow to a substantial fraction of the log, then we may be pinning hundreds
 * of megabytes of metadata in memory until the CIL flushes. This can cause
 * issues when we are running low on memory - pinned memory cannot be reclaimed,
 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
 * size limit for the CIL that limits the maximum amount of memory pinned by the
 * CIL but does not limit performance by reducing relogging efficiency
 * significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows the CIL to be pushed and progress
 *   to be made without excessive blocking of incoming transaction commits.
 *   This is defined to be 12.5% of the log space - half the 25% push threshold
 *   of the AIL.
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block and
 * yield the CPU, giving the CIL push work a chance to be scheduled and start
 * work. This prevents a process running lots of transactions from overfilling
 * the CIL because it is not yielding the CPU. We set the blocking limit at
 * twice the background push space threshold so we keep in line with the AIL
 * push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the transaction
 * is inserted into the CIL and the push has been triggered. It is largely a
 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
 * limit will be difficult to implement without introducing global serialisation
 * in the CIL commit fast path, and it's not at all clear that we actually need
 * such hard limits given the ~7 years we've run without a hard limit before
 * finding the first situation where a checkpoint size overflow actually
 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
 * we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)
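
/*
 * Worked example (illustrative, assuming a v2 log with maximally sized
 * iclogs): BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4 is 16x the iclog
 * buffer window, i.e. 32MB.  For a 2GB log, l_logsize >> 3 is 256MB,
 * so min_t() selects the 32MB memory-pinning limit: background pushes
 * trigger at 32MB and commits throttle at the 64MB blocking limit.
 */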

/*
 * ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number.  Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;	        /* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;        /* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	long			l_opstate;	/* operational state */
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;  /* size of iclog header */
	int			l_iclog_heads;  /* # of iclog header sectors */
	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of log in bytes */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;   /* start block of log */
	int			l_logsize;      /* size of log in bytes */
	int			l_logBBsize;    /* size of log in BB chunks */

	/* The following block of fields is changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;       /* head log queue	*/
	spinlock_t		l_icloglock;    /* grab to change iclog state */
	int			l_curr_cycle;   /* Cycle number of log writes */
	int			l_prev_cycle;   /* Cycle number before last
						 * block increment */
	int			l_curr_block;   /* current logical log block */
	int			l_prev_block;   /* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* The following field is used for debugging; need to hold icloglock */
#ifdef DEBUG
	void			*l_iclog_bak[XLOG_MAX_ICLOGS];
#endif
	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff;/* padding roundoff */

	/* Users of log incompat features should take a read lock. */
	struct rw_semaphore	l_incompat_users;
};

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))

/*
 * Bits for operational state
 */
#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
#define XLOG_IO_ERROR		2	/* log hit an I/O error and is
					   being shut down */
#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */

static inline bool
xlog_recovery_needed(struct xlog *log)
{
	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
}

static inline bool
xlog_in_recovery(struct xlog *log)
{
	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
}

static inline bool
xlog_is_shutdown(struct xlog *log)
{
	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}

/*
 * Wait until xlog_force_shutdown() has marked the log as shut down
 * so xlog_is_shutdown() will always return true.
 */
static inline void
xlog_shutdown_wait(
	struct xlog	*log)
{
	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
}

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern struct kmem_cache *xfs_log_ticket_cache;
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog	*log,
	int		unit_bytes,
	int		count,
	char		client,
	bool		permanent);

/*
 * Advance the buffer pointer and the write byte accounting: move the
 * destination pointer and the current offset forward by "bytes" and
 * reduce the remaining length to match.
 */
static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;
	*len -= bytes;
	*off += bytes;
}

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct xfs_log_vec *log_vector, struct xlog_ticket *tic,
		uint optype);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
		int eventual_size);
int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}
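
/*
 * Illustrative round trip (not kernel code): an LSN packs the cycle in
 * the high 32 bits and the block in the low 32 bits, so cracking and
 * reassembling is lossless.
 */
static inline bool
xlog_example_lsn_round_trip(atomic64_t *lsn)
{
	uint	cycle, block;

	xlog_crack_atomic_lsn(lsn, &cycle, &block);
	/* equality assumes no concurrent update between the two reads */
	return xlog_assign_lsn(cycle, block) == atomic64_read(lsn);
}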

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
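
/*
 * Illustrative sketch (simplified, not the kernel implementation):
 * consuming byte-granularity reservation space from a grant head.
 * Because the head tracks bytes rather than blocks, partial
 * reservations release exactly with no round-off.  The real code
 * performs this read-modify-write atomically; this sketch assumes
 * external serialisation.
 */
static inline void
xlog_example_grant_add_space(atomic64_t *head, int bytes, int log_size)
{
	int	cycle, space;

	xlog_crack_grant_head(head, &cycle, &space);
	space += bytes;
	if (space >= log_size) {
		/* wrapped past the physical end of the log */
		space -= log_size;
		cycle++;
	}
	xlog_assign_grant_head(head, cycle, space);
}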

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);
void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
			xfs_csn_t *commit_seq, bool regrant);
void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
			struct xlog_in_core *iclog);


/*
 * CIL force routines
 */
void xlog_cil_flush(struct xlog *log);
xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
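
/*
 * Typical caller pattern (illustrative; example_condition() is
 * hypothetical): the spinlock serialises the condition check against
 * the wakeup, and xlog_wait() drops it, so the caller must re-take the
 * lock and re-check the condition after waking:
 *
 *	spin_lock(&log->l_icloglock);
 *	while (!example_condition(log)) {
 *		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 *		spin_lock(&log->l_icloglock);
 *	}
 *	spin_unlock(&log->l_icloglock);
 */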

int xlog_wait_on_iclog(struct xlog_in_core *iclog);

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

#endif	/* __XFS_LOG_PRIV_H__ */