/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef	__XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;
struct xfs_log_callback;

/*
 * Flags for log structure
 */
#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
#define	XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
#define XLOG_IO_ERROR		0x8	/* log hit an I/O error, and being
					   shutdown */
#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
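
/*
 * Worked example (illustrative; XFS_TRANSACTION is defined in
 * xfs_log.h): if oh_clientid is XFS_TRANSACTION (0x69), the packed
 * four-byte chunk reads back as the big-endian value 0x69000000, and
 * xlog_get_client_id() returns 0x69 by converting to CPU order and
 * shifting the client id out of the most significant byte.
 */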

/*
 * In core log state
 */
#define XLOG_STATE_ACTIVE    0x0001 /* Current IC log being written to */
#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
#define XLOG_STATE_SYNCING   0x0004 /* This IC log is syncing */
#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
#define XLOG_STATE_DO_CALLBACK \
			     0x0010 /* Process callback functions */
#define XLOG_STATE_CALLBACK  0x0020 /* Callback functions now */
#define XLOG_STATE_DIRTY     0x0040 /* Dirty IC log, not ready for ACTIVE status */
#define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
#define XLOG_STATE_IOABORT   0x0100 /* force abort on I/O completion (debug) */
#define XLOG_STATE_ALL	     0x7FFF /* All possible valid flags */
#define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */

/*
 * Flags to log ticket
 */
#define XLOG_TIC_INITED		0x1	/* has been initialized */
#define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity after an allocation
 * transaction and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 *  IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 *  NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 *  DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 *  NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *		dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *		dummy transaction, we move to IDLE.
 *
 *
 * A single dummy transaction can end up appended to
 * one file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle;
 * the sketch following the state definitions below illustrates
 * transition rule 2.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
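
/*
 * A sketch of transition rule 2 above: what the covering state becomes
 * when an on-disk log write completes. This is a hypothetical helper
 * for illustration only; the kernel's actual logic lives in
 * xlog_state_clean_log() in xfs_log.c. "was_dummy" says whether the
 * record just written was the dummy transaction.
 */
static inline int xlog_covered_next_state(int cur_state, bool was_dummy)
{
	if (!was_dummy)
		return XLOG_STATE_COVER_NEED;	/* any real transaction */

	switch (cur_state) {
	case XLOG_STATE_COVER_DONE:		/* first dummy on disk */
		return XLOG_STATE_COVER_NEED2;
	case XLOG_STATE_COVER_DONE2:		/* second dummy on disk */
		return XLOG_STATE_COVER_IDLE;
	default:
		/* a dummy write implies DONE or DONE2, per the comment */
		return XLOG_STATE_COVER_NEED;
	}
}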

/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX	15

/*
 * Reservation region
 * As would be stored in xfs_log_iovec but without the i_addr which
 * we don't care about.
 */
typedef struct xlog_res {
	uint	r_len;	/* region length		:4 */
	uint	r_type;	/* region's transaction type	:4 */
} xlog_res_t;

typedef struct xlog_ticket {
	struct list_head   t_queue;	 /* reserve/write queue */
	struct task_struct *t_task;	 /* task that owns this ticket */
	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
	int		   t_curr_res;	 /* current reservation in bytes : 4  */
	int		   t_unit_res;	 /* unit reservation in bytes    : 4  */
	char		   t_ocnt;	 /* original count		 : 1  */
	char		   t_cnt;	 /* current count		 : 1  */
	char		   t_clientid;	 /* who does this belong to;	 : 1  */
	char		   t_flags;	 /* properties of reservation	 : 1  */

	/* reservation array fields */
	uint		   t_res_num;                    /* num in array : 4 */
	uint		   t_res_num_ophdrs;		 /* num op hdrs  : 4 */
	uint		   t_res_arr_sum;		 /* array sum    : 4 */
	uint		   t_res_o_flow;		 /* sum overflow : 4 */
	xlog_res_t	   t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes.  There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to
 *	disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_bp is a pointer to the buffer used to write this incore log to disk.
 * - ic_log is a pointer back to the global log structure.
 * - ic_callback is a linked list of callback function/argument pairs to be
 *	called after an iclog finishes writing.
 * - ic_size is the full size of the header plus data.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callback_*
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xfs_buf		*ic_bp;
	struct xlog		*ic_log;
	int			ic_size;
	int			ic_offset;
	int			ic_bwritecnt;
	unsigned short		ic_state;
	char			*ic_datap;	/* pointer to iclog data */

	/* Callback structures need their own cacheline */
	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
	struct xfs_log_callback	*ic_callback;
	struct xfs_log_callback	**ic_callback_tail;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details as well as to
 * be passed to the iclog for checkpoint post-commit processing.  After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_lsn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			nvecs;		/* number of regions */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	struct xfs_log_callback	log_cb;		/* completion callback hook. */
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_force_lsn() to find a
 * sequence match and extract the commit LSN directly from there (a sketch of
 * this lookup follows the structure below). If the checkpoint is still in the
 * process of committing, we can block waiting for the commit LSN to be
 * determined as well. This should make synchronous operations almost as
 * efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_lsn_t		xc_push_seq;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	xfs_lsn_t		xc_current_sequence;
	struct work_struct	xc_push_work;
} ____cacheline_aligned_in_smp;
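
/*
 * A minimal sketch of the commit LSN lookup described above: a
 * hypothetical helper in the spirit of xlog_cil_force_lsn(), assuming
 * NULLCOMMITLSN from xfs_log.h. xc_push_lock serialises the list walk
 * against contexts being added and removed.
 */
static inline xfs_lsn_t
xlog_cil_lookup_commit_lsn(
	struct xfs_cil	*cil,
	xfs_lsn_t	sequence)
{
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence == sequence) {
			commit_lsn = ctx->commit_lsn;
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;
}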

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively: it must be large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but not so large that it
 * overruns the log or induces too much latency when writing out through the
 * iclogs. We track both space consumed and the number of vectors in the
 * checkpoint context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers.  The number of headers will vary based on the num of
 * io vectors, so limiting on a specific number of vectors is going to result
 * in transactions of varying size. IOWs, it is more consistent to track and
 * limit space consumed in the log rather than by the number of objects being
 * logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that.  Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we will set a lower
 * threshold at which background pushing is attempted without blocking current
 * transaction commits.  A separate, higher bound defines when CIL pushes are
 * enforced to ensure we stay within our maximum checkpoint size bounds, yet
 * give us plenty of space for aggregation on large logs.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	(log->l_logsize >> 3)
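
/*
 * Worked example: on a 128MB log, XLOG_CIL_SPACE_LIMIT() evaluates to
 * 128MB >> 3 = 16MB, so a background push is attempted once the CIL
 * has aggregated roughly 16MB of log space, comfortably below both
 * the "half the log" recovery limit and the 25% free space target
 * described above.
 */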

/*
 * ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number.  Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;	        /* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buf		*l_xbuf;        /* extra buffer for log
						 * wrapping */
	struct xfs_buftarg	*l_targ;        /* buftarg of log */
	struct delayed_work	l_work;		/* background flush work */
	uint			l_flags;
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;  /* size of iclog header */
	int			l_iclog_heads;  /* # of iclog header sectors */
	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of log in bytes */
	int			l_iclog_size_log; /* log power size of log */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;   /* start block of log */
	int			l_logsize;      /* size of log in bytes */
	int			l_logBBsize;    /* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;       /* head log queue	*/
	spinlock_t		l_icloglock;    /* grab to change iclog state */
	int			l_curr_cycle;   /* Cycle number of log writes */
	int			l_prev_cycle;   /* Cycle number before last
						 * block increment */
	int			l_curr_block;   /* current logical log block */
	int			l_prev_block;   /* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* The following fields are used for debugging; need to hold icloglock */
#ifdef DEBUG
	void			*l_iclog_bak[XLOG_MAX_ICLOGS];
	/* log record crc error injection factor */
	uint32_t		l_badcrc_factor;
#endif
	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;
};

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))

#define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern int
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern kmem_zone_t *xfs_log_ticket_zone;
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog	*log,
	int		unit_bytes,
	int		count,
	char		client,
	bool		permanent,
	xfs_km_flags_t	alloc_flags);


static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;
	*len -= bytes;
	*off += bytes;
}

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
int
xlog_write(
	struct xlog		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*tic,
	xfs_lsn_t		*start_lsn,
	struct xlog_in_core	**commit_iclog,
	uint			flags);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}
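
/*
 * Usage sketch (illustrative): sampling the log tail without taking any
 * locks, as callers do with l_tail_lsn and l_last_sync_lsn:
 *
 *	uint	cycle, block;
 *
 *	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
 */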

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
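
/*
 * Usage sketch (a hypothetical helper; xlog_grant_head_init() in
 * xfs_log.c performs the equivalent initialisation): pack a cycle/byte
 * pair into a grant head and crack it back out.
 */
static inline void
xlog_grant_head_example(struct xlog_grant_head *head)
{
	int	cycle, space;

	xlog_assign_grant_head(&head->grant, 1, 0);	/* cycle 1, byte 0 */
	xlog_crack_grant_head(&head->grant, &cycle, &space);
	/* here cycle == 1 and space == 0 */
}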

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);

/*
 * CIL force routines
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog *log,
	xfs_lsn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
}

/*
 * Unmount record type is used as a pseudo transaction type for the ticket.
 * Its value must be outside the range of XFS_TRANS_* values.
 */
#define XLOG_UNMOUNT_REC_TYPE	(-1U)

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
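
/*
 * Typical usage sketch (illustrative; iclog_has_space() is a
 * hypothetical predicate): the caller holds the spinlock that
 * serialises the wakeup, re-checks the wait condition, and only then
 * sleeps. Note that xlog_wait() drops the lock, so it must be re-taken
 * if it is needed after waking:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (!iclog_has_space(iclog))
 *		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */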

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = ACCESS_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

#endif	/* __XFS_LOG_PRIV_H__ */