xref: /openbmc/linux/fs/xfs/xfs_trans_ail.c (revision a1e58bbd)
1 /*
2  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_log.h"
22 #include "xfs_inum.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_dmapi.h"
27 #include "xfs_mount.h"
28 #include "xfs_trans_priv.h"
29 #include "xfs_error.h"
30 
31 STATIC void xfs_ail_insert(xfs_ail_entry_t *, xfs_log_item_t *);
32 STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_entry_t *, xfs_log_item_t *);
33 STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_entry_t *);
34 STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *);
35 
36 #ifdef DEBUG
37 STATIC void xfs_ail_check(xfs_ail_entry_t *, xfs_log_item_t *);
38 #else
39 #define	xfs_ail_check(a,l)
40 #endif /* DEBUG */
41 
42 
43 /*
44  * This is called by the log manager code to determine the LSN
45  * of the tail of the log.  This is exactly the LSN of the first
46  * item in the AIL.  If the AIL is empty, then this function
47  * returns 0.
48  *
49  * We need the AIL lock in order to get a coherent read of the
50  * lsn of the first item in the AIL.
51  */
52 xfs_lsn_t
53 xfs_trans_tail_ail(
54 	xfs_mount_t	*mp)
55 {
56 	xfs_lsn_t	lsn;
57 	xfs_log_item_t	*lip;
58 
59 	spin_lock(&mp->m_ail_lock);
60 	lip = xfs_ail_min(&(mp->m_ail.xa_ail));
61 	if (lip == NULL) {
62 		lsn = (xfs_lsn_t)0;
63 	} else {
64 		lsn = lip->li_lsn;
65 	}
66 	spin_unlock(&mp->m_ail_lock);
67 
68 	return lsn;
69 }
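
/*
 * A minimal usage sketch (hypothetical caller; the real in-tree call site
 * may differ).  The log code can take the returned lsn as the new tail of
 * the log, falling back to its own record of the last-synced lsn when the
 * AIL is empty; "last_sync_lsn" below is an assumed local variable.
 *
 *	xfs_lsn_t	tail_lsn;
 *
 *	tail_lsn = xfs_trans_tail_ail(mp);
 *	if (tail_lsn == 0)
 *		tail_lsn = last_sync_lsn;
 */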
70 
71 /*
72  * xfs_trans_push_ail
73  *
74  * This routine is called to move the tail of the AIL forward.  It does this by
75  * trying to flush items in the AIL whose lsns are below the given
76  * threshold_lsn.
77  *
78  * The push is run asynchronously in a separate thread, so we return the tail
79  * of the log right now instead of the tail after the push. This means we will
80  * either continue right away, or we will sleep waiting on the async thread to
81  * do its work.
82  *
83  * We do this unlocked - we only need to know whether there is anything in the
84  * AIL at the time we are called. We don't need to access the contents of
85  * any of the objects, so the lock is not needed.
86  */
87 void
88 xfs_trans_push_ail(
89 	xfs_mount_t		*mp,
90 	xfs_lsn_t		threshold_lsn)
91 {
92 	xfs_log_item_t		*lip;
93 
94 	lip = xfs_ail_min(&mp->m_ail.xa_ail);
95 	if (lip && !XFS_FORCED_SHUTDOWN(mp)) {
96 		if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0)
97 			xfsaild_wakeup(mp, threshold_lsn);
98 	}
99 }
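
/*
 * Illustrative behaviour (hypothetical lsn values; real lsns encode a
 * cycle/block pair and are compared with XFS_LSN_CMP): if xa_target
 * currently sits at 100, a request to push to threshold_lsn 90 does
 * nothing because the daemon is already pushing at least that far, while
 * a request to push to 110 wakes the daemon, which is expected to advance
 * xa_target to the new threshold.  Nothing is done at all if the AIL is
 * empty or the filesystem has been forcibly shut down.
 */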
100 
101 /*
102  * Return the first item in the AIL with an lsn at or beyond the given
103  * lsn (or the minimum item if the given lsn is zero).  Also return the
104  * current tree generation number for use in calls to xfs_trans_next_ail().
105  */
106 STATIC xfs_log_item_t *
107 xfs_trans_first_push_ail(
108 	xfs_mount_t	*mp,
109 	int		*gen,
110 	xfs_lsn_t	lsn)
111 {
112 	xfs_log_item_t	*lip;
113 
114 	lip = xfs_ail_min(&(mp->m_ail.xa_ail));
115 	*gen = (int)mp->m_ail.xa_gen;
116 	if (lsn == 0)
117 		return lip;
118 
119 	while (lip && (XFS_LSN_CMP(lip->li_lsn, lsn) < 0))
120 		lip = lip->li_ail.ail_forw;
121 
122 	return lip;
123 }
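
/*
 * Illustrative example (hypothetical lsn values): with items at lsns 10,
 * 20 and 30 in the AIL and lsn == 20, the loop above skips the item at 10
 * and returns the item at 20, so a restarted push resumes from the last
 * pushed position rather than from the head of the list.
 */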
124 
125 /*
126  * Function that does the work of pushing on the AIL
127  */
128 long
129 xfsaild_push(
130 	xfs_mount_t	*mp,
131 	xfs_lsn_t	*last_lsn)
132 {
133 	long		tout = 1000; /* milliseconds */
134 	xfs_lsn_t	last_pushed_lsn = *last_lsn;
135 	xfs_lsn_t	target =  mp->m_ail.xa_target;
136 	xfs_lsn_t	lsn;
137 	xfs_log_item_t	*lip;
138 	int		gen;
139 	int		restarts;
140 	int		flush_log, count, stuck;
141 
142 #define	XFS_TRANS_PUSH_AIL_RESTARTS	10
143 
144 	spin_lock(&mp->m_ail_lock);
145 	lip = xfs_trans_first_push_ail(mp, &gen, *last_lsn);
146 	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
147 		/*
148 		 * AIL is empty or our push has reached the end.
149 		 */
150 		spin_unlock(&mp->m_ail_lock);
151 		last_pushed_lsn = 0;
152 		goto out;
153 	}
154 
155 	XFS_STATS_INC(xs_push_ail);
156 
157 	/*
158 	 * While the item we are looking at is below the given threshold,
159 	 * try to flush it out. We'd like not to stop until we've at least
160 	 * tried to push on everything in the AIL with an LSN less than
161 	 * the given threshold.
162 	 *
163 	 * However, we will stop after a certain number of pushes and wait
164 	 * for a reduced timeout to fire before pushing further. This
165 	 * prevents us from spinning when we can't do anything or there is
166 	 * lots of contention on the AIL lists.
167 	 */
168 	tout = 10;
169 	lsn = lip->li_lsn;
170 	flush_log = stuck = count = restarts = 0;
171 	while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
172 		int	lock_result;
173 		/*
174 		 * If we can lock the item without sleeping, unlock the AIL
175 		 * lock and flush the item.  Then re-grab the AIL lock so we
176 		 * can look for the next item on the AIL. List changes are
177 		 * handled by the AIL lookup functions internally.
178 		 *
179 		 * If we can't lock the item, either its holder will flush it
180 		 * or it is already being flushed or it is being relogged.  In
181 		 * any of these cases it is being taken care of and we can just
182 		 * skip to the next item in the list.
183 		 */
184 		lock_result = IOP_TRYLOCK(lip);
185 		spin_unlock(&mp->m_ail_lock);
186 		switch (lock_result) {
187 		case XFS_ITEM_SUCCESS:
188 			XFS_STATS_INC(xs_push_ail_success);
189 			IOP_PUSH(lip);
190 			last_pushed_lsn = lsn;
191 			break;
192 
193 		case XFS_ITEM_PUSHBUF:
194 			XFS_STATS_INC(xs_push_ail_pushbuf);
195 			IOP_PUSHBUF(lip);
196 			last_pushed_lsn = lsn;
197 			break;
198 
199 		case XFS_ITEM_PINNED:
200 			XFS_STATS_INC(xs_push_ail_pinned);
201 			stuck++;
202 			flush_log = 1;
203 			break;
204 
205 		case XFS_ITEM_LOCKED:
206 			XFS_STATS_INC(xs_push_ail_locked);
207 			last_pushed_lsn = lsn;
208 			stuck++;
209 			break;
210 
211 		case XFS_ITEM_FLUSHING:
212 			XFS_STATS_INC(xs_push_ail_flushing);
213 			last_pushed_lsn = lsn;
214 			stuck++;
215 			break;
216 
217 		default:
218 			ASSERT(0);
219 			break;
220 		}
221 
222 		spin_lock(&mp->m_ail_lock);
223 		/* should we bother continuing? */
224 		if (XFS_FORCED_SHUTDOWN(mp))
225 			break;
226 		ASSERT(mp->m_log);
227 
228 		count++;
229 
230 		/*
231 		 * Are there too many items we can't do anything with?
232 		 * If we are skipping too many items because we can't flush
233 		 * them or they are already being flushed, we back off and
234 		 * give them time to complete whatever operation is being
235 		 * done, i.e. remove pressure from the AIL while we can't make
236 		 * progress so traversals don't slow down further inserts and
237 		 * removals to/from the AIL.
238 		 *
239 		 * The value of 100 is an arbitrary magic number based on
240 		 * observation.
241 		 */
242 		if (stuck > 100)
243 			break;
244 
245 		lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
246 		if (lip == NULL)
247 			break;
248 		if (restarts > XFS_TRANS_PUSH_AIL_RESTARTS)
249 			break;
250 		lsn = lip->li_lsn;
251 	}
252 	spin_unlock(&mp->m_ail_lock);
253 
254 	if (flush_log) {
255 		/*
256 		 * If something we need to push out was pinned, then
257 		 * push out the log so it will become unpinned and
258 		 * move forward in the AIL.
259 		 */
260 		XFS_STATS_INC(xs_push_ail_flush);
261 		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
262 	}
263 
264 	if (!count) {
265 		/* We're past our target or empty, so idle */
266 		tout = 1000;
267 	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
268 		/*
269 		 * We reached the target so wait a bit longer for I/O to
270 		 * complete and remove pushed items from the AIL before we
271 		 * start the next scan from the start of the AIL.
272 		 */
273 		tout += 20;
274 		last_pushed_lsn = 0;
275 	} else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) ||
276 		   ((stuck * 100) / count > 90)) {
277 		/*
278 		 * Either there is a lot of contention on the AIL or we
279 		 * are stuck due to operations in progress. "Stuck" in this
280 		 * are stuck due to operations in progress. "Stuck" in this
281 		 * case means that more than 90% of the items we tried to
282 		 * push were stuck.
283 		 *
284 		 * Back off a bit more to allow some I/O to complete before
285 		 */
286 		tout += 10;
287 	}
288 out:
289 	*last_lsn = last_pushed_lsn;
290 	return tout;
291 }	/* xfsaild_push */
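
/*
 * Sketch of how a push daemon is expected to drive this routine (an
 * illustration only; the real thread function lives elsewhere and its
 * loop may differ):
 *
 *	xfs_lsn_t	last_lsn = 0;
 *	long		tout;
 *
 *	for (;;) {
 *		tout = xfsaild_push(mp, &last_lsn);
 *		(sleep for "tout" milliseconds, or until woken by
 *		 xfsaild_wakeup(), then go around again)
 *	}
 */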
292 
293 
294 /*
295  * This is to be called when an item is unlocked that may have
296  * been in the AIL.  It will wake up the first member of the AIL
297  * wait list if this item's unlocking might allow it to progress.
298  * If the item is in the AIL, then we need to get the AIL lock
299  * while doing our checking so we don't race with someone going
300  * to sleep waiting for this event in xfs_trans_push_ail().
301  */
302 void
303 xfs_trans_unlocked_item(
304 	xfs_mount_t	*mp,
305 	xfs_log_item_t	*lip)
306 {
307 	xfs_log_item_t	*min_lip;
308 
309 	/*
310 	 * If we're forcibly shutting down, we may have
311 	 * unlocked log items arbitrarily. The last thing
312 	 * we want to do is to move the tail of the log
313 	 * over some potentially valid data.
314 	 */
315 	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
316 	    XFS_FORCED_SHUTDOWN(mp)) {
317 		return;
318 	}
319 
320 	/*
321 	 * This is the one case where we can call into xfs_ail_min()
322 	 * without holding the AIL lock because we only care about the
323 	 * case where we are at the tail of the AIL.  If the object isn't
324 	 * at the tail, it doesn't matter what result we get back.  This
325 	 * is slightly racy because, since we were just unlocked, we could
326 	 * go to sleep between the call to xfs_ail_min and the call to
327 	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
328 	 * move us out of the tail of the AIL, and then we wake up.  However,
329 	 * the call to xfs_log_move_tail() doesn't do anything if there's
330 	 * not enough free space to wake people up so we're safe calling it.
331 	 */
332 	min_lip = xfs_ail_min(&mp->m_ail.xa_ail);
333 
334 	if (min_lip == lip)
335 		xfs_log_move_tail(mp, 1);
336 }	/* xfs_trans_unlocked_item */
337 
338 
339 /*
340  * Update the position of the item in the AIL with the new
341  * lsn.  If it is not yet in the AIL, add it.  Otherwise, move
342  * it to its new position by removing it and re-adding it.
343  *
344  * Wake up anyone with an lsn less than the item's lsn.  If the item
345  * we move in the AIL is the minimum one, update the tail lsn in the
346  * log manager.
347  *
348  * Increment the AIL's generation count to indicate that the tree
349  * has changed.
350  *
351  * This function must be called with the AIL lock held.  The lock
352  * is dropped before returning.
353  */
354 void
355 xfs_trans_update_ail(
356 	xfs_mount_t	*mp,
357 	xfs_log_item_t	*lip,
358 	xfs_lsn_t	lsn) __releases(mp->m_ail_lock)
359 {
360 	xfs_ail_entry_t		*ailp;
361 	xfs_log_item_t		*dlip=NULL;
362 	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
363 
364 	ailp = &(mp->m_ail.xa_ail);
365 	mlip = xfs_ail_min(ailp);
366 
367 	if (lip->li_flags & XFS_LI_IN_AIL) {
368 		dlip = xfs_ail_delete(ailp, lip);
369 		ASSERT(dlip == lip);
370 	} else {
371 		lip->li_flags |= XFS_LI_IN_AIL;
372 	}
373 
374 	lip->li_lsn = lsn;
375 
376 	xfs_ail_insert(ailp, lip);
377 	mp->m_ail.xa_gen++;
378 
379 	if (mlip == dlip) {
380 		mlip = xfs_ail_min(&(mp->m_ail.xa_ail));
381 		spin_unlock(&mp->m_ail_lock);
382 		xfs_log_move_tail(mp, mlip->li_lsn);
383 	} else {
384 		spin_unlock(&mp->m_ail_lock);
385 	}
386 
387 
388 }	/* xfs_trans_update_ail */
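
/*
 * The locking protocol above is "enter with the AIL lock held, return
 * with it dropped", so a typical call site looks like the sketch below
 * ("commit_lsn" is an illustrative name, not a variable defined here):
 *
 *	spin_lock(&mp->m_ail_lock);
 *	xfs_trans_update_ail(mp, lip, commit_lsn);
 *	(the AIL lock has been released on return)
 */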
389 
390 /*
391  * Delete the given item from the AIL.  It must already be in
392  * the AIL.
393  *
394  * Wake up anyone with an lsn less than the item's lsn.  If the item
395  * we delete in the AIL is the minimum one, update the tail lsn in the
396  * log manager.
397  *
398  * Clear the IN_AIL flag from the item, reset its lsn to 0, and
399  * bump the AIL's generation count to indicate that the tree
400  * has changed.
401  *
402  * This function must be called with the AIL lock held.  The lock
403  * is dropped before returning.
404  */
405 void
406 xfs_trans_delete_ail(
407 	xfs_mount_t	*mp,
408 	xfs_log_item_t	*lip) __releases(mp->m_ail_lock)
409 {
410 	xfs_ail_entry_t		*ailp;
411 	xfs_log_item_t		*dlip;
412 	xfs_log_item_t		*mlip;
413 
414 	if (lip->li_flags & XFS_LI_IN_AIL) {
415 		ailp = &(mp->m_ail.xa_ail);
416 		mlip = xfs_ail_min(ailp);
417 		dlip = xfs_ail_delete(ailp, lip);
418 		ASSERT(dlip == lip);
419 
420 
421 		lip->li_flags &= ~XFS_LI_IN_AIL;
422 		lip->li_lsn = 0;
423 		mp->m_ail.xa_gen++;
424 
425 		if (mlip == dlip) {
426 			mlip = xfs_ail_min(&(mp->m_ail.xa_ail));
427 			spin_unlock(&mp->m_ail_lock);
428 			xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0));
429 		} else {
430 			spin_unlock(&mp->m_ail_lock);
431 		}
432 	}
433 	else {
434 		/*
435 		 * If the file system is not being shut down, we are in
436 		 * serious trouble if we get to this stage.
437 		 */
438 		if (XFS_FORCED_SHUTDOWN(mp))
439 			spin_unlock(&mp->m_ail_lock);
440 		else {
441 			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
442 		"%s: attempting to delete a log item that is not in the AIL",
443 					__FUNCTION__);
444 			spin_unlock(&mp->m_ail_lock);
445 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
446 		}
447 	}
448 }
449 
450 
451 
452 /*
453  * Return the item in the AIL with the smallest lsn.
454  * Return the current tree generation number for use
455  * in calls to xfs_trans_next_ail().
456  */
457 xfs_log_item_t *
458 xfs_trans_first_ail(
459 	xfs_mount_t	*mp,
460 	int		*gen)
461 {
462 	xfs_log_item_t	*lip;
463 
464 	lip = xfs_ail_min(&(mp->m_ail.xa_ail));
465 	*gen = (int)mp->m_ail.xa_gen;
466 
467 	return lip;
468 }
469 
470 /*
471  * If the generation count of the tree has not changed since the
472  * caller last took something from the AIL, then return the element
473  * in the tree which follows the one given.  If the count has changed,
474  * then return the minimum element of the AIL and bump the restarts counter
475  * if one is given.
476  */
477 xfs_log_item_t *
478 xfs_trans_next_ail(
479 	xfs_mount_t	*mp,
480 	xfs_log_item_t	*lip,
481 	int		*gen,
482 	int		*restarts)
483 {
484 	xfs_log_item_t	*nlip;
485 
486 	ASSERT(mp && lip && gen);
487 	if (mp->m_ail.xa_gen == *gen) {
488 		nlip = xfs_ail_next(&(mp->m_ail.xa_ail), lip);
489 	} else {
490 		nlip = xfs_ail_min(&(mp->m_ail).xa_ail);
491 		*gen = (int)mp->m_ail.xa_gen;
492 		if (restarts != NULL) {
493 			XFS_STATS_INC(xs_push_ail_restarts);
494 			(*restarts)++;
495 		}
496 	}
497 
498 	return (nlip);
499 }
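
/*
 * Typical traversal pattern (an illustrative sketch modelled on the
 * xfsaild_push() loop above; declarations are omitted):
 *
 *	spin_lock(&mp->m_ail_lock);
 *	lip = xfs_trans_first_ail(mp, &gen);
 *	while (lip != NULL) {
 *		(drop the AIL lock, operate on lip, then retake the lock;
 *		 if xa_gen changed in the meantime, the call below restarts
 *		 from the minimum item and bumps *restarts)
 *		lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
 *	}
 *	spin_unlock(&mp->m_ail_lock);
 */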
500 
501 
502 /*
503  * The active item list (AIL) is a doubly linked list of log
504  * items sorted by ascending lsn.  The base of the list is
505  * a forw/back pointer pair embedded in the xfs mount structure.
506  * The base is initialized with both pointers pointing to the
507  * base.  This case always needs to be distinguished, because
508  * the base has no lsn to look at.  We almost always insert
509  * at the end of the list, so on inserts we search from the
510  * end of the list to find where the new item belongs.
511  */
512 
513 /*
514  * Initialize the doubly linked list to point only to itself.
515  */
516 int
517 xfs_trans_ail_init(
518 	xfs_mount_t	*mp)
519 {
520 	mp->m_ail.xa_ail.ail_forw = (xfs_log_item_t*)&mp->m_ail.xa_ail;
521 	mp->m_ail.xa_ail.ail_back = (xfs_log_item_t*)&mp->m_ail.xa_ail;
522 	return xfsaild_start(mp);
523 }
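
/*
 * After initialization both pointers of the embedded base entry refer
 * back to the base itself:
 *
 *	mp->m_ail.xa_ail.ail_forw == (xfs_log_item_t *)&mp->m_ail.xa_ail
 *	mp->m_ail.xa_ail.ail_back == (xfs_log_item_t *)&mp->m_ail.xa_ail
 *
 * which is exactly the "empty AIL" state that xfs_ail_min() and
 * xfs_ail_insert() test for.
 */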
524 
525 void
526 xfs_trans_ail_destroy(
527 	xfs_mount_t	*mp)
528 {
529 	xfsaild_stop(mp);
530 }
531 
532 /*
533  * Insert the given log item into the AIL.
534  * We almost always insert at the end of the list, so on inserts
535  * we search from the end of the list to find where the
536  * new item belongs.
537  */
538 STATIC void
539 xfs_ail_insert(
540 	xfs_ail_entry_t	*base,
541 	xfs_log_item_t	*lip)
542 /* ARGSUSED */
543 {
544 	xfs_log_item_t	*next_lip;
545 
546 	/*
547 	 * If the list is empty, just insert the item.
548 	 */
549 	if (base->ail_back == (xfs_log_item_t*)base) {
550 		base->ail_forw = lip;
551 		base->ail_back = lip;
552 		lip->li_ail.ail_forw = (xfs_log_item_t*)base;
553 		lip->li_ail.ail_back = (xfs_log_item_t*)base;
554 		return;
555 	}
556 
557 	next_lip = base->ail_back;
558 	while ((next_lip != (xfs_log_item_t*)base) &&
559 	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) > 0)) {
560 		next_lip = next_lip->li_ail.ail_back;
561 	}
562 	ASSERT((next_lip == (xfs_log_item_t*)base) ||
563 	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
564 	lip->li_ail.ail_forw = next_lip->li_ail.ail_forw;
565 	lip->li_ail.ail_back = next_lip;
566 	next_lip->li_ail.ail_forw = lip;
567 	lip->li_ail.ail_forw->li_ail.ail_back = lip;
568 
569 	xfs_ail_check(base, lip);
570 	return;
571 }
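
/*
 * Worked example (hypothetical lsn values): inserting an item with lsn 25
 * into a list holding lsns 10 <-> 20 <-> 30 starts the search at the back
 * (30), steps back past 30, stops at 20, and links the new item between
 * 20 and 30, preserving the ascending-lsn order.
 */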
572 
573 /*
574  * Delete the given item from the AIL.  Return a pointer to the item.
575  */
576 /*ARGSUSED*/
577 STATIC xfs_log_item_t *
578 xfs_ail_delete(
579 	xfs_ail_entry_t	*base,
580 	xfs_log_item_t	*lip)
581 /* ARGSUSED */
582 {
583 	xfs_ail_check(base, lip);
584 	lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back;
585 	lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw;
586 	lip->li_ail.ail_forw = NULL;
587 	lip->li_ail.ail_back = NULL;
588 
589 	return lip;
590 }
591 
592 /*
593  * Return a pointer to the first item in the AIL.
594  * If the AIL is empty, then return NULL.
595  */
596 STATIC xfs_log_item_t *
597 xfs_ail_min(
598 	xfs_ail_entry_t	*base)
599 /* ARGSUSED */
600 {
601 	register xfs_log_item_t *forw = base->ail_forw;
602 	if (forw == (xfs_log_item_t*)base) {
603 		return NULL;
604 	}
605 	return forw;
606 }
607 
608 /*
609  * Return a pointer to the item which follows
610  * the given item in the AIL.  If the given item
611  * is the last item in the list, then return NULL.
612  */
613 STATIC xfs_log_item_t *
614 xfs_ail_next(
615 	xfs_ail_entry_t	*base,
616 	xfs_log_item_t	*lip)
617 /* ARGSUSED */
618 {
619 	if (lip->li_ail.ail_forw == (xfs_log_item_t*)base) {
620 		return NULL;
621 	}
622 	return lip->li_ail.ail_forw;
623 
624 }
625 
626 #ifdef DEBUG
627 /*
628  * Check that the list is sorted as it should be.
629  */
630 STATIC void
631 xfs_ail_check(
632 	xfs_ail_entry_t *base,
633 	xfs_log_item_t	*lip)
634 {
635 	xfs_log_item_t	*prev_lip;
636 
637 	prev_lip = base->ail_forw;
638 	if (prev_lip == (xfs_log_item_t*)base) {
639 		/*
640 		 * Make sure the pointers are correct when the list
641 		 * is empty.
642 		 */
643 		ASSERT(base->ail_back == (xfs_log_item_t*)base);
644 		return;
645 	}
646 
647 	/*
648 	 * Check the next and previous entries are valid.
649 	 * Check that the next and previous entries are valid.
650 	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
651 	prev_lip = lip->li_ail.ail_back;
652 	if (prev_lip != (xfs_log_item_t*)base) {
653 		ASSERT(prev_lip->li_ail.ail_forw == lip);
654 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
655 	}
656 	prev_lip = lip->li_ail.ail_forw;
657 	if (prev_lip != (xfs_log_item_t*)base) {
658 		ASSERT(prev_lip->li_ail.ail_back == lip);
659 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
660 	}
661 
662 
663 #ifdef XFS_TRANS_DEBUG
664 	/*
665 	 * Walk the list checking forward and backward pointers,
666 	 * lsn ordering, and that every entry has the XFS_LI_IN_AIL
667 	 * flag set. This is really expensive, so only do it when
668 	 * specifically debugging the transaction subsystem.
669 	 */
670 	prev_lip = (xfs_log_item_t*)base;
671 	while (lip != (xfs_log_item_t*)base) {
672 		if (prev_lip != (xfs_log_item_t*)base) {
673 			ASSERT(prev_lip->li_ail.ail_forw == lip);
674 			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
675 		}
676 		ASSERT(lip->li_ail.ail_back == prev_lip);
677 		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
678 		prev_lip = lip;
679 		lip = lip->li_ail.ail_forw;
680 	}
681 	ASSERT(lip == (xfs_log_item_t*)base);
682 	ASSERT(base->ail_back == prev_lip);
683 #endif /* XFS_TRANS_DEBUG */
684 }
685 #endif /* DEBUG */
686