xref: /openbmc/linux/fs/xfs/xfs_trans_ail.c (revision 7fe2f639)
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"

struct workqueue_struct	*xfs_ail_wq;	/* AIL workqueue */

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */
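
/*
 * A minimal user-space sketch of the ordering invariant checked above:
 * every entry in the AIL must carry an lsn greater than or equal to
 * that of its predecessor.  The item type below is a stand-in for the
 * kernel structures, not part of this file:
 *
 *	struct item {
 *		struct item		*next;
 *		unsigned long long	lsn;
 *	};
 *
 *	static int list_is_sorted(struct item *head)
 *	{
 *		struct item	*ip;
 *
 *		for (ip = head; ip && ip->next; ip = ip->next)
 *			if (ip->next->lsn < ip->lsn)
 *				return 0;
 *		return 1;
 *	}
 *
 * The real check uses XFS_LSN_CMP() rather than a raw comparison so the
 * cycle and block components of the LSN are compared explicitly.
 */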

/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail  *ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
	struct xfs_ail  *ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail  *ailp,
	xfs_log_item_t  *lip)
{
	if (lip->li_ail.next == &ailp->xa_ail)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}
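
/*
 * Note the trick above: passing &lip->li_ail as the "head" argument to
 * list_first_entry() simply evaluates container_of() on lip->li_ail.next,
 * which yields the entry linked immediately after @lip.
 */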

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the
 * first item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}
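
/*
 * Why the lock matters here: li_lsn is a 64-bit value, and a plain load
 * of a 64-bit variable is not atomic on 32-bit platforms, so an unlocked
 * reader could see half of an old value and half of a new one.  A
 * minimal sketch of the locked-read pattern used above (the demo names
 * are illustrative, not part of this file):
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	static xfs_lsn_t	demo_lsn;
 *
 *	static xfs_lsn_t demo_read_lsn_coherent(void)
 *	{
 *		xfs_lsn_t	lsn;
 *
 *		spin_lock(&demo_lock);
 *		lsn = demo_lsn;
 *		spin_unlock(&demo_lock);
 *		return lsn;
 *	}
 *
 * xfs_trans_ail_copy_lsn(), used later in this file, exists for the
 * same reason on the update side.
 */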

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail  *ailp)
{
	xfs_lsn_t       lsn = 0;
	xfs_log_item_t  *lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * AIL traversal cursor initialisation.
 *
 * The cursor keeps track of where our current traversal is up
 * to by tracking the next item in the list for us. However, for
 * this to be safe, removing an object from the AIL needs to invalidate
 * any cursor that points to it. Hence the traversal cursor needs to
 * be linked to the struct xfs_ail so that deletion can search all the
 * active cursors for invalidation.
 *
 * We don't link the push cursor because it is embedded in the struct
 * xfs_ail and hence easily findable.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	if (cur == &ailp->xa_cursors)
		return;

	cur->next = ailp->xa_cursors.next;
	ailp->xa_cursors.next = cur;
}

/*
 * Set the cursor to the next item, because when we look
 * up the cursor the current item may have been freed.
 */
STATIC void
xfs_trans_ail_cursor_set(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	*lip)
{
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
}

/*
 * Get the next item in the traversal and advance the cursor.
 * If the cursor was invalidated (indicated by bit 0 of the cursor
 * item being set), restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}

/*
 * Now that the traversal is complete, we need to remove the cursor
 * from the list of traversing cursors. Avoid removing the embedded
 * push cursor, but use the fact it is always present to make the
 * list deletion simple.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*done)
{
	struct xfs_ail_cursor	*prev = NULL;
	struct xfs_ail_cursor	*cur;

	done->item = NULL;
	if (done == &ailp->xa_cursors)
		return;
	prev = &ailp->xa_cursors;
	for (cur = prev->next; cur; prev = cur, cur = prev->next) {
		if (cur == done) {
			prev->next = cur->next;
			break;
		}
	}
	ASSERT(cur);
}

/*
 * Invalidate any cursor that is pointing to this item. This is
 * called when an item is removed from the AIL. Any cursor pointing
 * to this object is now invalid and the traversal needs to be
 * terminated so it doesn't reference a freed object. We set bit 0
 * of the cursor item pointer so we can distinguish between an
 * invalidation and the end of the list when getting the next item
 * from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	/* need to search all cursors */
	for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}
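
/*
 * The invalidation scheme above relies on log items being at least
 * 2-byte aligned, so bit 0 of a valid pointer is always zero and can be
 * borrowed as an "invalidated" tag.  A stand-alone sketch of the round
 * trip (illustrative names, not part of this file):
 *
 *	#include <stdint.h>
 *
 *	static void *tag_invalid(void *p)
 *	{
 *		return (void *)((uintptr_t)p | 1);
 *	}
 *
 *	static int is_invalid(void *p)
 *	{
 *		return ((uintptr_t)p & 1) != 0;
 *	}
 *
 * xfs_trans_ail_cursor_next() performs the is_invalid() test and, on a
 * hit, restarts the traversal from xfs_ail_min() instead of
 * dereferencing the stale pointer.
 */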

/*
 * Initialise the cursor and return the first item in the AIL with an
 * lsn at or beyond the given lsn, or the first item in the AIL if the
 * given lsn is zero. Returns NULL if no such item exists. The cursor
 * is advanced so that subsequent calls to xfs_trans_ail_cursor_next()
 * continue the traversal from here.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_ail_min(ailp);
	if (lsn == 0)
		goto out;

	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	lip = NULL;
out:
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}

/*
 * Splice the log item list into the AIL at the given LSN.
 */
static void
xfs_ail_splice(
	struct xfs_ail  *ailp,
	struct list_head *list,
	xfs_lsn_t       lsn)
{
	xfs_log_item_t  *next_lip;

	/* If the AIL is empty, just splice in the new list.  */
	if (list_empty(&ailp->xa_ail)) {
		list_splice(list, &ailp->xa_ail);
		return;
	}

	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
			break;
	}

	ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
	       XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);

	list_splice_init(list, &next_lip->li_ail);
}
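
/*
 * The reverse search above exploits the fact that insertions are almost
 * always at or near the tail of the AIL, because LSNs mostly increase.
 * A stand-alone sketch of the same pattern with the generic list API
 * (the demo types are illustrative, not part of this file):
 *
 *	struct demo_item {
 *		struct list_head	list;
 *		xfs_lsn_t		lsn;
 *	};
 *
 *	static void demo_splice(struct list_head *ail,
 *				struct list_head *batch, xfs_lsn_t lsn)
 *	{
 *		struct demo_item	*pos;
 *
 *		list_for_each_entry_reverse(pos, ail, list) {
 *			if (XFS_LSN_CMP(pos->lsn, lsn) <= 0)
 *				break;
 *		}
 *		list_splice_init(batch, &pos->list);
 *	}
 *
 * If the loop finishes without a match, &pos->list aliases the list
 * head and the batch lands at the front of the list - exactly the case
 * the ASSERT in xfs_ail_splice() accounts for.
 */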

/*
 * Delete the given item from the AIL, and invalidate any cursors
 * pointing at it.
 */
static void
xfs_ail_delete(
	struct xfs_ail  *ailp,
	xfs_log_item_t  *lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
 * to run at a later time if there is more work to do to complete the push.
 */
STATIC void
xfs_ail_worker(
	struct work_struct	*work)
{
	struct xfs_ail		*ailp = container_of(to_delayed_work(work),
					struct xfs_ail, xa_work);
	xfs_mount_t		*mp = ailp->xa_mount;
	struct xfs_ail_cursor	*cur = &ailp->xa_cursors;
	xfs_log_item_t		*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout = 10;
	int			flush_log = 0;
	int			stuck = 0;
	int			count = 0;
	int			push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	target = ailp->xa_target;
	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * AIL is empty or our push has reached the end.
		 */
		xfs_trans_ail_cursor_done(ailp, cur);
		spin_unlock(&ailp->xa_lock);
		goto out_done;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out. We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait
	 * for a reduced timeout to fire before pushing further. This
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;
		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item.  Then re-grab the AIL lock so we
		 * can look for the next item on the AIL. List changes are
		 * handled by the AIL lookup functions internally.
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged.  In
		 * any of these cases it is being taken care of and we can just
		 * skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&ailp->xa_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			ailp->xa_last_pushed_lsn = lsn;
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			ailp->xa_last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&ailp->xa_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done, i.e. we remove pressure from the AIL while we can't
		 * make progress so traversals don't slow down further inserts
		 * and removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, cur);
	spin_unlock(&ailp->xa_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	/* assume we have more work to do in a short while */
out_done:
	if (!count) {
		/* We're past our target or empty, so idle */
		ailp->xa_last_pushed_lsn = 0;

		/*
		 * We clear the XFS_AIL_PUSHING_BIT before checking whether
		 * the target has changed. If the target has changed, this
		 * pushes the requeue race directly onto the result of the
		 * atomic test/set bit, so we are guaranteed that either the
		 * pusher that changed the target or ourselves will requeue
		 * the work (but not both).
		 */
		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
		smp_rmb();
		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
			return;

		tout = 50;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->xa_last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we
		 * are stuck due to operations in progress. "Stuck" in this
		 * case is defined as >90% of the items we tried to push
		 * were stuck.
		 *
		 * Back off a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout = 20;
	}

	/* There is more to do, requeue us.  */
	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
					msecs_to_jiffies(tout));
}
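
/*
 * A condensed sketch of the timeout selection above (the values are
 * taken from the code; the helper name is illustrative):
 *
 *	static long pick_timeout(int count, int stuck, int at_target)
 *	{
 *		if (count == 0 || at_target)
 *			return 50;
 *		if ((stuck * 100) / count > 90)
 *			return 20;
 *		return 10;
 *	}
 *
 * For example, pushing 40 items with 37 of them stuck gives
 * 37 * 100 / 40 = 92 > 90, so the worker backs off to a 20ms requeue
 * instead of the default 10ms.  The count == 0 path in the worker
 * additionally tries to go fully idle before falling back to 50ms.
 */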

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a workqueue, which means the caller needs
 * to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before it clears
	 * the XFS_AIL_PUSHING_BIT.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}
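
/*
 * The requeue handshake between this function and the worker going idle
 * reduces to the following pattern (a sketch mirroring the code above;
 * PUSHING stands in for XFS_AIL_PUSHING_BIT):
 *
 *	pusher:
 *		smp_wmb();
 *		xa_target = new_target;
 *		if (!test_and_set_bit(PUSHING, &xa_flags))
 *			queue_delayed_work(xfs_syncd_wq, &xa_work, 0);
 *
 *	worker, on finding nothing to push:
 *		clear_bit(PUSHING, &xa_flags);
 *		smp_rmb();
 *		if (xa_target == target_sampled_at_start ||
 *		    test_and_set_bit(PUSHING, &xa_flags))
 *			return;
 *		requeue with tout = 50;
 *
 * Exactly one side can win the atomic test_and_set_bit() race, so a
 * target update posted while the worker is going idle is never lost,
 * and the work is never queued twice.
 */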

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
	struct xfs_ail  *ailp)
{
	xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * This is to be called when an item is unlocked that may have
 * been in the AIL.  It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have
	 * unlocked log items arbitrarily. The last thing
	 * we want to do is to move the tail of the log
	 * over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min()
	 * without holding the AIL lock because we only care about the
	 * case where we are at the tail of the AIL.  If the object isn't
	 * at the tail, it doesn't matter what result we get back.  This
	 * is slightly racy because, since we were just unlocked, we could
	 * go to sleep between the call to xfs_ail_min and the call to
	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
	 * move us out of the tail of the AIL, and then we wake up.  However,
	 * the call to xfs_log_move_tail() doesn't do anything if there's
	 * not enough free space to wake people up so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function executes the update operations on all the items in the array
 * under a single hold of the AIL lock, so callers should batch updates rather
 * than update one item at a time. Once we have the AIL lock, we need to check
 * each log item LSN to confirm it needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (lip->li_flags & XFS_LI_IN_AIL) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			lip->li_flags |= XFS_LI_IN_AIL;
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	xfs_ail_splice(ailp, &tmp, lsn);

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so.  This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip->li_lsn;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
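
/*
 * Typical caller pattern (a sketch; the items array and commit_lsn are
 * illustrative, not part of this file): the caller takes xa_lock, and
 * ownership of the lock then passes to xfs_trans_ail_update_bulk(),
 * which drops it before returning:
 *
 *	struct xfs_log_item	*items[2] = { lip1, lip2 };
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_update_bulk(ailp, items, 2, commit_lsn);
 *
 * No unlock follows the call - the __releases() annotation above
 * documents exactly this transfer of lock ownership.
 */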

/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion. This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0. If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
			return;
		}

		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so.  This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic. It is possible we've emptied the
	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip ? mlip->li_lsn : 0;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
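
/*
 * As with the update path, the caller enters with xa_lock held and the
 * bulk delete drops it.  For example, at I/O completion (a sketch with
 * illustrative variables):
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_delete_bulk(ailp, log_items, nr_items);
 *
 * Batching all the completed items into one call keeps lock traffic
 * down to a single acquire/release of xa_lock per completion instead of
 * one per item.
 */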

/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn.  The base of the list is
 * the list head embedded in the struct xfs_ail.  The base is
 * initialized with both pointers pointing to itself.  This case
 * always needs to be distinguished, because the base has no lsn
 * to look at.  We almost always insert at the end of the list,
 * so on inserts we search from the end of the list to find where
 * the new item belongs.
 */

/*
 * Initialize the AIL structure, its list head and its worker, and
 * attach it to the mount.
 */
int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return ENOMEM;

	ailp->xa_mount = mp;
	INIT_LIST_HEAD(&ailp->xa_ail);
	spin_lock_init(&ailp->xa_lock);
	INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
	mp->m_ail = ailp;
	return 0;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	cancel_delayed_work_sync(&ailp->xa_work);
	kmem_free(ailp);
}
811