xref: /openbmc/linux/fs/xfs/xfs_trans_ail.c (revision 7490ca1e)
1 /*
2  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3  * Copyright (c) 2008 Dave Chinner
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_types.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_trans_priv.h"
29 #include "xfs_trace.h"
30 #include "xfs_error.h"
31 
32 #ifdef DEBUG
33 /*
34  * Check that the list is sorted as it should be.
35  */
36 STATIC void
37 xfs_ail_check(
38 	struct xfs_ail	*ailp,
39 	xfs_log_item_t	*lip)
40 {
41 	xfs_log_item_t	*prev_lip;
42 
43 	if (list_empty(&ailp->xa_ail))
44 		return;
45 
46 	/*
47 	 * Check the next and previous entries are valid.
48 	 */
49 	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
50 	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
51 	if (&prev_lip->li_ail != &ailp->xa_ail)
52 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
53 
54 	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
55 	if (&prev_lip->li_ail != &ailp->xa_ail)
56 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
57 
58 
59 #ifdef XFS_TRANS_DEBUG
60 	/*
61 	 * Walk the list checking lsn ordering, and that every entry has the
62 	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
63 	 * when specifically debugging the transaction subsystem.
64 	 */
65 	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
66 	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
67 		if (&prev_lip->li_ail != &ailp->xa_ail)
68 			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
69 		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
70 		prev_lip = lip;
71 	}
72 #endif /* XFS_TRANS_DEBUG */
73 }
74 #else /* !DEBUG */
75 #define	xfs_ail_check(a,l)
76 #endif /* DEBUG */
77 
78 /*
79  * Return a pointer to the first item in the AIL.  If the AIL is empty, then
80  * return NULL.
81  */
82 static xfs_log_item_t *
83 xfs_ail_min(
84 	struct xfs_ail  *ailp)
85 {
86 	if (list_empty(&ailp->xa_ail))
87 		return NULL;
88 
89 	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
90 }
91 
92 /*
93  * Return a pointer to the last item in the AIL.  If the AIL is empty, then
94  * return NULL.
95  */
96 static xfs_log_item_t *
97 xfs_ail_max(
98 	struct xfs_ail  *ailp)
99 {
100 	if (list_empty(&ailp->xa_ail))
101 		return NULL;
102 
103 	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
104 }
105 
106 /*
107  * Return a pointer to the item which follows the given item in the AIL.  If
108  * the given item is the last item in the list, then return NULL.
109  */
110 static xfs_log_item_t *
111 xfs_ail_next(
112 	struct xfs_ail  *ailp,
113 	xfs_log_item_t  *lip)
114 {
115 	if (lip->li_ail.next == &ailp->xa_ail)
116 		return NULL;
117 
118 	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
119 }
120 
121 /*
122  * This is called by the log manager code to determine the LSN of the tail of
123  * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
124  * is empty, then this function returns 0.
125  *
126  * We need the AIL lock in order to get a coherent read of the lsn of the first
127  * item in the AIL.
128  */
129 xfs_lsn_t
130 xfs_ail_min_lsn(
131 	struct xfs_ail	*ailp)
132 {
133 	xfs_lsn_t	lsn = 0;
134 	xfs_log_item_t	*lip;
135 
136 	spin_lock(&ailp->xa_lock);
137 	lip = xfs_ail_min(ailp);
138 	if (lip)
139 		lsn = lip->li_lsn;
140 	spin_unlock(&ailp->xa_lock);
141 
142 	return lsn;
143 }
144 
145 /*
146  * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
147  */
148 static xfs_lsn_t
149 xfs_ail_max_lsn(
150 	struct xfs_ail  *ailp)
151 {
152 	xfs_lsn_t       lsn = 0;
153 	xfs_log_item_t  *lip;
154 
155 	spin_lock(&ailp->xa_lock);
156 	lip = xfs_ail_max(ailp);
157 	if (lip)
158 		lsn = lip->li_lsn;
159 	spin_unlock(&ailp->xa_lock);
160 
161 	return lsn;
162 }
163 
164 /*
165  * The cursor keeps track of where our current traversal is up to by tracking
166  * the next item in the list for us. However, for this to be safe, removing an
167  * object from the AIL needs to invalidate any cursor that points to it. Hence
168  * the traversal cursor needs to be linked to the struct xfs_ail so that
169  * deletion can search all the active cursors for invalidation.
170  */
171 STATIC void
172 xfs_trans_ail_cursor_init(
173 	struct xfs_ail		*ailp,
174 	struct xfs_ail_cursor	*cur)
175 {
176 	cur->item = NULL;
177 	list_add_tail(&cur->list, &ailp->xa_cursors);
178 }
179 
180 /*
181  * Get the next item in the traversal and advance the cursor.  If the cursor
182  * was invalidated (the low bit of lip is set), restart the traversal.
183  */
184 struct xfs_log_item *
185 xfs_trans_ail_cursor_next(
186 	struct xfs_ail		*ailp,
187 	struct xfs_ail_cursor	*cur)
188 {
189 	struct xfs_log_item	*lip = cur->item;
190 
191 	if ((__psint_t)lip & 1)
192 		lip = xfs_ail_min(ailp);
193 	if (lip)
194 		cur->item = xfs_ail_next(ailp, lip);
195 	return lip;
196 }
197 
198 /*
199  * When the traversal is complete, we need to remove the cursor from the list
200  * of traversing cursors.
201  */
202 void
203 xfs_trans_ail_cursor_done(
204 	struct xfs_ail		*ailp,
205 	struct xfs_ail_cursor	*cur)
206 {
207 	cur->item = NULL;
208 	list_del_init(&cur->list);
209 }
210 
211 /*
212  * Invalidate any cursor that is pointing to this item. This is called when an
213  * item is removed from the AIL. Any cursor pointing to this object is now
214  * invalid and the traversal needs to be terminated so it doesn't reference a
215  * freed object. We set the low bit of the cursor item pointer so we can
216  * distinguish between an invalidation and the end of the list when getting the
217  * next item from the cursor.
218  */
219 STATIC void
220 xfs_trans_ail_cursor_clear(
221 	struct xfs_ail		*ailp,
222 	struct xfs_log_item	*lip)
223 {
224 	struct xfs_ail_cursor	*cur;
225 
226 	list_for_each_entry(cur, &ailp->xa_cursors, list) {
227 		if (cur->item == lip)
228 			cur->item = (struct xfs_log_item *)
229 					((__psint_t)cur->item | 1);
230 	}
231 }
232 
233 /*
234  * Find the first item in the AIL with the given @lsn by searching in ascending
235  * LSN order and initialise the cursor to point to the next item for an
236  * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
237  * first item in the AIL. Returns NULL if the list is empty.
238  */
239 xfs_log_item_t *
240 xfs_trans_ail_cursor_first(
241 	struct xfs_ail		*ailp,
242 	struct xfs_ail_cursor	*cur,
243 	xfs_lsn_t		lsn)
244 {
245 	xfs_log_item_t		*lip;
246 
247 	xfs_trans_ail_cursor_init(ailp, cur);
248 
249 	if (lsn == 0) {
250 		lip = xfs_ail_min(ailp);
251 		goto out;
252 	}
253 
254 	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
255 		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
256 			goto out;
257 	}
258 	return NULL;
259 
260 out:
261 	if (lip)
262 		cur->item = xfs_ail_next(ailp, lip);
263 	return lip;
264 }
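
/*
 * A minimal usage sketch of the cursor API above, assuming the walk is
 * performed under ailp->xa_lock.  The function name example_ail_walk and
 * the start_lsn parameter are hypothetical, not part of this file.
 */
#if 0	/* illustrative example only */
static void
example_ail_walk(
	struct xfs_ail		*ailp,
	xfs_lsn_t		start_lsn)
{
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->xa_lock);
	/* position at the first item with an LSN >= start_lsn */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, start_lsn);
	while (lip != NULL) {
		/* ... examine or push lip here ... */
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
}
#endif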
265 
266 static struct xfs_log_item *
267 __xfs_trans_ail_cursor_last(
268 	struct xfs_ail		*ailp,
269 	xfs_lsn_t		lsn)
270 {
271 	xfs_log_item_t		*lip;
272 
273 	list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
274 		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
275 			return lip;
276 	}
277 	return NULL;
278 }
279 
280 /*
281  * Find the last item in the AIL with the given @lsn by searching in descending
282  * LSN order and initialise the cursor to point to that item.  If there is no
283  * item with the value of @lsn, then it sets the cursor to the last item with an
284  * LSN lower than @lsn.  Returns NULL if the list is empty.
285  */
286 struct xfs_log_item *
287 xfs_trans_ail_cursor_last(
288 	struct xfs_ail		*ailp,
289 	struct xfs_ail_cursor	*cur,
290 	xfs_lsn_t		lsn)
291 {
292 	xfs_trans_ail_cursor_init(ailp, cur);
293 	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
294 	return cur->item;
295 }
296 
297 /*
298  * Splice the log item list into the AIL at the given LSN. We splice to the
299  * tail of the given LSN to maintain insert order for push traversals. The
300  * cursor is optional, allowing repeated updates to the same LSN to avoid
301  * repeated traversals.  This should not be called with an empty list.
302  */
303 static void
304 xfs_ail_splice(
305 	struct xfs_ail		*ailp,
306 	struct xfs_ail_cursor	*cur,
307 	struct list_head	*list,
308 	xfs_lsn_t		lsn)
309 {
310 	struct xfs_log_item	*lip;
311 
312 	ASSERT(!list_empty(list));
313 
314 	/*
315 	 * Use the cursor to determine the insertion point if one is
316 	 * provided.  If not, or if the one we got is not valid,
317 	 * find the place in the AIL where the items belong.
318 	 */
319 	lip = cur ? cur->item : NULL;
320 	if (!lip || (__psint_t) lip & 1)
321 		lip = __xfs_trans_ail_cursor_last(ailp, lsn);
322 
323 	/*
324 	 * If a cursor is provided, we know we're processing the AIL
325 	 * in lsn order, and future items to be spliced in will
326 	 * follow the last one being inserted now.  Update the
327 	 * cursor to point to that last item, now while we have a
328 	 * reliable pointer to it.
329 	 */
330 	if (cur)
331 		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);
332 
333 	/*
334 	 * Finally perform the splice.  Unless the AIL was empty,
335 	 * lip points to the item in the AIL _after_ which the new
336 	 * items should go.  If lip is null the AIL was empty, so
337 	 * the new items go at the head of the AIL.
338 	 */
339 	if (lip)
340 		list_splice(list, &lip->li_ail);
341 	else
342 		list_splice(list, &ailp->xa_ail);
343 }
344 
345 /*
346  * Delete the given item from the AIL, invalidating cursors that point to it.
347  */
348 static void
349 xfs_ail_delete(
350 	struct xfs_ail  *ailp,
351 	xfs_log_item_t  *lip)
352 {
353 	xfs_ail_check(ailp, lip);
354 	list_del(&lip->li_ail);
355 	xfs_trans_ail_cursor_clear(ailp, lip);
356 }
357 
358 static long
359 xfsaild_push(
360 	struct xfs_ail		*ailp)
361 {
362 	xfs_mount_t		*mp = ailp->xa_mount;
363 	struct xfs_ail_cursor	cur;
364 	xfs_log_item_t		*lip;
365 	xfs_lsn_t		lsn;
366 	xfs_lsn_t		target;
367 	long			tout = 10;
368 	int			stuck = 0;
369 	int			count = 0;
370 	int			push_xfsbufd = 0;
371 
372 	/*
373 	 * If last time we ran we encountered pinned items, force the log first
374 	 * and wait for it before pushing again.
375 	 */
376 	spin_lock(&ailp->xa_lock);
377 	if (ailp->xa_last_pushed_lsn == 0 && ailp->xa_log_flush &&
378 	    !list_empty(&ailp->xa_ail)) {
379 		ailp->xa_log_flush = 0;
380 		spin_unlock(&ailp->xa_lock);
381 		XFS_STATS_INC(xs_push_ail_flush);
382 		xfs_log_force(mp, XFS_LOG_SYNC);
383 		spin_lock(&ailp->xa_lock);
384 	}
385 
386 	target = ailp->xa_target;
387 	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
388 	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
389 		/*
390 		 * AIL is empty or our push has reached the end.
391 		 */
392 		xfs_trans_ail_cursor_done(ailp, &cur);
393 		spin_unlock(&ailp->xa_lock);
394 		goto out_done;
395 	}
396 
397 	XFS_STATS_INC(xs_push_ail);
398 
399 	/*
400 	 * While the item we are looking at is below the given threshold
401 	 * try to flush it out. We'd like not to stop until we've at least
402 	 * tried to push on everything in the AIL with an LSN less than
403 	 * the given threshold.
404 	 *
405 	 * However, we will stop after a certain number of pushes and wait
406 	 * for a reduced timeout to fire before pushing further. This
407 	 * prevents us from spinning when we can't do anything or there is
408 	 * lots of contention on the AIL lists.
409 	 */
410 	lsn = lip->li_lsn;
411 	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
412 		int	lock_result;
413 		/*
414 		 * If we can lock the item without sleeping, unlock the AIL
415 		 * lock and flush the item.  Then re-grab the AIL lock so we
416 		 * can look for the next item on the AIL. List changes are
417 		 * handled by the AIL lookup functions internally.
418 		 *
419 		 * If we can't lock the item, either its holder will flush it
420 		 * or it is already being flushed or it is being relogged.  In
421 		 * any of these cases it is being taken care of and we can just
422 		 * skip to the next item in the list.
423 		 */
424 		lock_result = IOP_TRYLOCK(lip);
425 		spin_unlock(&ailp->xa_lock);
426 		switch (lock_result) {
427 		case XFS_ITEM_SUCCESS:
428 			XFS_STATS_INC(xs_push_ail_success);
429 			trace_xfs_ail_push(lip);
430 
431 			IOP_PUSH(lip);
432 			ailp->xa_last_pushed_lsn = lsn;
433 			break;
434 
435 		case XFS_ITEM_PUSHBUF:
436 			XFS_STATS_INC(xs_push_ail_pushbuf);
437 			trace_xfs_ail_pushbuf(lip);
438 
439 			if (!IOP_PUSHBUF(lip)) {
440 				trace_xfs_ail_pushbuf_pinned(lip);
441 				stuck++;
442 				ailp->xa_log_flush++;
443 			} else {
444 				ailp->xa_last_pushed_lsn = lsn;
445 			}
446 			push_xfsbufd = 1;
447 			break;
448 
449 		case XFS_ITEM_PINNED:
450 			XFS_STATS_INC(xs_push_ail_pinned);
451 			trace_xfs_ail_pinned(lip);
452 
453 			stuck++;
454 			ailp->xa_log_flush++;
455 			break;
456 
457 		case XFS_ITEM_LOCKED:
458 			XFS_STATS_INC(xs_push_ail_locked);
459 			trace_xfs_ail_locked(lip);
460 			stuck++;
461 			break;
462 
463 		default:
464 			ASSERT(0);
465 			break;
466 		}
467 
468 		spin_lock(&ailp->xa_lock);
469 		/* should we bother continuing? */
470 		if (XFS_FORCED_SHUTDOWN(mp))
471 			break;
472 		ASSERT(mp->m_log);
473 
474 		count++;
475 
476 		/*
477 		 * Are there too many items we can't do anything with?
478 		 * If we are skipping too many items because we can't flush
479 		 * them or they are already being flushed, we back off and
480 		 * give them time to complete whatever operation is being
481 		 * done. i.e. remove pressure from the AIL while we can't make
482 		 * progress so traversals don't slow down further inserts and
483 		 * removals to/from the AIL.
484 		 *
485 		 * The value of 100 is an arbitrary magic number based on
486 		 * observation.
487 		 */
488 		if (stuck > 100)
489 			break;
490 
491 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
492 		if (lip == NULL)
493 			break;
494 		lsn = lip->li_lsn;
495 	}
496 	xfs_trans_ail_cursor_done(ailp, &cur);
497 	spin_unlock(&ailp->xa_lock);
498 
499 	if (push_xfsbufd) {
500 		/* we've got delayed write buffers to flush */
501 		wake_up_process(mp->m_ddev_targp->bt_task);
502 	}
503 
504 	/* assume we have more work to do in a short while */
505 out_done:
506 	if (!count) {
507 		/* We're past our target or empty, so idle */
508 		ailp->xa_last_pushed_lsn = 0;
509 		ailp->xa_log_flush = 0;
510 
511 		tout = 50;
512 	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
513 		/*
514 		 * We reached the target so wait a bit longer for I/O to
515 		 * complete and remove pushed items from the AIL before we
516 		 * start the next scan from the start of the AIL.
517 		 */
518 		tout = 50;
519 		ailp->xa_last_pushed_lsn = 0;
520 	} else if ((stuck * 100) / count > 90) {
521 		/*
522 		 * Either there is a lot of contention on the AIL or we
523 		 * are stuck due to operations in progress. "Stuck" in this
524 		 * case is defined as >90% of the items we tried to push
525 		 * were stuck.
526 		 *
527 		 * Backoff a bit more to allow some I/O to complete before
528 		 * restarting from the start of the AIL. This prevents us
529 		 * from spinning on the same items, and if they are pinned it
530 		 * will allow the restart to issue a log force to unpin the stuck
531 		 * items.
532 		 */
533 		tout = 20;
534 		ailp->xa_last_pushed_lsn = 0;
535 	}
536 
537 	return tout;
538 }
539 
540 static int
541 xfsaild(
542 	void		*data)
543 {
544 	struct xfs_ail	*ailp = data;
545 	long		tout = 0;	/* milliseconds */
546 
547 	while (!kthread_should_stop()) {
548 		if (tout && tout <= 20)
549 			__set_current_state(TASK_KILLABLE);
550 		else
551 			__set_current_state(TASK_INTERRUPTIBLE);
552 		schedule_timeout(tout ?
553 				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
554 
555 		try_to_freeze();
556 
557 		tout = xfsaild_push(ailp);
558 	}
559 
560 	return 0;
561 }
562 
563 /*
564  * This routine is called to move the tail of the AIL forward.  It does this by
565  * trying to flush items in the AIL whose lsns are below the given
566  * threshold_lsn.
567  *
568  * The push is run asynchronously by the xfsaild thread, which means the
569  * caller needs to handle waiting on the async flush for space to become
570  * available.  We never move the push target backwards, so we only update
571  * it and wake the xfsaild when the new threshold is beyond the current one.
572  *
573  * We do this unlocked - we only need to know whether there is anything in the
574  * AIL at the time we are called. We don't need to access the contents of
575  * any of the objects, so the lock is not needed.
576  */
577 void
578 xfs_ail_push(
579 	struct xfs_ail	*ailp,
580 	xfs_lsn_t	threshold_lsn)
581 {
582 	xfs_log_item_t	*lip;
583 
584 	lip = xfs_ail_min(ailp);
585 	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
586 	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
587 		return;
588 
589 	/*
590 	 * Ensure that the new target is noticed by the push code before
591 	 * we wake up the xfsaild.
592 	 */
593 	smp_wmb();
594 	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
595 	smp_wmb();
596 
597 	wake_up_process(ailp->xa_task);
598 }
599 
600 /*
601  * Push out all items in the AIL immediately
602  */
603 void
604 xfs_ail_push_all(
605 	struct xfs_ail  *ailp)
606 {
607 	xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);
608 
609 	if (threshold_lsn)
610 		xfs_ail_push(ailp, threshold_lsn);
611 }
612 
613 /*
614  * This is to be called when an item is unlocked that may have
615  * been in the AIL.  If the item is at the minimum LSN of the AIL,
616  * its unlocking may allow the tail of the log to move forward, so
617  * we call xfs_log_move_tail() to wake up anyone waiting on log space.
618  * The check against the AIL minimum is done without taking the AIL
619  * lock; see the comment in the function body for why that is safe.
620  */
621 void
622 xfs_trans_unlocked_item(
623 	struct xfs_ail	*ailp,
624 	xfs_log_item_t	*lip)
625 {
626 	xfs_log_item_t	*min_lip;
627 
628 	/*
629 	 * If we're forcibly shutting down, we may have
630 	 * unlocked log items arbitrarily. The last thing
631 	 * we want to do is to move the tail of the log
632 	 * over some potentially valid data.
633 	 */
634 	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
635 	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
636 		return;
637 	}
638 
639 	/*
640 	 * This is the one case where we can call into xfs_ail_min()
641 	 * without holding the AIL lock because we only care about the
642 	 * case where we are at the tail of the AIL.  If the object isn't
643 	 * at the tail, it doesn't matter what result we get back.  This
644 	 * is slightly racy because since we were just unlocked, we could
645 	 * go to sleep between the call to xfs_ail_min and the call to
646 	 * xfs_log_move_tail, have someone else lock us, commit to us disk,
647 	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
648 	 * the call to xfs_log_move_tail() doesn't do anything if there's
649 	 * not enough free space to wake people up so we're safe calling it.
650 	 */
651 	min_lip = xfs_ail_min(ailp);
652 
653 	if (min_lip == lip)
654 		xfs_log_move_tail(ailp->xa_mount, 1);
655 }	/* xfs_trans_unlocked_item */
656 
657 /*
658  * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
659  *
660  * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
661  * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
662  * be added.  Otherwise, it will be repositioned by removing it and re-adding
663  * it to the AIL. If we move the first item in the AIL, update the log tail to
664  * match the new minimum LSN in the AIL.
665  *
666  * This function takes the AIL lock once to execute the update operations on
667  * all the items in the array, and as such should not be called with the AIL
668  * lock held. As a result, once we have the AIL lock, we need to check each log
669  * item LSN to confirm it needs to be moved forward in the AIL.
670  *
671  * To optimise the insert operation, we delete all the items from the AIL in
672  * the first pass, moving them into a temporary list, then splice the temporary
673  * list into the correct position in the AIL. This avoids needing to do an
674  * insert operation on every item.
675  *
676  * This function must be called with the AIL lock held.  The lock is dropped
677  * before returning.
678  */
679 void
680 xfs_trans_ail_update_bulk(
681 	struct xfs_ail		*ailp,
682 	struct xfs_ail_cursor	*cur,
683 	struct xfs_log_item	**log_items,
684 	int			nr_items,
685 	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
686 {
687 	xfs_log_item_t		*mlip;
688 	xfs_lsn_t		tail_lsn;
689 	int			mlip_changed = 0;
690 	int			i;
691 	LIST_HEAD(tmp);
692 
693 	ASSERT(nr_items > 0);		/* Not required, but true. */
694 	mlip = xfs_ail_min(ailp);
695 
696 	for (i = 0; i < nr_items; i++) {
697 		struct xfs_log_item *lip = log_items[i];
698 		if (lip->li_flags & XFS_LI_IN_AIL) {
699 			/* check if we really need to move the item */
700 			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
701 				continue;
702 
703 			xfs_ail_delete(ailp, lip);
704 			if (mlip == lip)
705 				mlip_changed = 1;
706 		} else {
707 			lip->li_flags |= XFS_LI_IN_AIL;
708 		}
709 		lip->li_lsn = lsn;
710 		list_add(&lip->li_ail, &tmp);
711 	}
712 
713 	if (!list_empty(&tmp))
714 		xfs_ail_splice(ailp, cur, &tmp, lsn);
715 
716 	if (!mlip_changed) {
717 		spin_unlock(&ailp->xa_lock);
718 		return;
719 	}
720 
721 	/*
722 	 * It is not safe to access mlip after the AIL lock is dropped, so we
723 	 * must get a copy of li_lsn before we do so.  This is especially
724 	 * important on 32-bit platforms where accessing and updating 64-bit
725 	 * values like li_lsn is not atomic.
726 	 */
727 	mlip = xfs_ail_min(ailp);
728 	tail_lsn = mlip->li_lsn;
729 	spin_unlock(&ailp->xa_lock);
730 	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
731 }
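
/*
 * A minimal sketch of a single item update built on the bulk interface
 * above; the convenience wrapper in xfs_trans_priv.h takes this general
 * shape.  The name example_ail_update_one is hypothetical.
 */
#if 0	/* illustrative example only */
static inline void
example_ail_update_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	/* no cursor: xfs_ail_splice() searches for the insertion point */
	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
#endif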
732 
733 /*
734  * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
735  *
736  * @xfs_trans_ail_delete_bulk takes an array of log items that all need to
737  * be removed from the AIL. The caller is already holding the AIL lock, and
738  * has done all the checks necessary to ensure the items passed in via
739  * @log_items are ready for deletion, including that they are in the AIL.
740  *
741  * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
742  * flag from the item and reset the item's lsn to 0. If we remove the first
743  * item in the AIL, update the log tail to match the new minimum LSN in the
744  * AIL.
745  *
746  * This function will not drop the AIL lock until all items are removed from
747  * the AIL to minimise the amount of lock traffic on the AIL. This does not
748  * greatly increase the AIL hold time, but does significantly reduce the amount
749  * of traffic on the lock, especially during IO completion.
750  *
751  * This function must be called with the AIL lock held.  The lock is dropped
752  * before returning.
753  */
754 void
755 xfs_trans_ail_delete_bulk(
756 	struct xfs_ail		*ailp,
757 	struct xfs_log_item	**log_items,
758 	int			nr_items) __releases(ailp->xa_lock)
759 {
760 	xfs_log_item_t		*mlip;
761 	xfs_lsn_t		tail_lsn;
762 	int			mlip_changed = 0;
763 	int			i;
764 
765 	mlip = xfs_ail_min(ailp);
766 
767 	for (i = 0; i < nr_items; i++) {
768 		struct xfs_log_item *lip = log_items[i];
769 		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
770 			struct xfs_mount	*mp = ailp->xa_mount;
771 
772 			spin_unlock(&ailp->xa_lock);
773 			if (!XFS_FORCED_SHUTDOWN(mp)) {
774 				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
775 		"%s: attempting to delete a log item that is not in the AIL",
776 						__func__);
777 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
778 			}
779 			return;
780 		}
781 
782 		xfs_ail_delete(ailp, lip);
783 		lip->li_flags &= ~XFS_LI_IN_AIL;
784 		lip->li_lsn = 0;
785 		if (mlip == lip)
786 			mlip_changed = 1;
787 	}
788 
789 	if (!mlip_changed) {
790 		spin_unlock(&ailp->xa_lock);
791 		return;
792 	}
793 
794 	/*
795 	 * It is not safe to access mlip after the AIL lock is dropped, so we
796 	 * must get a copy of li_lsn before we do so.  This is especially
797 	 * important on 32-bit platforms where accessing and updating 64-bit
798 	 * values like li_lsn is not atomic. It is possible we've emptied the
799 	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
800 	 */
801 	mlip = xfs_ail_min(ailp);
802 	tail_lsn = mlip ? mlip->li_lsn : 0;
803 	spin_unlock(&ailp->xa_lock);
804 	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
805 }
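
/*
 * Similarly, a minimal sketch of a single item removal built on the bulk
 * interface above.  The name example_ail_delete_one is hypothetical.
 */
#if 0	/* illustrative example only */
static inline void
example_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip) __releases(ailp->xa_lock)
{
	xfs_trans_ail_delete_bulk(ailp, &lip, 1);
}
#endif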
806 
807 /*
808  * The active item list (AIL) is a doubly linked list of log
809  * items sorted by ascending lsn.  The base of the list is
810  * a forw/back pointer pair embedded in the struct xfs_ail.
811  * The base is initialized with both pointers pointing to the
812  * base.  This case always needs to be distinguished, because
813  * the base has no lsn to look at.  We almost always insert
814  * at the end of the list, so on inserts we search from the
815  * end of the list to find where the new item belongs.
816  */
817 
818 /*
819  * Initialise the AIL, its lists and lock, and start the xfsaild thread.
820  */
821 int
822 xfs_trans_ail_init(
823 	xfs_mount_t	*mp)
824 {
825 	struct xfs_ail	*ailp;
826 
827 	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
828 	if (!ailp)
829 		return ENOMEM;
830 
831 	ailp->xa_mount = mp;
832 	INIT_LIST_HEAD(&ailp->xa_ail);
833 	INIT_LIST_HEAD(&ailp->xa_cursors);
834 	spin_lock_init(&ailp->xa_lock);
835 
836 	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
837 			ailp->xa_mount->m_fsname);
838 	if (IS_ERR(ailp->xa_task))
839 		goto out_free_ailp;
840 
841 	mp->m_ail = ailp;
842 	return 0;
843 
844 out_free_ailp:
845 	kmem_free(ailp);
846 	return ENOMEM;
847 }
848 
849 void
850 xfs_trans_ail_destroy(
851 	xfs_mount_t	*mp)
852 {
853 	struct xfs_ail	*ailp = mp->m_ail;
854 
855 	kthread_stop(ailp->xa_task);
856 	kmem_free(ailp);
857 }
858