/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"

struct workqueue_struct	*xfs_ail_wq;	/* AIL workqueue */

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);


#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail  *ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
	struct xfs_ail  *ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	if (lip->li_ail.next == &ailp->xa_ail)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the last
 * item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail  *ailp)
{
	xfs_lsn_t       lsn = 0;
	xfs_log_item_t  *lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * AIL traversal cursor initialisation.
 *
 * The cursor keeps track of where our current traversal is up
 * to by tracking the next item in the list for us. However, for
 * this to be safe, removing an object from the AIL needs to invalidate
 * any cursor that points to it. Hence the traversal cursor needs to
 * be linked to the struct xfs_ail so that deletion can search all the
 * active cursors for invalidation.
 *
 * We don't link the push cursor because it is embedded in the struct
 * xfs_ail and hence easily findable.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	if (cur == &ailp->xa_cursors)
		return;

	cur->next = ailp->xa_cursors.next;
	ailp->xa_cursors.next = cur;
}

/*
 * Set the cursor to the next item, because when we look
 * up the cursor the current item may have been freed.
 */
STATIC void
xfs_trans_ail_cursor_set(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	*lip)
{
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
}

/*
 * Get the next item in the traversal and advance the cursor.
 * If the cursor was invalidated (indicated by a lip of 1),
 * restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}

/*
 * Now that the traversal is complete, we need to remove the cursor
 * from the list of traversing cursors. Avoid removing the embedded
 * push cursor, but use the fact it is always present to make the
 * list deletion simple.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*done)
{
	struct xfs_ail_cursor	*prev = NULL;
	struct xfs_ail_cursor	*cur;

	done->item = NULL;
	if (done == &ailp->xa_cursors)
		return;
	prev = &ailp->xa_cursors;
	for (cur = prev->next; cur; prev = cur, cur = prev->next) {
		if (cur == done) {
			prev->next = cur->next;
			break;
		}
	}
	ASSERT(cur);
}

/*
 * Invalidate any cursor that is pointing to this item. This is
 * called when an item is removed from the AIL. Any cursor pointing
 * to this object is now invalid and the traversal needs to be
 * terminated so it doesn't reference a freed object. We set the
 * cursor item to a value of 1 so we can distinguish between an
 * invalidation and the end of the list when getting the next item
 * from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	/* need to search all cursors */
	for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}

/*
 * Initialise the cursor to the first item in the AIL with the given @lsn.
 * This searches the list from lowest LSN to highest. Pass a @lsn of zero
 * to initialise the cursor to the first item in the AIL.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_ail_min(ailp);
	if (lsn == 0)
		goto out;

	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	lip = NULL;
out:
	xfs_trans_ail_cursor_set(ailp, cur, lip);
	return lip;
}

/*
 * Initialise the cursor to the last item in the AIL with the given @lsn.
 * This searches the list from highest LSN to lowest. If there is no item with
 * the value of @lsn, then it sets the cursor to the last item with an LSN lower
 * than @lsn.
 */
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Initialise the cursor to the last item in the AIL with the given @lsn.
 * This searches the list from highest LSN to lowest.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}

/*
 * splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip = cur ? cur->item : NULL;
	struct xfs_log_item	*next_lip;

	/*
	 * Get a new cursor if we don't have a placeholder or the existing one
	 * has been invalidated.
	 */
	if (!lip || (__psint_t)lip & 1) {
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

		if (!lip) {
			/* The list is empty, so just splice and return. */
			if (cur)
				cur->item = NULL;
			list_splice(list, &ailp->xa_ail);
			return;
		}
	}

	/*
	 * Our cursor points to the item we want to insert _after_, so we have
	 * to update the cursor to point to the end of the list we are splicing
	 * in so that it points to the correct location for the next splice.
	 * i.e. before the splice
	 *
	 *  lsn -> lsn -> lsn + x -> lsn + x ...
	 *          ^
	 *          | cursor points here
	 *
	 * After the splice we have:
	 *
	 *  lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
	 *          ^                            ^
	 *          | cursor points here         | needs to move here
	 *
	 * So we set the cursor to the last item in the list to be spliced
	 * before we execute the splice, resulting in the cursor pointing to
	 * the correct item after the splice occurs.
	 */
	if (cur) {
		next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
		cur->item = next_lip;
	}
	list_splice(list, &lip->li_ail);
}

/*
 * Delete the given item from the AIL.
 */
static void
xfs_ail_delete(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
 * to run at a later time if there is more work to do to complete the push.
 */
STATIC void
xfs_ail_worker(
	struct work_struct *work)
{
	struct xfs_ail	*ailp = container_of(to_delayed_work(work),
					struct xfs_ail, xa_work);
	xfs_mount_t	*mp = ailp->xa_mount;
	struct xfs_ail_cursor	*cur = &ailp->xa_cursors;
	xfs_log_item_t	*lip;
	xfs_lsn_t	lsn;
	xfs_lsn_t	target;
	long		tout = 10;
	int		flush_log = 0;
	int		stuck = 0;
	int		count = 0;
	int		push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	target = ailp->xa_target;
	xfs_trans_ail_cursor_init(ailp, cur);
	lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * AIL is empty or our push has reached the end.
		 */
		xfs_trans_ail_cursor_done(ailp, cur);
		spin_unlock(&ailp->xa_lock);
		goto out_done;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out. We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait
	 * for a reduced timeout to fire before pushing further. This
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;
		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item.  Then re-grab the AIL lock so we
		 * can look for the next item on the AIL. List changes are
		 * handled by the AIL lookup functions internally
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged.  In
		 * any of these case it is being taken care of and we can just
		 * skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&ailp->xa_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			ailp->xa_last_pushed_lsn = lsn;
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			ailp->xa_last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&ailp->xa_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * given them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, cur);
	spin_unlock(&ailp->xa_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	/* assume we have more work to do in a short while */
out_done:
	if (!count) {
		/* We're past our target or empty, so idle */
		ailp->xa_last_pushed_lsn = 0;

		/*
		 * We clear the XFS_AIL_PUSHING_BIT first before checking
		 * whether the target has changed. If the target has changed,
		 * this pushes the requeue race directly onto the result of the
		 * atomic test/set bit, so we are guaranteed that either the
		 * pusher that changed the target or ourselves will requeue
		 * the work (but not both).
		 */
		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
		smp_rmb();
		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
			return;

		tout = 50;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->xa_last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we
		 * are stuck due to operations in progress. "Stuck" in this
		 * case is defined as >90% of the items we tried to push
		 * were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout = 20;
	}

	/* There is more to do, requeue us.  */
	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
					msecs_to_jiffies(tout));
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a workqueue, which means the caller needs
 * to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before it clears
	 * the XFS_AIL_PUSHING_BIT.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}

/*
 * Push out all items in the AIL immediately
 */
void
xfs_ail_push_all(
	struct xfs_ail  *ailp)
{
	xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * This is to be called when an item is unlocked that may have
 * been in the AIL. It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have
	 * unlocked log items arbitrarily. The last thing
	 * we want to do is to move the tail of the log
	 * over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min()
	 * without holding the AIL lock because we only care about the
	 * case where we are at the tail of the AIL. If the object isn't
	 * at the tail, it doesn't matter what result we get back. This
	 * is slightly racy because since we were just unlocked, we could
	 * go to sleep between the call to xfs_ail_min and the call to
	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
	 * move us out of the tail of the AIL, and then we wake up. However,
	 * the call to xfs_log_move_tail() doesn't do anything if there's
	 * not enough free space to wake people up so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function takes the AIL lock once to execute the update operations on
 * all the items in the array, and as such should not be called with the AIL
 * lock held. As a result, once we have the AIL lock, we need to check each log
 * item LSN to confirm it needs to be moved forward in the AIL.
7020e57f6a3SDave Chinner * 7030e57f6a3SDave Chinner * To optimise the insert operation, we delete all the items from the AIL in 7040e57f6a3SDave Chinner * the first pass, moving them into a temporary list, then splice the temporary 7050e57f6a3SDave Chinner * list into the correct position in the AIL. This avoids needing to do an 7060e57f6a3SDave Chinner * insert operation on every item. 7070e57f6a3SDave Chinner * 7080e57f6a3SDave Chinner * This function must be called with the AIL lock held. The lock is dropped 7090e57f6a3SDave Chinner * before returning. 7100e57f6a3SDave Chinner */ 7110e57f6a3SDave Chinner void 7120e57f6a3SDave Chinner xfs_trans_ail_update_bulk( 7130e57f6a3SDave Chinner struct xfs_ail *ailp, 7141d8c95a3SDave Chinner struct xfs_ail_cursor *cur, 7150e57f6a3SDave Chinner struct xfs_log_item **log_items, 7160e57f6a3SDave Chinner int nr_items, 7170e57f6a3SDave Chinner xfs_lsn_t lsn) __releases(ailp->xa_lock) 7180e57f6a3SDave Chinner { 7190e57f6a3SDave Chinner xfs_log_item_t *mlip; 7200e57f6a3SDave Chinner xfs_lsn_t tail_lsn; 7210e57f6a3SDave Chinner int mlip_changed = 0; 7220e57f6a3SDave Chinner int i; 7230e57f6a3SDave Chinner LIST_HEAD(tmp); 7240e57f6a3SDave Chinner 7250e57f6a3SDave Chinner mlip = xfs_ail_min(ailp); 7260e57f6a3SDave Chinner 7270e57f6a3SDave Chinner for (i = 0; i < nr_items; i++) { 7280e57f6a3SDave Chinner struct xfs_log_item *lip = log_items[i]; 7290e57f6a3SDave Chinner if (lip->li_flags & XFS_LI_IN_AIL) { 7300e57f6a3SDave Chinner /* check if we really need to move the item */ 7310e57f6a3SDave Chinner if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0) 7320e57f6a3SDave Chinner continue; 7330e57f6a3SDave Chinner 7340e57f6a3SDave Chinner xfs_ail_delete(ailp, lip); 7350e57f6a3SDave Chinner if (mlip == lip) 7360e57f6a3SDave Chinner mlip_changed = 1; 7370e57f6a3SDave Chinner } else { 7380e57f6a3SDave Chinner lip->li_flags |= XFS_LI_IN_AIL; 7390e57f6a3SDave Chinner } 7400e57f6a3SDave Chinner lip->li_lsn = lsn; 7410e57f6a3SDave Chinner 
list_add(&lip->li_ail, &tmp); 7420e57f6a3SDave Chinner } 7430e57f6a3SDave Chinner 7441d8c95a3SDave Chinner xfs_ail_splice(ailp, cur, &tmp, lsn); 7450e57f6a3SDave Chinner 7460e57f6a3SDave Chinner if (!mlip_changed) { 7470e57f6a3SDave Chinner spin_unlock(&ailp->xa_lock); 7480e57f6a3SDave Chinner return; 7490e57f6a3SDave Chinner } 7500e57f6a3SDave Chinner 7510e57f6a3SDave Chinner /* 7520e57f6a3SDave Chinner * It is not safe to access mlip after the AIL lock is dropped, so we 7530e57f6a3SDave Chinner * must get a copy of li_lsn before we do so. This is especially 7540e57f6a3SDave Chinner * important on 32-bit platforms where accessing and updating 64-bit 7550e57f6a3SDave Chinner * values like li_lsn is not atomic. 7560e57f6a3SDave Chinner */ 7570e57f6a3SDave Chinner mlip = xfs_ail_min(ailp); 7580e57f6a3SDave Chinner tail_lsn = mlip->li_lsn; 7590e57f6a3SDave Chinner spin_unlock(&ailp->xa_lock); 7600e57f6a3SDave Chinner xfs_log_move_tail(ailp->xa_mount, tail_lsn); 7610e57f6a3SDave Chinner } 7620e57f6a3SDave Chinner 7630e57f6a3SDave Chinner /* 76430136832SDave Chinner * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL 76530136832SDave Chinner * 76630136832SDave Chinner * @xfs_trans_ail_delete_bulk takes an array of log items that all need to 76730136832SDave Chinner * removed from the AIL. The caller is already holding the AIL lock, and done 76830136832SDave Chinner * all the checks necessary to ensure the items passed in via @log_items are 76930136832SDave Chinner * ready for deletion. This includes checking that the items are in the AIL. 77030136832SDave Chinner * 77130136832SDave Chinner * For each log item to be removed, unlink it from the AIL, clear the IN_AIL 77230136832SDave Chinner * flag from the item and reset the item's lsn to 0. If we remove the first 77330136832SDave Chinner * item in the AIL, update the log tail to match the new minimum LSN in the 77430136832SDave Chinner * AIL. 
77530136832SDave Chinner * 77630136832SDave Chinner * This function will not drop the AIL lock until all items are removed from 77730136832SDave Chinner * the AIL to minimise the amount of lock traffic on the AIL. This does not 77830136832SDave Chinner * greatly increase the AIL hold time, but does significantly reduce the amount 77930136832SDave Chinner * of traffic on the lock, especially during IO completion. 78030136832SDave Chinner * 78130136832SDave Chinner * This function must be called with the AIL lock held. The lock is dropped 78230136832SDave Chinner * before returning. 78330136832SDave Chinner */ 78430136832SDave Chinner void 78530136832SDave Chinner xfs_trans_ail_delete_bulk( 78630136832SDave Chinner struct xfs_ail *ailp, 78730136832SDave Chinner struct xfs_log_item **log_items, 78830136832SDave Chinner int nr_items) __releases(ailp->xa_lock) 78930136832SDave Chinner { 79030136832SDave Chinner xfs_log_item_t *mlip; 79130136832SDave Chinner xfs_lsn_t tail_lsn; 79230136832SDave Chinner int mlip_changed = 0; 79330136832SDave Chinner int i; 79430136832SDave Chinner 79530136832SDave Chinner mlip = xfs_ail_min(ailp); 79630136832SDave Chinner 79730136832SDave Chinner for (i = 0; i < nr_items; i++) { 79830136832SDave Chinner struct xfs_log_item *lip = log_items[i]; 79930136832SDave Chinner if (!(lip->li_flags & XFS_LI_IN_AIL)) { 80030136832SDave Chinner struct xfs_mount *mp = ailp->xa_mount; 80130136832SDave Chinner 80230136832SDave Chinner spin_unlock(&ailp->xa_lock); 80330136832SDave Chinner if (!XFS_FORCED_SHUTDOWN(mp)) { 8046a19d939SDave Chinner xfs_alert_tag(mp, XFS_PTAG_AILDELETE, 80530136832SDave Chinner "%s: attempting to delete a log item that is not in the AIL", 80630136832SDave Chinner __func__); 80730136832SDave Chinner xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 80830136832SDave Chinner } 80930136832SDave Chinner return; 81030136832SDave Chinner } 81130136832SDave Chinner 81230136832SDave Chinner xfs_ail_delete(ailp, lip); 81330136832SDave 
Chinner lip->li_flags &= ~XFS_LI_IN_AIL; 81430136832SDave Chinner lip->li_lsn = 0; 81530136832SDave Chinner if (mlip == lip) 81630136832SDave Chinner mlip_changed = 1; 81730136832SDave Chinner } 81830136832SDave Chinner 81930136832SDave Chinner if (!mlip_changed) { 82030136832SDave Chinner spin_unlock(&ailp->xa_lock); 82130136832SDave Chinner return; 82230136832SDave Chinner } 82330136832SDave Chinner 82430136832SDave Chinner /* 82530136832SDave Chinner * It is not safe to access mlip after the AIL lock is dropped, so we 82630136832SDave Chinner * must get a copy of li_lsn before we do so. This is especially 82730136832SDave Chinner * important on 32-bit platforms where accessing and updating 64-bit 82830136832SDave Chinner * values like li_lsn is not atomic. It is possible we've emptied the 82930136832SDave Chinner * AIL here, so if that is the case, pass an LSN of 0 to the tail move. 83030136832SDave Chinner */ 83130136832SDave Chinner mlip = xfs_ail_min(ailp); 83230136832SDave Chinner tail_lsn = mlip ? mlip->li_lsn : 0; 83330136832SDave Chinner spin_unlock(&ailp->xa_lock); 83430136832SDave Chinner xfs_log_move_tail(ailp->xa_mount, tail_lsn); 83530136832SDave Chinner } 8361da177e4SLinus Torvalds 8371da177e4SLinus Torvalds /* 8381da177e4SLinus Torvalds * The active item list (AIL) is a doubly linked list of log 8391da177e4SLinus Torvalds * items sorted by ascending lsn. The base of the list is 8401da177e4SLinus Torvalds * a forw/back pointer pair embedded in the xfs mount structure. 8411da177e4SLinus Torvalds * The base is initialized with both pointers pointing to the 8421da177e4SLinus Torvalds * base. This case always needs to be distinguished, because 8431da177e4SLinus Torvalds * the base has no lsn to look at. We almost always insert 8441da177e4SLinus Torvalds * at the end of the list, so on inserts we search from the 8451da177e4SLinus Torvalds * end of the list to find where the new item belongs. 
8461da177e4SLinus Torvalds */ 8471da177e4SLinus Torvalds 8481da177e4SLinus Torvalds /* 8491da177e4SLinus Torvalds * Initialize the doubly linked list to point only to itself. 8501da177e4SLinus Torvalds */ 851249a8c11SDavid Chinner int 8521da177e4SLinus Torvalds xfs_trans_ail_init( 8531da177e4SLinus Torvalds xfs_mount_t *mp) 8541da177e4SLinus Torvalds { 85582fa9012SDavid Chinner struct xfs_ail *ailp; 85682fa9012SDavid Chinner 85782fa9012SDavid Chinner ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 85882fa9012SDavid Chinner if (!ailp) 85982fa9012SDavid Chinner return ENOMEM; 86082fa9012SDavid Chinner 86182fa9012SDavid Chinner ailp->xa_mount = mp; 86282fa9012SDavid Chinner INIT_LIST_HEAD(&ailp->xa_ail); 863c7e8f268SDavid Chinner spin_lock_init(&ailp->xa_lock); 8640bf6a5bdSDave Chinner INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); 86527d8d5feSDavid Chinner mp->m_ail = ailp; 86627d8d5feSDavid Chinner return 0; 867249a8c11SDavid Chinner } 868249a8c11SDavid Chinner 869249a8c11SDavid Chinner void 870249a8c11SDavid Chinner xfs_trans_ail_destroy( 871249a8c11SDavid Chinner xfs_mount_t *mp) 872249a8c11SDavid Chinner { 87382fa9012SDavid Chinner struct xfs_ail *ailp = mp->m_ail; 87482fa9012SDavid Chinner 8750bf6a5bdSDave Chinner cancel_delayed_work_sync(&ailp->xa_work); 87682fa9012SDavid Chinner kmem_free(ailp); 8771da177e4SLinus Torvalds } 878