11da177e4SLinus Torvalds /* 27b718769SNathan Scott * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 3c7e8f268SDavid Chinner * Copyright (c) 2008 Dave Chinner 47b718769SNathan Scott * All Rights Reserved. 51da177e4SLinus Torvalds * 67b718769SNathan Scott * This program is free software; you can redistribute it and/or 77b718769SNathan Scott * modify it under the terms of the GNU General Public License as 81da177e4SLinus Torvalds * published by the Free Software Foundation. 91da177e4SLinus Torvalds * 107b718769SNathan Scott * This program is distributed in the hope that it would be useful, 117b718769SNathan Scott * but WITHOUT ANY WARRANTY; without even the implied warranty of 127b718769SNathan Scott * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 137b718769SNathan Scott * GNU General Public License for more details. 141da177e4SLinus Torvalds * 157b718769SNathan Scott * You should have received a copy of the GNU General Public License 167b718769SNathan Scott * along with this program; if not, write the Free Software Foundation, 177b718769SNathan Scott * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 181da177e4SLinus Torvalds */ 191da177e4SLinus Torvalds #include "xfs.h" 20a844f451SNathan Scott #include "xfs_fs.h" 211da177e4SLinus Torvalds #include "xfs_types.h" 221da177e4SLinus Torvalds #include "xfs_log.h" 23a844f451SNathan Scott #include "xfs_inum.h" 241da177e4SLinus Torvalds #include "xfs_trans.h" 251da177e4SLinus Torvalds #include "xfs_sb.h" 26da353b0dSDavid Chinner #include "xfs_ag.h" 271da177e4SLinus Torvalds #include "xfs_mount.h" 281da177e4SLinus Torvalds #include "xfs_trans_priv.h" 291da177e4SLinus Torvalds #include "xfs_error.h" 301da177e4SLinus Torvalds 310bf6a5bdSDave Chinner struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ 320bf6a5bdSDave Chinner 331da177e4SLinus Torvalds #ifdef DEBUG 34cd4a3c50SDave Chinner /* 35cd4a3c50SDave Chinner * Check that the list is sorted as it should be. 
36cd4a3c50SDave Chinner  */
37cd4a3c50SDave Chinner STATIC void
38cd4a3c50SDave Chinner xfs_ail_check(
39cd4a3c50SDave Chinner 	struct xfs_ail	*ailp,
40cd4a3c50SDave Chinner 	xfs_log_item_t	*lip)
41cd4a3c50SDave Chinner {
42cd4a3c50SDave Chinner 	xfs_log_item_t	*prev_lip;
43cd4a3c50SDave Chinner 
44cd4a3c50SDave Chinner 	if (list_empty(&ailp->xa_ail))
45cd4a3c50SDave Chinner 		return;
46cd4a3c50SDave Chinner 
47cd4a3c50SDave Chinner 	/*
48cd4a3c50SDave Chinner 	 * Check the next and previous entries are valid.
49cd4a3c50SDave Chinner 	 */
50cd4a3c50SDave Chinner 	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
51cd4a3c50SDave Chinner 	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
52cd4a3c50SDave Chinner 	if (&prev_lip->li_ail != &ailp->xa_ail)
53cd4a3c50SDave Chinner 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
54cd4a3c50SDave Chinner 
55cd4a3c50SDave Chinner 	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
56cd4a3c50SDave Chinner 	if (&prev_lip->li_ail != &ailp->xa_ail)
57cd4a3c50SDave Chinner 		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
58cd4a3c50SDave Chinner 
59cd4a3c50SDave Chinner 
60cd4a3c50SDave Chinner #ifdef XFS_TRANS_DEBUG
61cd4a3c50SDave Chinner 	/*
62cd4a3c50SDave Chinner 	 * Walk the list checking lsn ordering, and that every entry has the
63cd4a3c50SDave Chinner 	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
64cd4a3c50SDave Chinner 	 * when specifically debugging the transaction subsystem.
65cd4a3c50SDave Chinner 	 */
66cd4a3c50SDave Chinner 	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
67cd4a3c50SDave Chinner 	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
68cd4a3c50SDave Chinner 		if (&prev_lip->li_ail != &ailp->xa_ail)
69cd4a3c50SDave Chinner 			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
70cd4a3c50SDave Chinner 		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
71cd4a3c50SDave Chinner 		prev_lip = lip;
72cd4a3c50SDave Chinner 	}
73cd4a3c50SDave Chinner #endif /* XFS_TRANS_DEBUG */
74cd4a3c50SDave Chinner }
75cd4a3c50SDave Chinner #else /* !DEBUG */
76de08dbc1SDavid Chinner #define	xfs_ail_check(a,l)
771da177e4SLinus Torvalds #endif /* DEBUG */
781da177e4SLinus Torvalds 
79cd4a3c50SDave Chinner /*
80cd4a3c50SDave Chinner  * Return a pointer to the first item in the AIL. If the AIL is empty, then
81cd4a3c50SDave Chinner  * return NULL.
82cd4a3c50SDave Chinner  */
83cd4a3c50SDave Chinner static xfs_log_item_t *
84cd4a3c50SDave Chinner xfs_ail_min(
85cd4a3c50SDave Chinner 	struct xfs_ail  *ailp)
86cd4a3c50SDave Chinner {
87cd4a3c50SDave Chinner 	if (list_empty(&ailp->xa_ail))
88cd4a3c50SDave Chinner 		return NULL;
89cd4a3c50SDave Chinner 
90cd4a3c50SDave Chinner 	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
91cd4a3c50SDave Chinner }
921da177e4SLinus Torvalds 
931da177e4SLinus Torvalds /*
94fd074841SDave Chinner  * Return a pointer to the last item in the AIL. If the AIL is empty, then
95fd074841SDave Chinner  * return NULL.
96fd074841SDave Chinner  */
97fd074841SDave Chinner static xfs_log_item_t *
98fd074841SDave Chinner xfs_ail_max(
99fd074841SDave Chinner 	struct xfs_ail  *ailp)
100fd074841SDave Chinner {
101fd074841SDave Chinner 	if (list_empty(&ailp->xa_ail))
102fd074841SDave Chinner 		return NULL;
103fd074841SDave Chinner 
104fd074841SDave Chinner 	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
105fd074841SDave Chinner }
106fd074841SDave Chinner 
107fd074841SDave Chinner /*
108cd4a3c50SDave Chinner  * Return a pointer to the item which follows the given item in the AIL. If
109cd4a3c50SDave Chinner  * the given item is the last item in the list, then return NULL.
110cd4a3c50SDave Chinner  */
111cd4a3c50SDave Chinner static xfs_log_item_t *
112cd4a3c50SDave Chinner xfs_ail_next(
113cd4a3c50SDave Chinner 	struct xfs_ail	*ailp,
114cd4a3c50SDave Chinner 	xfs_log_item_t	*lip)
115cd4a3c50SDave Chinner {
116cd4a3c50SDave Chinner 	if (lip->li_ail.next == &ailp->xa_ail)
117cd4a3c50SDave Chinner 		return NULL;
118cd4a3c50SDave Chinner 
119cd4a3c50SDave Chinner 	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
120cd4a3c50SDave Chinner }
121cd4a3c50SDave Chinner 
122cd4a3c50SDave Chinner /*
123cd4a3c50SDave Chinner  * This is called by the log manager code to determine the LSN of the tail of
124cd4a3c50SDave Chinner  * the log. This is exactly the LSN of the first item in the AIL. If the AIL
125cd4a3c50SDave Chinner  * is empty, then this function returns 0.
126cd4a3c50SDave Chinner  *
127cd4a3c50SDave Chinner  * We need the AIL lock in order to get a coherent read of the lsn of the first
128cd4a3c50SDave Chinner  * item in the AIL.
1291da177e4SLinus Torvalds  */
1301da177e4SLinus Torvalds xfs_lsn_t
131fd074841SDave Chinner xfs_ail_min_lsn(
1325b00f14fSDavid Chinner 	struct xfs_ail	*ailp)
1331da177e4SLinus Torvalds {
134cd4a3c50SDave Chinner 	xfs_lsn_t	lsn = 0;
1351da177e4SLinus Torvalds 	xfs_log_item_t	*lip;
1361da177e4SLinus Torvalds 
137c7e8f268SDavid Chinner 	spin_lock(&ailp->xa_lock);
1385b00f14fSDavid Chinner 	lip = xfs_ail_min(ailp);
139cd4a3c50SDave Chinner 	if (lip)
1401da177e4SLinus Torvalds 		lsn = lip->li_lsn;
141c7e8f268SDavid Chinner 	spin_unlock(&ailp->xa_lock);
1421da177e4SLinus Torvalds 
1431da177e4SLinus Torvalds 	return lsn;
1441da177e4SLinus Torvalds }
1451da177e4SLinus Torvalds 
1461da177e4SLinus Torvalds /*
147fd074841SDave Chinner  * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
148fd074841SDave Chinner  */
149fd074841SDave Chinner static xfs_lsn_t
150fd074841SDave Chinner xfs_ail_max_lsn(
151fd074841SDave Chinner 	struct xfs_ail	*ailp)
152fd074841SDave Chinner {
153fd074841SDave Chinner 	xfs_lsn_t	lsn = 0;
154fd074841SDave Chinner 	xfs_log_item_t	*lip;
155fd074841SDave Chinner 
156fd074841SDave Chinner 	spin_lock(&ailp->xa_lock);
157fd074841SDave Chinner 	lip = xfs_ail_max(ailp);
158fd074841SDave Chinner 	if (lip)
159fd074841SDave Chinner 		lsn = lip->li_lsn;
160fd074841SDave Chinner 	spin_unlock(&ailp->xa_lock);
161fd074841SDave Chinner 
162fd074841SDave Chinner 	return lsn;
163fd074841SDave Chinner }
164fd074841SDave Chinner 
165fd074841SDave Chinner /*
16627d8d5feSDavid Chinner  * AIL traversal cursor initialisation.
16727d8d5feSDavid Chinner  *
16827d8d5feSDavid Chinner  * The cursor keeps track of where our current traversal is up
16927d8d5feSDavid Chinner  * to by tracking the next item in the list for us. However, for
17027d8d5feSDavid Chinner  * this to be safe, removing an object from the AIL needs to invalidate
17127d8d5feSDavid Chinner  * any cursor that points to it. Hence the traversal cursor needs to
17227d8d5feSDavid Chinner  * be linked to the struct xfs_ail so that deletion can search all the
17327d8d5feSDavid Chinner  * active cursors for invalidation.
17427d8d5feSDavid Chinner  *
17527d8d5feSDavid Chinner  * We don't link the push cursor because it is embedded in the struct
17627d8d5feSDavid Chinner  * xfs_ail and hence easily findable.
17727d8d5feSDavid Chinner  */
1785b00f14fSDavid Chinner STATIC void
17927d8d5feSDavid Chinner xfs_trans_ail_cursor_init(
18027d8d5feSDavid Chinner 	struct xfs_ail		*ailp,
18127d8d5feSDavid Chinner 	struct xfs_ail_cursor	*cur)
18227d8d5feSDavid Chinner {
18327d8d5feSDavid Chinner 	cur->item = NULL;
18427d8d5feSDavid Chinner 	if (cur == &ailp->xa_cursors)
18527d8d5feSDavid Chinner 		return;
18627d8d5feSDavid Chinner 
18727d8d5feSDavid Chinner 	cur->next = ailp->xa_cursors.next;
18827d8d5feSDavid Chinner 	ailp->xa_cursors.next = cur;
18927d8d5feSDavid Chinner }
19027d8d5feSDavid Chinner 
19127d8d5feSDavid Chinner /*
19227d8d5feSDavid Chinner  * Get the next item in the traversal and advance the cursor.
19327d8d5feSDavid Chinner  * If the cursor was invalidated (indicated by a lip of 1),
19427d8d5feSDavid Chinner  * restart the traversal.
19527d8d5feSDavid Chinner  */
1965b00f14fSDavid Chinner struct xfs_log_item *
19727d8d5feSDavid Chinner xfs_trans_ail_cursor_next(
19827d8d5feSDavid Chinner 	struct xfs_ail		*ailp,
19927d8d5feSDavid Chinner 	struct xfs_ail_cursor	*cur)
20027d8d5feSDavid Chinner {
20127d8d5feSDavid Chinner 	struct xfs_log_item	*lip = cur->item;
20227d8d5feSDavid Chinner 
20327d8d5feSDavid Chinner 	if ((__psint_t)lip & 1)		/* low bit set: cursor was invalidated */
20427d8d5feSDavid Chinner 		lip = xfs_ail_min(ailp);
20516b59029SDave Chinner 	if (lip)
20616b59029SDave Chinner 		cur->item = xfs_ail_next(ailp, lip);
20727d8d5feSDavid Chinner 	return lip;
20827d8d5feSDavid Chinner }
20927d8d5feSDavid Chinner 
21027d8d5feSDavid Chinner /*
21127d8d5feSDavid Chinner  * Now that the traversal is complete, we need to remove the cursor
21227d8d5feSDavid Chinner  * from the list of traversing cursors. Avoid removing the embedded
2139da096fdSMalcolm Parsons  * push cursor, but use the fact it is always present to make the
21427d8d5feSDavid Chinner  * list deletion simple.
21527d8d5feSDavid Chinner  */
21627d8d5feSDavid Chinner void
21727d8d5feSDavid Chinner xfs_trans_ail_cursor_done(
21827d8d5feSDavid Chinner 	struct xfs_ail		*ailp,
21927d8d5feSDavid Chinner 	struct xfs_ail_cursor	*done)
22027d8d5feSDavid Chinner {
22127d8d5feSDavid Chinner 	struct xfs_ail_cursor	*prev = NULL;
22227d8d5feSDavid Chinner 	struct xfs_ail_cursor	*cur;
22327d8d5feSDavid Chinner 
22427d8d5feSDavid Chinner 	done->item = NULL;
22527d8d5feSDavid Chinner 	if (done == &ailp->xa_cursors)
22627d8d5feSDavid Chinner 		return;
22727d8d5feSDavid Chinner 	prev = &ailp->xa_cursors;
22827d8d5feSDavid Chinner 	for (cur = prev->next; cur; prev = cur, cur = prev->next) {
22927d8d5feSDavid Chinner 		if (cur == done) {
23027d8d5feSDavid Chinner 			prev->next = cur->next;
23127d8d5feSDavid Chinner 			break;
23227d8d5feSDavid Chinner 		}
23327d8d5feSDavid Chinner 	}
23427d8d5feSDavid Chinner 	ASSERT(cur);
23527d8d5feSDavid Chinner }
23627d8d5feSDavid Chinner 
23727d8d5feSDavid Chinner /*
2385b00f14fSDavid Chinner  * Invalidate any cursor that is pointing to this item. This is
2395b00f14fSDavid Chinner  * called when an item is removed from the AIL. Any cursor pointing
2405b00f14fSDavid Chinner  * to this object is now invalid and the traversal needs to be
2415b00f14fSDavid Chinner  * terminated so it doesn't reference a freed object. We set the
2425b00f14fSDavid Chinner  * cursor item to a value of 1 so we can distinguish between an
2435b00f14fSDavid Chinner  * invalidation and the end of the list when getting the next item
2445b00f14fSDavid Chinner  * from the cursor.
2455b00f14fSDavid Chinner  */
2465b00f14fSDavid Chinner STATIC void
2475b00f14fSDavid Chinner xfs_trans_ail_cursor_clear(
2485b00f14fSDavid Chinner 	struct xfs_ail		*ailp,
2495b00f14fSDavid Chinner 	struct xfs_log_item	*lip)
2505b00f14fSDavid Chinner {
2515b00f14fSDavid Chinner 	struct xfs_ail_cursor	*cur;
2525b00f14fSDavid Chinner 
2535b00f14fSDavid Chinner 	/* need to search all cursors */
2545b00f14fSDavid Chinner 	for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
2555b00f14fSDavid Chinner 		if (cur->item == lip)
2565b00f14fSDavid Chinner 			cur->item = (struct xfs_log_item *)
2575b00f14fSDavid Chinner 					((__psint_t)cur->item | 1);
2585b00f14fSDavid Chinner 	}
2595b00f14fSDavid Chinner }
2605b00f14fSDavid Chinner 
2615b00f14fSDavid Chinner /*
26216b59029SDave Chinner  * Find the first item in the AIL with the given @lsn by searching in ascending
26316b59029SDave Chinner  * LSN order and initialise the cursor to point to the next item for an
26416b59029SDave Chinner  * ascending traversal. Pass a @lsn of zero to initialise the cursor to the
26516b59029SDave Chinner  * first item in the AIL. Returns NULL if the list is empty.
266249a8c11SDavid Chinner  */
2675b00f14fSDavid Chinner xfs_log_item_t *
2685b00f14fSDavid Chinner xfs_trans_ail_cursor_first(
26927d8d5feSDavid Chinner 	struct xfs_ail		*ailp,
27027d8d5feSDavid Chinner 	struct xfs_ail_cursor	*cur,
271249a8c11SDavid Chinner 	xfs_lsn_t		lsn)
272249a8c11SDavid Chinner {
273249a8c11SDavid Chinner 	xfs_log_item_t		*lip;
274249a8c11SDavid Chinner 
2755b00f14fSDavid Chinner 	xfs_trans_ail_cursor_init(ailp, cur);
27616b59029SDave Chinner 
27716b59029SDave Chinner 	if (lsn == 0) {
27827d8d5feSDavid Chinner 		lip = xfs_ail_min(ailp);
2795b00f14fSDavid Chinner 		goto out;
28016b59029SDave Chinner 	}
281249a8c11SDavid Chinner 
28227d8d5feSDavid Chinner 	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
2835b00f14fSDavid Chinner 		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
2847ee49acfSDavid Chinner 			goto out;
2855b00f14fSDavid Chinner 	}
28616b59029SDave Chinner 	return NULL;
28716b59029SDave Chinner 
2885b00f14fSDavid Chinner out:
28916b59029SDave Chinner 	if (lip)
29016b59029SDave Chinner 		cur->item = xfs_ail_next(ailp, lip);
291249a8c11SDavid Chinner 	return lip;
292249a8c11SDavid Chinner }
293535f6b37SJosef 'Jeff' Sipek 
2941d8c95a3SDave Chinner /* Search backwards for the last item with an LSN <= @lsn; NULL if none. */
2941d8c95a3SDave Chinner static struct xfs_log_item *
2951d8c95a3SDave Chinner __xfs_trans_ail_cursor_last(
2961d8c95a3SDave Chinner 	struct xfs_ail		*ailp,
2971d8c95a3SDave Chinner 	xfs_lsn_t		lsn)
2981d8c95a3SDave Chinner {
2991d8c95a3SDave Chinner 	xfs_log_item_t		*lip;
3001d8c95a3SDave Chinner 
3011d8c95a3SDave Chinner 	list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
3021d8c95a3SDave Chinner 		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
3031d8c95a3SDave Chinner 			return lip;
3041d8c95a3SDave Chinner 	}
3051d8c95a3SDave Chinner 	return NULL;
3061d8c95a3SDave Chinner }
3071d8c95a3SDave Chinner 
3081d8c95a3SDave Chinner /*
30916b59029SDave Chinner  * Find the last item in the AIL with the given @lsn by searching in descending
31016b59029SDave Chinner  * LSN order and initialise the cursor to point to that item. If there is no
31116b59029SDave Chinner  * item with the value of @lsn, then it sets the cursor to the last item with an
31216b59029SDave Chinner  * LSN lower than @lsn. Returns NULL if the list is empty.
3131d8c95a3SDave Chinner  */
3141d8c95a3SDave Chinner struct xfs_log_item *
3151d8c95a3SDave Chinner xfs_trans_ail_cursor_last(
3161d8c95a3SDave Chinner 	struct xfs_ail		*ailp,
3171d8c95a3SDave Chinner 	struct xfs_ail_cursor	*cur,
3181d8c95a3SDave Chinner 	xfs_lsn_t		lsn)
3191d8c95a3SDave Chinner {
3201d8c95a3SDave Chinner 	xfs_trans_ail_cursor_init(ailp, cur);
3211d8c95a3SDave Chinner 	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
3221d8c95a3SDave Chinner 	return cur->item;
3231d8c95a3SDave Chinner }
3241d8c95a3SDave Chinner 
3251d8c95a3SDave Chinner /*
32616b59029SDave Chinner  * Splice the log item list into the AIL at the given LSN. We splice to the
3271d8c95a3SDave Chinner  * tail of the given LSN to maintain insert order for push traversals. The
3281d8c95a3SDave Chinner  * cursor is optional, allowing repeated updates to the same LSN to avoid
3291d8c95a3SDave Chinner  * repeated traversals.
330cd4a3c50SDave Chinner  */
331cd4a3c50SDave Chinner static void
332cd4a3c50SDave Chinner xfs_ail_splice(
333cd4a3c50SDave Chinner 	struct xfs_ail		*ailp,
3341d8c95a3SDave Chinner 	struct xfs_ail_cursor	*cur,
335cd4a3c50SDave Chinner 	struct list_head	*list,
336cd4a3c50SDave Chinner 	xfs_lsn_t		lsn)
337cd4a3c50SDave Chinner {
3381d8c95a3SDave Chinner 	struct xfs_log_item	*lip = cur ? cur->item : NULL;
3391d8c95a3SDave Chinner 	struct xfs_log_item	*next_lip;
340cd4a3c50SDave Chinner 
3411d8c95a3SDave Chinner 	/*
3421d8c95a3SDave Chinner 	 * Get a new cursor if we don't have a placeholder or the existing one
3431d8c95a3SDave Chinner 	 * has been invalidated.
3441d8c95a3SDave Chinner 	 */
3451d8c95a3SDave Chinner 	if (!lip || (__psint_t)lip & 1) {
3461d8c95a3SDave Chinner 		lip = __xfs_trans_ail_cursor_last(ailp, lsn);
3471d8c95a3SDave Chinner 
3481d8c95a3SDave Chinner 		if (!lip) {
3491d8c95a3SDave Chinner 			/* The list is empty, so just splice and return. */
3501d8c95a3SDave Chinner 			if (cur)
3511d8c95a3SDave Chinner 				cur->item = NULL;
352cd4a3c50SDave Chinner 			list_splice(list, &ailp->xa_ail);
353cd4a3c50SDave Chinner 			return;
354cd4a3c50SDave Chinner 		}
355cd4a3c50SDave Chinner 	}
356cd4a3c50SDave Chinner 
3571d8c95a3SDave Chinner 	/*
3581d8c95a3SDave Chinner 	 * Our cursor points to the item we want to insert _after_, so we have
3591d8c95a3SDave Chinner 	 * to update the cursor to point to the end of the list we are splicing
3601d8c95a3SDave Chinner 	 * in so that it points to the correct location for the next splice.
3611d8c95a3SDave Chinner 	 * i.e. before the splice
3621d8c95a3SDave Chinner 	 *
3631d8c95a3SDave Chinner 	 *  lsn -> lsn -> lsn + x -> lsn + x ...
3641d8c95a3SDave Chinner 	 *          ^
3651d8c95a3SDave Chinner 	 *          | cursor points here
3661d8c95a3SDave Chinner 	 *
3671d8c95a3SDave Chinner 	 * After the splice we have:
3681d8c95a3SDave Chinner 	 *
3691d8c95a3SDave Chinner 	 *  lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
3701d8c95a3SDave Chinner 	 *          ^                            ^
3711d8c95a3SDave Chinner 	 *          | cursor points here         | needs to move here
3721d8c95a3SDave Chinner 	 *
3731d8c95a3SDave Chinner 	 * So we set the cursor to the last item in the list to be spliced
3741d8c95a3SDave Chinner 	 * before we execute the splice, resulting in the cursor pointing to
3751d8c95a3SDave Chinner 	 * the correct item after the splice occurs.
3761d8c95a3SDave Chinner 	 */
3771d8c95a3SDave Chinner 	if (cur) {
3781d8c95a3SDave Chinner 		next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
3791d8c95a3SDave Chinner 		cur->item = next_lip;
3801d8c95a3SDave Chinner 	}
3811d8c95a3SDave Chinner 	list_splice(list, &lip->li_ail);
382cd4a3c50SDave Chinner }
383cd4a3c50SDave Chinner 
384cd4a3c50SDave Chinner /*
385cd4a3c50SDave Chinner  * Delete the given item from the AIL and invalidate any cursors pointing
385cd4a3c50SDave Chinner  * at it.
386cd4a3c50SDave Chinner  */
387cd4a3c50SDave Chinner static void
388cd4a3c50SDave Chinner xfs_ail_delete(
389cd4a3c50SDave Chinner 	struct xfs_ail	*ailp,
390cd4a3c50SDave Chinner 	xfs_log_item_t	*lip)
391cd4a3c50SDave Chinner {
392cd4a3c50SDave Chinner 	xfs_ail_check(ailp, lip);
393cd4a3c50SDave Chinner 	list_del(&lip->li_ail);
394cd4a3c50SDave Chinner 	xfs_trans_ail_cursor_clear(ailp, lip);
395cd4a3c50SDave Chinner }
396cd4a3c50SDave Chinner 
397cd4a3c50SDave Chinner /*
3980bf6a5bdSDave Chinner  * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
3990bf6a5bdSDave Chinner  * to run at a later time if there is more work to do to complete the push.
400249a8c11SDavid Chinner */ 4010bf6a5bdSDave Chinner STATIC void 4020bf6a5bdSDave Chinner xfs_ail_worker( 4030bf6a5bdSDave Chinner struct work_struct *work) 404249a8c11SDavid Chinner { 4050bf6a5bdSDave Chinner struct xfs_ail *ailp = container_of(to_delayed_work(work), 4060bf6a5bdSDave Chinner struct xfs_ail, xa_work); 40782fa9012SDavid Chinner xfs_mount_t *mp = ailp->xa_mount; 40827d8d5feSDavid Chinner struct xfs_ail_cursor *cur = &ailp->xa_cursors; 4099e7004e7SDave Chinner xfs_log_item_t *lip; 4109e7004e7SDave Chinner xfs_lsn_t lsn; 411fe0da767SDave Chinner xfs_lsn_t target; 4129e7004e7SDave Chinner long tout = 10; 4139e7004e7SDave Chinner int flush_log = 0; 4149e7004e7SDave Chinner int stuck = 0; 4159e7004e7SDave Chinner int count = 0; 416d808f617SDave Chinner int push_xfsbufd = 0; 4171da177e4SLinus Torvalds 418c7e8f268SDavid Chinner spin_lock(&ailp->xa_lock); 419fe0da767SDave Chinner target = ailp->xa_target; 42027d8d5feSDavid Chinner xfs_trans_ail_cursor_init(ailp, cur); 4210bf6a5bdSDave Chinner lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); 422249a8c11SDavid Chinner if (!lip || XFS_FORCED_SHUTDOWN(mp)) { 4231da177e4SLinus Torvalds /* 424249a8c11SDavid Chinner * AIL is empty or our push has reached the end. 4251da177e4SLinus Torvalds */ 42627d8d5feSDavid Chinner xfs_trans_ail_cursor_done(ailp, cur); 427c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 4289e7004e7SDave Chinner goto out_done; 4291da177e4SLinus Torvalds } 4301da177e4SLinus Torvalds 4311da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail); 4321da177e4SLinus Torvalds 4331da177e4SLinus Torvalds /* 4341da177e4SLinus Torvalds * While the item we are looking at is below the given threshold 435249a8c11SDavid Chinner * try to flush it out. We'd like not to stop until we've at least 4361da177e4SLinus Torvalds * tried to push on everything in the AIL with an LSN less than 437249a8c11SDavid Chinner * the given threshold. 
4381da177e4SLinus Torvalds * 439249a8c11SDavid Chinner * However, we will stop after a certain number of pushes and wait 440249a8c11SDavid Chinner * for a reduced timeout to fire before pushing further. This 441249a8c11SDavid Chinner * prevents use from spinning when we can't do anything or there is 442249a8c11SDavid Chinner * lots of contention on the AIL lists. 443249a8c11SDavid Chinner */ 444249a8c11SDavid Chinner lsn = lip->li_lsn; 44550e86686SDave Chinner while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { 446249a8c11SDavid Chinner int lock_result; 447249a8c11SDavid Chinner /* 448249a8c11SDavid Chinner * If we can lock the item without sleeping, unlock the AIL 449249a8c11SDavid Chinner * lock and flush the item. Then re-grab the AIL lock so we 450249a8c11SDavid Chinner * can look for the next item on the AIL. List changes are 451249a8c11SDavid Chinner * handled by the AIL lookup functions internally 452249a8c11SDavid Chinner * 453249a8c11SDavid Chinner * If we can't lock the item, either its holder will flush it 454249a8c11SDavid Chinner * or it is already being flushed or it is being relogged. In 455249a8c11SDavid Chinner * any of these case it is being taken care of and we can just 456249a8c11SDavid Chinner * skip to the next item in the list. 
4571da177e4SLinus Torvalds */ 4581da177e4SLinus Torvalds lock_result = IOP_TRYLOCK(lip); 459c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 4601da177e4SLinus Torvalds switch (lock_result) { 4611da177e4SLinus Torvalds case XFS_ITEM_SUCCESS: 4621da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_success); 4631da177e4SLinus Torvalds IOP_PUSH(lip); 4640bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = lsn; 4651da177e4SLinus Torvalds break; 4661da177e4SLinus Torvalds 4671da177e4SLinus Torvalds case XFS_ITEM_PUSHBUF: 4681da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_pushbuf); 4691da177e4SLinus Torvalds IOP_PUSHBUF(lip); 4700bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = lsn; 471d808f617SDave Chinner push_xfsbufd = 1; 4721da177e4SLinus Torvalds break; 4731da177e4SLinus Torvalds 4741da177e4SLinus Torvalds case XFS_ITEM_PINNED: 4751da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_pinned); 476249a8c11SDavid Chinner stuck++; 4771da177e4SLinus Torvalds flush_log = 1; 4781da177e4SLinus Torvalds break; 4791da177e4SLinus Torvalds 4801da177e4SLinus Torvalds case XFS_ITEM_LOCKED: 4811da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_locked); 4820bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = lsn; 483249a8c11SDavid Chinner stuck++; 4841da177e4SLinus Torvalds break; 4851da177e4SLinus Torvalds 4861da177e4SLinus Torvalds default: 4871da177e4SLinus Torvalds ASSERT(0); 4881da177e4SLinus Torvalds break; 4891da177e4SLinus Torvalds } 4901da177e4SLinus Torvalds 491c7e8f268SDavid Chinner spin_lock(&ailp->xa_lock); 492249a8c11SDavid Chinner /* should we bother continuing? */ 493249a8c11SDavid Chinner if (XFS_FORCED_SHUTDOWN(mp)) 4941da177e4SLinus Torvalds break; 495249a8c11SDavid Chinner ASSERT(mp->m_log); 4961da177e4SLinus Torvalds 497249a8c11SDavid Chinner count++; 498249a8c11SDavid Chinner 499249a8c11SDavid Chinner /* 500249a8c11SDavid Chinner * Are there too many items we can't do anything with? 
501249a8c11SDavid Chinner * If we we are skipping too many items because we can't flush 502249a8c11SDavid Chinner * them or they are already being flushed, we back off and 503249a8c11SDavid Chinner * given them time to complete whatever operation is being 504249a8c11SDavid Chinner * done. i.e. remove pressure from the AIL while we can't make 505249a8c11SDavid Chinner * progress so traversals don't slow down further inserts and 506249a8c11SDavid Chinner * removals to/from the AIL. 507249a8c11SDavid Chinner * 508249a8c11SDavid Chinner * The value of 100 is an arbitrary magic number based on 509249a8c11SDavid Chinner * observation. 510249a8c11SDavid Chinner */ 511249a8c11SDavid Chinner if (stuck > 100) 512249a8c11SDavid Chinner break; 513249a8c11SDavid Chinner 51427d8d5feSDavid Chinner lip = xfs_trans_ail_cursor_next(ailp, cur); 515249a8c11SDavid Chinner if (lip == NULL) 516249a8c11SDavid Chinner break; 517249a8c11SDavid Chinner lsn = lip->li_lsn; 5181da177e4SLinus Torvalds } 51927d8d5feSDavid Chinner xfs_trans_ail_cursor_done(ailp, cur); 520c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 5211da177e4SLinus Torvalds 5221da177e4SLinus Torvalds if (flush_log) { 5231da177e4SLinus Torvalds /* 5241da177e4SLinus Torvalds * If something we need to push out was pinned, then 5251da177e4SLinus Torvalds * push out the log so it will become unpinned and 5261da177e4SLinus Torvalds * move forward in the AIL. 
5271da177e4SLinus Torvalds */ 5281da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_flush); 529a14a348bSChristoph Hellwig xfs_log_force(mp, 0); 5301da177e4SLinus Torvalds } 5311da177e4SLinus Torvalds 532d808f617SDave Chinner if (push_xfsbufd) { 533d808f617SDave Chinner /* we've got delayed write buffers to flush */ 534d808f617SDave Chinner wake_up_process(mp->m_ddev_targp->bt_task); 535d808f617SDave Chinner } 536d808f617SDave Chinner 5370bf6a5bdSDave Chinner /* assume we have more work to do in a short while */ 5389e7004e7SDave Chinner out_done: 53992d9cd10SDavid Chinner if (!count) { 54092d9cd10SDavid Chinner /* We're past our target or empty, so idle */ 5410bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = 0; 5420bf6a5bdSDave Chinner 5430bf6a5bdSDave Chinner /* 5447ac95657SDave Chinner * We clear the XFS_AIL_PUSHING_BIT first before checking 5457ac95657SDave Chinner * whether the target has changed. If the target has changed, 5467ac95657SDave Chinner * this pushes the requeue race directly onto the result of the 5477ac95657SDave Chinner * atomic test/set bit, so we are guaranteed that either the 5487ac95657SDave Chinner * the pusher that changed the target or ourselves will requeue 5497ac95657SDave Chinner * the work (but not both). 5500bf6a5bdSDave Chinner */ 5510bf6a5bdSDave Chinner clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); 5527ac95657SDave Chinner smp_rmb(); 5537ac95657SDave Chinner if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || 5547ac95657SDave Chinner test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 5550bf6a5bdSDave Chinner return; 5567ac95657SDave Chinner 5570bf6a5bdSDave Chinner tout = 50; 55892d9cd10SDavid Chinner } else if (XFS_LSN_CMP(lsn, target) >= 0) { 559249a8c11SDavid Chinner /* 56092d9cd10SDavid Chinner * We reached the target so wait a bit longer for I/O to 56192d9cd10SDavid Chinner * complete and remove pushed items from the AIL before we 56292d9cd10SDavid Chinner * start the next scan from the start of the AIL. 
563249a8c11SDavid Chinner */ 564453eac8aSDave Chinner tout = 50; 5650bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = 0; 56627d8d5feSDavid Chinner } else if ((stuck * 100) / count > 90) { 567249a8c11SDavid Chinner /* 568249a8c11SDavid Chinner * Either there is a lot of contention on the AIL or we 569249a8c11SDavid Chinner * are stuck due to operations in progress. "Stuck" in this 570249a8c11SDavid Chinner * case is defined as >90% of the items we tried to push 571249a8c11SDavid Chinner * were stuck. 572249a8c11SDavid Chinner * 573249a8c11SDavid Chinner * Backoff a bit more to allow some I/O to complete before 574249a8c11SDavid Chinner * continuing from where we were. 575249a8c11SDavid Chinner */ 576453eac8aSDave Chinner tout = 20; 577453eac8aSDave Chinner } 5781da177e4SLinus Torvalds 5790bf6a5bdSDave Chinner /* There is more to do, requeue us. */ 5800bf6a5bdSDave Chinner queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 5810bf6a5bdSDave Chinner msecs_to_jiffies(tout)); 5820bf6a5bdSDave Chinner } 5830bf6a5bdSDave Chinner 5840bf6a5bdSDave Chinner /* 5850bf6a5bdSDave Chinner * This routine is called to move the tail of the AIL forward. It does this by 5860bf6a5bdSDave Chinner * trying to flush items in the AIL whose lsns are below the given 5870bf6a5bdSDave Chinner * threshold_lsn. 5880bf6a5bdSDave Chinner * 5890bf6a5bdSDave Chinner * The push is run asynchronously in a workqueue, which means the caller needs 5900bf6a5bdSDave Chinner * to handle waiting on the async flush for space to become available. 5910bf6a5bdSDave Chinner * We don't want to interrupt any push that is in progress, hence we only queue 5920bf6a5bdSDave Chinner * work if we set the pushing bit approriately. 5930bf6a5bdSDave Chinner * 5940bf6a5bdSDave Chinner * We do this unlocked - we only need to know whether there is anything in the 5950bf6a5bdSDave Chinner * AIL at the time we are called. 
We don't need to access the contents of 5960bf6a5bdSDave Chinner * any of the objects, so the lock is not needed. 5970bf6a5bdSDave Chinner */ 5980bf6a5bdSDave Chinner void 599fd074841SDave Chinner xfs_ail_push( 6000bf6a5bdSDave Chinner struct xfs_ail *ailp, 6010bf6a5bdSDave Chinner xfs_lsn_t threshold_lsn) 6020bf6a5bdSDave Chinner { 6030bf6a5bdSDave Chinner xfs_log_item_t *lip; 6040bf6a5bdSDave Chinner 6050bf6a5bdSDave Chinner lip = xfs_ail_min(ailp); 6060bf6a5bdSDave Chinner if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) || 6070bf6a5bdSDave Chinner XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0) 6080bf6a5bdSDave Chinner return; 6090bf6a5bdSDave Chinner 6100bf6a5bdSDave Chinner /* 6110bf6a5bdSDave Chinner * Ensure that the new target is noticed in push code before it clears 6120bf6a5bdSDave Chinner * the XFS_AIL_PUSHING_BIT. 6130bf6a5bdSDave Chinner */ 6140bf6a5bdSDave Chinner smp_wmb(); 615fe0da767SDave Chinner xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); 6160bf6a5bdSDave Chinner if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 6170bf6a5bdSDave Chinner queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); 6180bf6a5bdSDave Chinner } 6191da177e4SLinus Torvalds 6201da177e4SLinus Torvalds /* 621fd074841SDave Chinner * Push out all items in the AIL immediately 622fd074841SDave Chinner */ 623fd074841SDave Chinner void 624fd074841SDave Chinner xfs_ail_push_all( 625fd074841SDave Chinner struct xfs_ail *ailp) 626fd074841SDave Chinner { 627fd074841SDave Chinner xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp); 628fd074841SDave Chinner 629fd074841SDave Chinner if (threshold_lsn) 630fd074841SDave Chinner xfs_ail_push(ailp, threshold_lsn); 631fd074841SDave Chinner } 632fd074841SDave Chinner 633fd074841SDave Chinner /* 6341da177e4SLinus Torvalds * This is to be called when an item is unlocked that may have 6351da177e4SLinus Torvalds * been in the AIL. 
 * It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have
	 * unlocked log items arbitrarily. The last thing
	 * we want to do is to move the tail of the log
	 * over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min()
	 * without holding the AIL lock because we only care about the
	 * case where we are at the tail of the AIL. If the object isn't
	 * at the tail, it doesn't matter what result we get back. This
	 * is slightly racy because since we were just unlocked, we could
	 * go to sleep between the call to xfs_ail_min and the call to
	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
	 * move us out of the tail of the AIL, and then we wake up. However,
	 * the call to xfs_log_move_tail() doesn't do anything if there's
	 * not enough free space to wake people up so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added. Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function is entered with the AIL lock already held (note the
 * __releases() annotation on the definition), so once under the lock we
 * need to check each log item LSN to confirm it needs to be moved forward
 * in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	/* Sample the current minimum so we can detect a tail move below. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (lip->li_flags & XFS_LI_IN_AIL) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			lip->li_flags |= XFS_LI_IN_AIL;
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	/* Insert the gathered items at their (shared) LSN position. */
	xfs_ail_splice(ailp, cur, &tmp, lsn);

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so. This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip->li_lsn;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}

/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion. This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0. If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;

	/* Sample the current minimum so we can detect a tail move below. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			/*
			 * Deleting an item not in the AIL indicates in-core
			 * corruption: alert and shut the filesystem down
			 * (unless we are already shutting down).
			 */
			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
			return;
		}

		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so. This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic. It is possible we've emptied the
	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip ? mlip->li_lsn : 0;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}

/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn. The base of the list is
 * a forw/back pointer pair embedded in the xfs mount structure.
 * The base is initialized with both pointers pointing to the
 * base. This case always needs to be distinguished, because
 * the base has no lsn to look at. We almost always insert
 * at the end of the list, so on inserts we search from the
 * end of the list to find where the new item belongs.
8341da177e4SLinus Torvalds */ 8351da177e4SLinus Torvalds 8361da177e4SLinus Torvalds /* 8371da177e4SLinus Torvalds * Initialize the doubly linked list to point only to itself. 8381da177e4SLinus Torvalds */ 839249a8c11SDavid Chinner int 8401da177e4SLinus Torvalds xfs_trans_ail_init( 8411da177e4SLinus Torvalds xfs_mount_t *mp) 8421da177e4SLinus Torvalds { 84382fa9012SDavid Chinner struct xfs_ail *ailp; 84482fa9012SDavid Chinner 84582fa9012SDavid Chinner ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 84682fa9012SDavid Chinner if (!ailp) 84782fa9012SDavid Chinner return ENOMEM; 84882fa9012SDavid Chinner 84982fa9012SDavid Chinner ailp->xa_mount = mp; 85082fa9012SDavid Chinner INIT_LIST_HEAD(&ailp->xa_ail); 851c7e8f268SDavid Chinner spin_lock_init(&ailp->xa_lock); 8520bf6a5bdSDave Chinner INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); 85327d8d5feSDavid Chinner mp->m_ail = ailp; 85427d8d5feSDavid Chinner return 0; 855249a8c11SDavid Chinner } 856249a8c11SDavid Chinner 857249a8c11SDavid Chinner void 858249a8c11SDavid Chinner xfs_trans_ail_destroy( 859249a8c11SDavid Chinner xfs_mount_t *mp) 860249a8c11SDavid Chinner { 86182fa9012SDavid Chinner struct xfs_ail *ailp = mp->m_ail; 86282fa9012SDavid Chinner 8630bf6a5bdSDave Chinner cancel_delayed_work_sync(&ailp->xa_work); 86482fa9012SDavid Chinner kmem_free(ailp); 8651da177e4SLinus Torvalds } 866