11da177e4SLinus Torvalds /* 27b718769SNathan Scott * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 3c7e8f268SDavid Chinner * Copyright (c) 2008 Dave Chinner 47b718769SNathan Scott * All Rights Reserved. 51da177e4SLinus Torvalds * 67b718769SNathan Scott * This program is free software; you can redistribute it and/or 77b718769SNathan Scott * modify it under the terms of the GNU General Public License as 81da177e4SLinus Torvalds * published by the Free Software Foundation. 91da177e4SLinus Torvalds * 107b718769SNathan Scott * This program is distributed in the hope that it would be useful, 117b718769SNathan Scott * but WITHOUT ANY WARRANTY; without even the implied warranty of 127b718769SNathan Scott * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 137b718769SNathan Scott * GNU General Public License for more details. 141da177e4SLinus Torvalds * 157b718769SNathan Scott * You should have received a copy of the GNU General Public License 167b718769SNathan Scott * along with this program; if not, write the Free Software Foundation, 177b718769SNathan Scott * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 181da177e4SLinus Torvalds */ 191da177e4SLinus Torvalds #include "xfs.h" 20a844f451SNathan Scott #include "xfs_fs.h" 211da177e4SLinus Torvalds #include "xfs_types.h" 221da177e4SLinus Torvalds #include "xfs_log.h" 23a844f451SNathan Scott #include "xfs_inum.h" 241da177e4SLinus Torvalds #include "xfs_trans.h" 251da177e4SLinus Torvalds #include "xfs_sb.h" 26da353b0dSDavid Chinner #include "xfs_ag.h" 271da177e4SLinus Torvalds #include "xfs_mount.h" 281da177e4SLinus Torvalds #include "xfs_trans_priv.h" 291da177e4SLinus Torvalds #include "xfs_error.h" 301da177e4SLinus Torvalds 310bf6a5bdSDave Chinner struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ 320bf6a5bdSDave Chinner 331da177e4SLinus Torvalds #ifdef DEBUG 34cd4a3c50SDave Chinner /* 35cd4a3c50SDave Chinner * Check that the list is sorted as it should be. 
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	/* Nothing to validate on an empty AIL. */
	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check the next and previous entries are valid: @lip must be flagged
	 * as being in the AIL, and its neighbours (when they are real items
	 * and not the list head) must be LSN-ordered around it.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);


#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail	*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
96fd074841SDave Chinner */ 97fd074841SDave Chinner static xfs_log_item_t * 98fd074841SDave Chinner xfs_ail_max( 99fd074841SDave Chinner struct xfs_ail *ailp) 100fd074841SDave Chinner { 101fd074841SDave Chinner if (list_empty(&ailp->xa_ail)) 102fd074841SDave Chinner return NULL; 103fd074841SDave Chinner 104fd074841SDave Chinner return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail); 105fd074841SDave Chinner } 106fd074841SDave Chinner 107fd074841SDave Chinner /* 108cd4a3c50SDave Chinner * Return a pointer to the item which follows the given item in the AIL. If 109cd4a3c50SDave Chinner * the given item is the last item in the list, then return NULL. 110cd4a3c50SDave Chinner */ 111cd4a3c50SDave Chinner static xfs_log_item_t * 112cd4a3c50SDave Chinner xfs_ail_next( 113cd4a3c50SDave Chinner struct xfs_ail *ailp, 114cd4a3c50SDave Chinner xfs_log_item_t *lip) 115cd4a3c50SDave Chinner { 116cd4a3c50SDave Chinner if (lip->li_ail.next == &ailp->xa_ail) 117cd4a3c50SDave Chinner return NULL; 118cd4a3c50SDave Chinner 119cd4a3c50SDave Chinner return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail); 120cd4a3c50SDave Chinner } 121cd4a3c50SDave Chinner 122cd4a3c50SDave Chinner /* 123cd4a3c50SDave Chinner * This is called by the log manager code to determine the LSN of the tail of 124cd4a3c50SDave Chinner * the log. This is exactly the LSN of the first item in the AIL. If the AIL 125cd4a3c50SDave Chinner * is empty, then this function returns 0. 1261da177e4SLinus Torvalds * 127cd4a3c50SDave Chinner * We need the AIL lock in order to get a coherent read of the lsn of the last 128cd4a3c50SDave Chinner * item in the AIL. 
1291da177e4SLinus Torvalds */ 1301da177e4SLinus Torvalds xfs_lsn_t 131fd074841SDave Chinner xfs_ail_min_lsn( 1325b00f14fSDavid Chinner struct xfs_ail *ailp) 1331da177e4SLinus Torvalds { 134cd4a3c50SDave Chinner xfs_lsn_t lsn = 0; 1351da177e4SLinus Torvalds xfs_log_item_t *lip; 1361da177e4SLinus Torvalds 137c7e8f268SDavid Chinner spin_lock(&ailp->xa_lock); 1385b00f14fSDavid Chinner lip = xfs_ail_min(ailp); 139cd4a3c50SDave Chinner if (lip) 1401da177e4SLinus Torvalds lsn = lip->li_lsn; 141c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 1421da177e4SLinus Torvalds 1431da177e4SLinus Torvalds return lsn; 1441da177e4SLinus Torvalds } 1451da177e4SLinus Torvalds 1461da177e4SLinus Torvalds /* 147fd074841SDave Chinner * Return the maximum lsn held in the AIL, or zero if the AIL is empty. 148fd074841SDave Chinner */ 149fd074841SDave Chinner static xfs_lsn_t 150fd074841SDave Chinner xfs_ail_max_lsn( 151fd074841SDave Chinner struct xfs_ail *ailp) 152fd074841SDave Chinner { 153fd074841SDave Chinner xfs_lsn_t lsn = 0; 154fd074841SDave Chinner xfs_log_item_t *lip; 155fd074841SDave Chinner 156fd074841SDave Chinner spin_lock(&ailp->xa_lock); 157fd074841SDave Chinner lip = xfs_ail_max(ailp); 158fd074841SDave Chinner if (lip) 159fd074841SDave Chinner lsn = lip->li_lsn; 160fd074841SDave Chinner spin_unlock(&ailp->xa_lock); 161fd074841SDave Chinner 162fd074841SDave Chinner return lsn; 163fd074841SDave Chinner } 164fd074841SDave Chinner 165fd074841SDave Chinner /* 166*af3e4022SDave Chinner * The cursor keeps track of where our current traversal is up to by tracking 167*af3e4022SDave Chinner * the next item in the list for us. However, for this to be safe, removing an 168*af3e4022SDave Chinner * object from the AIL needs to invalidate any cursor that points to it. hence 169*af3e4022SDave Chinner * the traversal cursor needs to be linked to the struct xfs_ail so that 170*af3e4022SDave Chinner * deletion can search all the active cursors for invalidation. 
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	/* Register the cursor so AIL deletions can invalidate it. */
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->xa_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit of the item pointer being set),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->xa_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	/* lsn == 0 means "start from the head of the AIL". */
	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	/* Linear ascending search for the first item at or beyond @lsn. */
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	/* Point the cursor at the item after the one we are returning. */
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * Walk the AIL backwards and return the last item with an LSN at or below
 * @lsn, or NULL if no such item exists.  Caller holds the AIL lock.
 */
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.
If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with
 * an LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip = cur ? cur->item : NULL;
	struct xfs_log_item	*next_lip;

	/*
	 * Get a new cursor if we don't have a placeholder or the existing one
	 * has been invalidated (low bit of the pointer set).
	 */
	if (!lip || (__psint_t)lip & 1) {
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

		if (!lip) {
			/* The list is empty, so just splice and return. */
			if (cur)
				cur->item = NULL;
			list_splice(list, &ailp->xa_ail);
			return;
		}
	}

	/*
	 * Our cursor points to the item we want to insert _after_, so we have
	 * to update the cursor to point to the end of the list we are splicing
	 * in so that it points to the correct location for the next splice.
	 * i.e. before the splice
	 *
	 *  lsn -> lsn -> lsn + x -> lsn + x ...
	 *          ^
	 *          | cursor points here
	 *
	 * After the splice we have:
	 *
	 *  lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
	 *          ^                            ^
	 *          | cursor points here         | needs to move here
	 *
	 * So we set the cursor to the last item in the list to be spliced
	 * before we execute the splice, resulting in the cursor pointing to
	 * the correct item after the splice occurs.
	 */
	if (cur) {
		next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
		cur->item = next_lip;
	}
	list_splice(list, &lip->li_ail);
}

/*
 * Delete the given item from the AIL and invalidate any cursors pointing
 * at it.  Caller holds the AIL lock.
 */
static void
xfs_ail_delete(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
 * to run at a later time if there is more work to do to complete the push.
 */
STATIC void
xfs_ail_worker(
	struct work_struct	*work)
{
	struct xfs_ail		*ailp = container_of(to_delayed_work(work),
					struct xfs_ail, xa_work);
	xfs_mount_t		*mp = ailp->xa_mount;
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout = 10;	/* requeue delay in ms */
	int			flush_log = 0;
	int			stuck = 0;
	int			count = 0;
	int			push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	target = ailp->xa_target;
	/* Resume from where the previous pass stopped. */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * AIL is empty or our push has reached the end.
		 */
		xfs_trans_ail_cursor_done(ailp, &cur);
		spin_unlock(&ailp->xa_lock);
		goto out_done;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out. We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait
	 * for a reduced timeout to fire before pushing further. This
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;
		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item.  Then re-grab the AIL lock so we
		 * can look for the next item on the AIL. List changes are
		 * handled by the AIL lookup functions internally
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged.  In
		 * any of these case it is being taken care of and we can just
		 * skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&ailp->xa_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			/* Item is backed by a delwri buffer; kick xfsbufd. */
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			ailp->xa_last_pushed_lsn = lsn;
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:
			/* Pinned by the log; a log force will unpin it. */
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			ailp->xa_last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&ailp->xa_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	/* assume we have more work to do in a short while */
out_done:
	if (!count) {
		/* We're past our target or empty, so idle */
		ailp->xa_last_pushed_lsn = 0;

		/*
		 * We clear the XFS_AIL_PUSHING_BIT first before checking
		 * whether the target has changed. If the target has changed,
		 * this pushes the requeue race directly onto the result of the
		 * atomic test/set bit, so we are guaranteed that either the
		 * pusher that changed the target or ourselves will requeue
		 * the work (but not both).
		 */
		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
		smp_rmb();
		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
			return;

		tout = 50;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->xa_last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we
		 * are stuck due to operations in progress. "Stuck" in this
		 * case is defined as >90% of the items we tried to push
		 * were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout = 20;
	}

	/* There is more to do, requeue us.  */
	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
					msecs_to_jiffies(tout));
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a workqueue, which means the caller needs
 * to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.
We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	/* Nothing to do if empty, shut down, or already pushing this far. */
	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before it clears
	 * the XFS_AIL_PUSHING_BIT.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}

/*
 * Push out all items in the AIL immediately
 */
void
xfs_ail_push_all(
	struct xfs_ail  *ailp)
{
	xfs_lsn_t       threshold_lsn = xfs_ail_max_lsn(ailp);

	/* A zero LSN means the AIL is empty; nothing to push. */
	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * This is to be called when an item is unlocked that may have
 * been in the AIL.
It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have
	 * unlocked log items arbitrarily. The last thing
	 * we want to do is to move the tail of the log
	 * over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min()
	 * without holding the AIL lock because we only care about the
	 * case where we are at the tail of the AIL. If the object isn't
	 * at the tail, it doesn't matter what result we get back. This
	 * is slightly racy because since we were just unlocked, we could
	 * go to sleep between the call to xfs_ail_min and the call to
	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
	 * move us out of the tail of the AIL, and then we wake up. However,
	 * the call to xfs_log_move_tail() doesn't do anything if there's
	 * not enough free space to wake people up so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned  by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function takes the AIL lock once to execute the update operations on
 * all the items in the array, and as such should not be called with the AIL
 * lock held. As a result, once we have the AIL lock, we need to check each log
 * item LSN to confirm it needs to be moved forward in the AIL.
6620e57f6a3SDave Chinner * 6630e57f6a3SDave Chinner * To optimise the insert operation, we delete all the items from the AIL in 6640e57f6a3SDave Chinner * the first pass, moving them into a temporary list, then splice the temporary 6650e57f6a3SDave Chinner * list into the correct position in the AIL. This avoids needing to do an 6660e57f6a3SDave Chinner * insert operation on every item. 6670e57f6a3SDave Chinner * 6680e57f6a3SDave Chinner * This function must be called with the AIL lock held. The lock is dropped 6690e57f6a3SDave Chinner * before returning. 6700e57f6a3SDave Chinner */ 6710e57f6a3SDave Chinner void 6720e57f6a3SDave Chinner xfs_trans_ail_update_bulk( 6730e57f6a3SDave Chinner struct xfs_ail *ailp, 6741d8c95a3SDave Chinner struct xfs_ail_cursor *cur, 6750e57f6a3SDave Chinner struct xfs_log_item **log_items, 6760e57f6a3SDave Chinner int nr_items, 6770e57f6a3SDave Chinner xfs_lsn_t lsn) __releases(ailp->xa_lock) 6780e57f6a3SDave Chinner { 6790e57f6a3SDave Chinner xfs_log_item_t *mlip; 6800e57f6a3SDave Chinner xfs_lsn_t tail_lsn; 6810e57f6a3SDave Chinner int mlip_changed = 0; 6820e57f6a3SDave Chinner int i; 6830e57f6a3SDave Chinner LIST_HEAD(tmp); 6840e57f6a3SDave Chinner 6850e57f6a3SDave Chinner mlip = xfs_ail_min(ailp); 6860e57f6a3SDave Chinner 6870e57f6a3SDave Chinner for (i = 0; i < nr_items; i++) { 6880e57f6a3SDave Chinner struct xfs_log_item *lip = log_items[i]; 6890e57f6a3SDave Chinner if (lip->li_flags & XFS_LI_IN_AIL) { 6900e57f6a3SDave Chinner /* check if we really need to move the item */ 6910e57f6a3SDave Chinner if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0) 6920e57f6a3SDave Chinner continue; 6930e57f6a3SDave Chinner 6940e57f6a3SDave Chinner xfs_ail_delete(ailp, lip); 6950e57f6a3SDave Chinner if (mlip == lip) 6960e57f6a3SDave Chinner mlip_changed = 1; 6970e57f6a3SDave Chinner } else { 6980e57f6a3SDave Chinner lip->li_flags |= XFS_LI_IN_AIL; 6990e57f6a3SDave Chinner } 7000e57f6a3SDave Chinner lip->li_lsn = lsn; 7010e57f6a3SDave Chinner 
list_add(&lip->li_ail, &tmp); 7020e57f6a3SDave Chinner } 7030e57f6a3SDave Chinner 7041d8c95a3SDave Chinner xfs_ail_splice(ailp, cur, &tmp, lsn); 7050e57f6a3SDave Chinner 7060e57f6a3SDave Chinner if (!mlip_changed) { 7070e57f6a3SDave Chinner spin_unlock(&ailp->xa_lock); 7080e57f6a3SDave Chinner return; 7090e57f6a3SDave Chinner } 7100e57f6a3SDave Chinner 7110e57f6a3SDave Chinner /* 7120e57f6a3SDave Chinner * It is not safe to access mlip after the AIL lock is dropped, so we 7130e57f6a3SDave Chinner * must get a copy of li_lsn before we do so. This is especially 7140e57f6a3SDave Chinner * important on 32-bit platforms where accessing and updating 64-bit 7150e57f6a3SDave Chinner * values like li_lsn is not atomic. 7160e57f6a3SDave Chinner */ 7170e57f6a3SDave Chinner mlip = xfs_ail_min(ailp); 7180e57f6a3SDave Chinner tail_lsn = mlip->li_lsn; 7190e57f6a3SDave Chinner spin_unlock(&ailp->xa_lock); 7200e57f6a3SDave Chinner xfs_log_move_tail(ailp->xa_mount, tail_lsn); 7210e57f6a3SDave Chinner } 7220e57f6a3SDave Chinner 7230e57f6a3SDave Chinner /* 72430136832SDave Chinner * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL 72530136832SDave Chinner * 72630136832SDave Chinner * @xfs_trans_ail_delete_bulk takes an array of log items that all need to 72730136832SDave Chinner * removed from the AIL. The caller is already holding the AIL lock, and done 72830136832SDave Chinner * all the checks necessary to ensure the items passed in via @log_items are 72930136832SDave Chinner * ready for deletion. This includes checking that the items are in the AIL. 73030136832SDave Chinner * 73130136832SDave Chinner * For each log item to be removed, unlink it from the AIL, clear the IN_AIL 73230136832SDave Chinner * flag from the item and reset the item's lsn to 0. If we remove the first 73330136832SDave Chinner * item in the AIL, update the log tail to match the new minimum LSN in the 73430136832SDave Chinner * AIL. 
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;

	/* Remember the current minimum so we can tell if the tail moved. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			/*
			 * Deleting an item not in the AIL indicates in-core
			 * corruption: warn and shut the filesystem down,
			 * unless a forced shutdown already explains it.
			 */
			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
			return;
		}

		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so. This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic. It is possible we've emptied the
	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip ? mlip->li_lsn : 0;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}

/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn. The base of the list is
 * a forw/back pointer pair embedded in the xfs mount structure.
 * The base is initialized with both pointers pointing to the
 * base. This case always needs to be distinguished, because
 * the base has no lsn to look at. We almost always insert
 * at the end of the list, so on inserts we search from the
 * end of the list to find where the new item belongs.
8061da177e4SLinus Torvalds */ 8071da177e4SLinus Torvalds 8081da177e4SLinus Torvalds /* 8091da177e4SLinus Torvalds * Initialize the doubly linked list to point only to itself. 8101da177e4SLinus Torvalds */ 811249a8c11SDavid Chinner int 8121da177e4SLinus Torvalds xfs_trans_ail_init( 8131da177e4SLinus Torvalds xfs_mount_t *mp) 8141da177e4SLinus Torvalds { 81582fa9012SDavid Chinner struct xfs_ail *ailp; 81682fa9012SDavid Chinner 81782fa9012SDavid Chinner ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 81882fa9012SDavid Chinner if (!ailp) 81982fa9012SDavid Chinner return ENOMEM; 82082fa9012SDavid Chinner 82182fa9012SDavid Chinner ailp->xa_mount = mp; 82282fa9012SDavid Chinner INIT_LIST_HEAD(&ailp->xa_ail); 823*af3e4022SDave Chinner INIT_LIST_HEAD(&ailp->xa_cursors); 824c7e8f268SDavid Chinner spin_lock_init(&ailp->xa_lock); 8250bf6a5bdSDave Chinner INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); 82627d8d5feSDavid Chinner mp->m_ail = ailp; 82727d8d5feSDavid Chinner return 0; 828249a8c11SDavid Chinner } 829249a8c11SDavid Chinner 830249a8c11SDavid Chinner void 831249a8c11SDavid Chinner xfs_trans_ail_destroy( 832249a8c11SDavid Chinner xfs_mount_t *mp) 833249a8c11SDavid Chinner { 83482fa9012SDavid Chinner struct xfs_ail *ailp = mp->m_ail; 83582fa9012SDavid Chinner 8360bf6a5bdSDave Chinner cancel_delayed_work_sync(&ailp->xa_work); 83782fa9012SDavid Chinner kmem_free(ailp); 8381da177e4SLinus Torvalds } 839