/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"

struct workqueue_struct	*xfs_ail_wq;	/* AIL workqueue */

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail	*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
	struct xfs_ail	*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	if (lip->li_ail.next == &ailp->xa_ail)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the first
 * item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->xa_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor. If the cursor
 * was invalidated (indicated by the low bit being set in the cursor's item
 * pointer), restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->xa_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass an @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * Search in descending LSN order for the last item in the AIL with an LSN
 * less than or equal to @lsn. Returns NULL if no such item exists.
 */
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then the cursor is set to the last item with an
 * LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}
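
/*
 * A minimal traversal sketch (illustrative only): "lsn" and "do_something"
 * are placeholders for a caller-chosen start LSN and per-item work. This
 * mirrors the way xfs_ail_worker() below walks the list, with the AIL lock
 * held across the walk:
 *
 *	struct xfs_ail_cursor	cur;
 *	struct xfs_log_item	*lip;
 *
 *	spin_lock(&ailp->xa_lock);
 *	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *	     lip != NULL;
 *	     lip = xfs_trans_ail_cursor_next(ailp, &cur))
 *		do_something(lip);
 *	xfs_trans_ail_cursor_done(ailp, &cur);
 *	spin_unlock(&ailp->xa_lock);
 *
 * If the item the cursor points at is removed while the walk is paused,
 * xfs_trans_ail_cursor_clear() tags the cursor and the next call to
 * xfs_trans_ail_cursor_next() restarts from the head of the AIL.
 */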

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided.  If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (__psint_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now.  Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice.  Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go.  If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->xa_ail);
}

/*
 * Delete the given item from the AIL.
 */
static void
xfs_ail_delete(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
 * to run at a later time if there is more work to do to complete the push.
 */
STATIC void
xfs_ail_worker(
	struct work_struct	*work)
{
	struct xfs_ail		*ailp = container_of(to_delayed_work(work),
					struct xfs_ail, xa_work);
	xfs_mount_t		*mp = ailp->xa_mount;
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout = 10;
	int			flush_log = 0;
	int			stuck = 0;
	int			count = 0;
	int			push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	target = ailp->xa_target;
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * AIL is empty or our push has reached the end.
		 */
		xfs_trans_ail_cursor_done(ailp, &cur);
		spin_unlock(&ailp->xa_lock);
		goto out_done;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out. We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait
	 * for a reduced timeout to fire before pushing further. This
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;

		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item.  Then re-grab the AIL lock so we
		 * can look for the next item on the AIL. List changes are
		 * handled by the AIL lookup functions internally.
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged.  In
		 * any of these cases it is being taken care of and we can just
		 * skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&ailp->xa_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			XFS_STATS_INC(xs_push_ail_pushbuf);

			if (!IOP_PUSHBUF(lip)) {
				stuck++;
				flush_log = 1;
			} else {
				ailp->xa_last_pushed_lsn = lsn;
			}
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&ailp->xa_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	/* assume we have more work to do in a short while */
out_done:
	if (!count) {
		/* We're past our target or empty, so idle */
		ailp->xa_last_pushed_lsn = 0;

		/*
		 * We clear the XFS_AIL_PUSHING_BIT before checking whether
		 * the target has changed. If the target has changed, this
		 * pushes the requeue race directly onto the result of the
		 * atomic test/set bit, so we are guaranteed that either the
		 * pusher that changed the target or ourselves will requeue
		 * the work (but not both).
		 */
		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
		smp_rmb();
		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
			return;

		tout = 50;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->xa_last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we
		 * are stuck due to operations in progress. "Stuck" in this
		 * case is defined as >90% of the items we tried to push
		 * were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout = 20;
	}

	/* There is more to do, requeue us. */
	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
					msecs_to_jiffies(tout));
}

/*
 * This routine is called to move the tail of the AIL forward. It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a workqueue, which means the caller needs
 * to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before it clears
	 * the XFS_AIL_PUSHING_BIT.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}
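
/*
 * Usage sketch (illustrative only; threshold_lsn is a placeholder the caller
 * computes, typically from the amount of log space it needs to free):
 *
 *	xfs_ail_push(mp->m_ail, threshold_lsn);	- push items up to an LSN
 *	xfs_ail_push_all(mp->m_ail);		- push everything in the AIL
 *
 * Both calls only queue background work; a caller that needs log space still
 * has to wait for the tail of the log to move, as described above.
 */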

/*
 * This is to be called when an item is unlocked that may have
 * been in the AIL.  It will wake up the first member of the AIL
 * wait list if this item's unlocking might allow it to progress.
 * If the item is in the AIL, then we need to get the AIL lock
 * while doing our checking so we don't race with someone going
 * to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have
	 * unlocked log items arbitrarily. The last thing
	 * we want to do is to move the tail of the log
	 * over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min()
	 * without holding the AIL lock because we only care about the
	 * case where we are at the tail of the AIL. If the object isn't
	 * at the tail, it doesn't matter what result we get back. This
	 * is slightly racy because, since we were just unlocked, we could
	 * go to sleep between the call to xfs_ail_min and the call to
	 * xfs_log_move_tail, have someone else lock us, commit us to disk,
	 * move us out of the tail of the AIL, and then we wake up. However,
	 * the call to xfs_log_move_tail() doesn't do anything if there's
	 * not enough free space to wake people up so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function takes the AIL lock once to execute the update operations on
 * all the items in the array. Once we have the AIL lock, we need to check
 * each log item LSN to confirm it needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (lip->li_flags & XFS_LI_IN_AIL) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			lip->li_flags |= XFS_LI_IN_AIL;
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so. This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip->li_lsn;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
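
/*
 * Caller sketch (illustrative only; log_items, nr_items and commit_lsn are
 * placeholders supplied by the caller, e.g. transaction commit completion
 * code). The AIL lock must be held on entry and is dropped by the call:
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_update_bulk(ailp, NULL, log_items, nr_items, commit_lsn);
 *
 * Passing a cursor instead of NULL lets a caller that inserts several batches
 * at the same LSN avoid repeating the insertion point search, as described
 * above for xfs_ail_splice().
 */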

/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion. This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0. If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL. This does not
 * greatly increase the AIL hold time, but does significantly reduce the amount
 * of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
			return;
		}

		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so. This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic. It is possible we've emptied the
	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip ? mlip->li_lsn : 0;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}

/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn.  The base of the list is
 * a forw/back pointer pair embedded in the struct xfs_ail.
 * The base is initialized with both pointers pointing to the
 * base.  This case always needs to be distinguished, because
 * the base has no lsn to look at.  We almost always insert
 * at the end of the list, so on inserts we search from the
 * end of the list to find where the new item belongs.
 */

/*
 * Initialize the doubly linked list to point only to itself.
 */
int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return ENOMEM;

	ailp->xa_mount = mp;
	INIT_LIST_HEAD(&ailp->xa_ail);
	INIT_LIST_HEAD(&ailp->xa_cursors);
	spin_lock_init(&ailp->xa_lock);
	INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
	mp->m_ail = ailp;
	return 0;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	cancel_delayed_work_sync(&ailp->xa_work);
	kmem_free(ailp);
}