11da177e4SLinus Torvalds /* 27b718769SNathan Scott * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 3c7e8f268SDavid Chinner * Copyright (c) 2008 Dave Chinner 47b718769SNathan Scott * All Rights Reserved. 51da177e4SLinus Torvalds * 67b718769SNathan Scott * This program is free software; you can redistribute it and/or 77b718769SNathan Scott * modify it under the terms of the GNU General Public License as 81da177e4SLinus Torvalds * published by the Free Software Foundation. 91da177e4SLinus Torvalds * 107b718769SNathan Scott * This program is distributed in the hope that it would be useful, 117b718769SNathan Scott * but WITHOUT ANY WARRANTY; without even the implied warranty of 127b718769SNathan Scott * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 137b718769SNathan Scott * GNU General Public License for more details. 141da177e4SLinus Torvalds * 157b718769SNathan Scott * You should have received a copy of the GNU General Public License 167b718769SNathan Scott * along with this program; if not, write the Free Software Foundation, 177b718769SNathan Scott * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 181da177e4SLinus Torvalds */ 191da177e4SLinus Torvalds #include "xfs.h" 20a844f451SNathan Scott #include "xfs_fs.h" 211da177e4SLinus Torvalds #include "xfs_types.h" 221da177e4SLinus Torvalds #include "xfs_log.h" 23a844f451SNathan Scott #include "xfs_inum.h" 241da177e4SLinus Torvalds #include "xfs_trans.h" 251da177e4SLinus Torvalds #include "xfs_sb.h" 26da353b0dSDavid Chinner #include "xfs_ag.h" 271da177e4SLinus Torvalds #include "xfs_mount.h" 281da177e4SLinus Torvalds #include "xfs_trans_priv.h" 291da177e4SLinus Torvalds #include "xfs_error.h" 301da177e4SLinus Torvalds 31*0bf6a5bdSDave Chinner struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ 32*0bf6a5bdSDave Chinner 330e57f6a3SDave Chinner STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); 34eb3efa12SDave Chinner STATIC void xfs_ail_delete(struct xfs_ail 
*, xfs_log_item_t *); 3582fa9012SDavid Chinner STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); 3682fa9012SDavid Chinner STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *); 371da177e4SLinus Torvalds 381da177e4SLinus Torvalds #ifdef DEBUG 3982fa9012SDavid Chinner STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *); 401da177e4SLinus Torvalds #else 41de08dbc1SDavid Chinner #define xfs_ail_check(a,l) 421da177e4SLinus Torvalds #endif /* DEBUG */ 431da177e4SLinus Torvalds 441da177e4SLinus Torvalds 451da177e4SLinus Torvalds /* 461da177e4SLinus Torvalds * This is called by the log manager code to determine the LSN 471da177e4SLinus Torvalds * of the tail of the log. This is exactly the LSN of the first 481da177e4SLinus Torvalds * item in the AIL. If the AIL is empty, then this function 491da177e4SLinus Torvalds * returns 0. 501da177e4SLinus Torvalds * 511da177e4SLinus Torvalds * We need the AIL lock in order to get a coherent read of the 521da177e4SLinus Torvalds * lsn of the last item in the AIL. 531da177e4SLinus Torvalds */ 541da177e4SLinus Torvalds xfs_lsn_t 555b00f14fSDavid Chinner xfs_trans_ail_tail( 565b00f14fSDavid Chinner struct xfs_ail *ailp) 571da177e4SLinus Torvalds { 581da177e4SLinus Torvalds xfs_lsn_t lsn; 591da177e4SLinus Torvalds xfs_log_item_t *lip; 601da177e4SLinus Torvalds 61c7e8f268SDavid Chinner spin_lock(&ailp->xa_lock); 625b00f14fSDavid Chinner lip = xfs_ail_min(ailp); 631da177e4SLinus Torvalds if (lip == NULL) { 641da177e4SLinus Torvalds lsn = (xfs_lsn_t)0; 651da177e4SLinus Torvalds } else { 661da177e4SLinus Torvalds lsn = lip->li_lsn; 671da177e4SLinus Torvalds } 68c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 691da177e4SLinus Torvalds 701da177e4SLinus Torvalds return lsn; 711da177e4SLinus Torvalds } 721da177e4SLinus Torvalds 731da177e4SLinus Torvalds /* 7427d8d5feSDavid Chinner * AIL traversal cursor initialisation. 
7527d8d5feSDavid Chinner * 7627d8d5feSDavid Chinner * The cursor keeps track of where our current traversal is up 7727d8d5feSDavid Chinner * to by tracking the next ƣtem in the list for us. However, for 7827d8d5feSDavid Chinner * this to be safe, removing an object from the AIL needs to invalidate 7927d8d5feSDavid Chinner * any cursor that points to it. hence the traversal cursor needs to 8027d8d5feSDavid Chinner * be linked to the struct xfs_ail so that deletion can search all the 8127d8d5feSDavid Chinner * active cursors for invalidation. 8227d8d5feSDavid Chinner * 8327d8d5feSDavid Chinner * We don't link the push cursor because it is embedded in the struct 8427d8d5feSDavid Chinner * xfs_ail and hence easily findable. 8527d8d5feSDavid Chinner */ 865b00f14fSDavid Chinner STATIC void 8727d8d5feSDavid Chinner xfs_trans_ail_cursor_init( 8827d8d5feSDavid Chinner struct xfs_ail *ailp, 8927d8d5feSDavid Chinner struct xfs_ail_cursor *cur) 9027d8d5feSDavid Chinner { 9127d8d5feSDavid Chinner cur->item = NULL; 9227d8d5feSDavid Chinner if (cur == &ailp->xa_cursors) 9327d8d5feSDavid Chinner return; 9427d8d5feSDavid Chinner 9527d8d5feSDavid Chinner cur->next = ailp->xa_cursors.next; 9627d8d5feSDavid Chinner ailp->xa_cursors.next = cur; 9727d8d5feSDavid Chinner } 9827d8d5feSDavid Chinner 9927d8d5feSDavid Chinner /* 10027d8d5feSDavid Chinner * Set the cursor to the next item, because when we look 10127d8d5feSDavid Chinner * up the cursor the current item may have been freed. 
10227d8d5feSDavid Chinner */ 10327d8d5feSDavid Chinner STATIC void 10427d8d5feSDavid Chinner xfs_trans_ail_cursor_set( 10527d8d5feSDavid Chinner struct xfs_ail *ailp, 10627d8d5feSDavid Chinner struct xfs_ail_cursor *cur, 10727d8d5feSDavid Chinner struct xfs_log_item *lip) 10827d8d5feSDavid Chinner { 10927d8d5feSDavid Chinner if (lip) 11027d8d5feSDavid Chinner cur->item = xfs_ail_next(ailp, lip); 11127d8d5feSDavid Chinner } 11227d8d5feSDavid Chinner 11327d8d5feSDavid Chinner /* 11427d8d5feSDavid Chinner * Get the next item in the traversal and advance the cursor. 11527d8d5feSDavid Chinner * If the cursor was invalidated (inidicated by a lip of 1), 11627d8d5feSDavid Chinner * restart the traversal. 11727d8d5feSDavid Chinner */ 1185b00f14fSDavid Chinner struct xfs_log_item * 11927d8d5feSDavid Chinner xfs_trans_ail_cursor_next( 12027d8d5feSDavid Chinner struct xfs_ail *ailp, 12127d8d5feSDavid Chinner struct xfs_ail_cursor *cur) 12227d8d5feSDavid Chinner { 12327d8d5feSDavid Chinner struct xfs_log_item *lip = cur->item; 12427d8d5feSDavid Chinner 12527d8d5feSDavid Chinner if ((__psint_t)lip & 1) 12627d8d5feSDavid Chinner lip = xfs_ail_min(ailp); 12727d8d5feSDavid Chinner xfs_trans_ail_cursor_set(ailp, cur, lip); 12827d8d5feSDavid Chinner return lip; 12927d8d5feSDavid Chinner } 13027d8d5feSDavid Chinner 13127d8d5feSDavid Chinner /* 13227d8d5feSDavid Chinner * Now that the traversal is complete, we need to remove the cursor 13327d8d5feSDavid Chinner * from the list of traversing cursors. Avoid removing the embedded 1349da096fdSMalcolm Parsons * push cursor, but use the fact it is always present to make the 13527d8d5feSDavid Chinner * list deletion simple. 
13627d8d5feSDavid Chinner */ 13727d8d5feSDavid Chinner void 13827d8d5feSDavid Chinner xfs_trans_ail_cursor_done( 13927d8d5feSDavid Chinner struct xfs_ail *ailp, 14027d8d5feSDavid Chinner struct xfs_ail_cursor *done) 14127d8d5feSDavid Chinner { 14227d8d5feSDavid Chinner struct xfs_ail_cursor *prev = NULL; 14327d8d5feSDavid Chinner struct xfs_ail_cursor *cur; 14427d8d5feSDavid Chinner 14527d8d5feSDavid Chinner done->item = NULL; 14627d8d5feSDavid Chinner if (done == &ailp->xa_cursors) 14727d8d5feSDavid Chinner return; 14827d8d5feSDavid Chinner prev = &ailp->xa_cursors; 14927d8d5feSDavid Chinner for (cur = prev->next; cur; prev = cur, cur = prev->next) { 15027d8d5feSDavid Chinner if (cur == done) { 15127d8d5feSDavid Chinner prev->next = cur->next; 15227d8d5feSDavid Chinner break; 15327d8d5feSDavid Chinner } 15427d8d5feSDavid Chinner } 15527d8d5feSDavid Chinner ASSERT(cur); 15627d8d5feSDavid Chinner } 15727d8d5feSDavid Chinner 15827d8d5feSDavid Chinner /* 1595b00f14fSDavid Chinner * Invalidate any cursor that is pointing to this item. This is 1605b00f14fSDavid Chinner * called when an item is removed from the AIL. Any cursor pointing 1615b00f14fSDavid Chinner * to this object is now invalid and the traversal needs to be 1625b00f14fSDavid Chinner * terminated so it doesn't reference a freed object. We set the 1635b00f14fSDavid Chinner * cursor item to a value of 1 so we can distinguish between an 1645b00f14fSDavid Chinner * invalidation and the end of the list when getting the next item 1655b00f14fSDavid Chinner * from the cursor. 
1665b00f14fSDavid Chinner */ 1675b00f14fSDavid Chinner STATIC void 1685b00f14fSDavid Chinner xfs_trans_ail_cursor_clear( 1695b00f14fSDavid Chinner struct xfs_ail *ailp, 1705b00f14fSDavid Chinner struct xfs_log_item *lip) 1715b00f14fSDavid Chinner { 1725b00f14fSDavid Chinner struct xfs_ail_cursor *cur; 1735b00f14fSDavid Chinner 1745b00f14fSDavid Chinner /* need to search all cursors */ 1755b00f14fSDavid Chinner for (cur = &ailp->xa_cursors; cur; cur = cur->next) { 1765b00f14fSDavid Chinner if (cur->item == lip) 1775b00f14fSDavid Chinner cur->item = (struct xfs_log_item *) 1785b00f14fSDavid Chinner ((__psint_t)cur->item | 1); 1795b00f14fSDavid Chinner } 1805b00f14fSDavid Chinner } 1815b00f14fSDavid Chinner 1825b00f14fSDavid Chinner /* 183249a8c11SDavid Chinner * Return the item in the AIL with the current lsn. 184249a8c11SDavid Chinner * Return the current tree generation number for use 185249a8c11SDavid Chinner * in calls to xfs_trans_next_ail(). 186249a8c11SDavid Chinner */ 1875b00f14fSDavid Chinner xfs_log_item_t * 1885b00f14fSDavid Chinner xfs_trans_ail_cursor_first( 18927d8d5feSDavid Chinner struct xfs_ail *ailp, 19027d8d5feSDavid Chinner struct xfs_ail_cursor *cur, 191249a8c11SDavid Chinner xfs_lsn_t lsn) 192249a8c11SDavid Chinner { 193249a8c11SDavid Chinner xfs_log_item_t *lip; 194249a8c11SDavid Chinner 1955b00f14fSDavid Chinner xfs_trans_ail_cursor_init(ailp, cur); 19627d8d5feSDavid Chinner lip = xfs_ail_min(ailp); 197249a8c11SDavid Chinner if (lsn == 0) 1985b00f14fSDavid Chinner goto out; 199249a8c11SDavid Chinner 20027d8d5feSDavid Chinner list_for_each_entry(lip, &ailp->xa_ail, li_ail) { 2015b00f14fSDavid Chinner if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) 2027ee49acfSDavid Chinner goto out; 2035b00f14fSDavid Chinner } 2045b00f14fSDavid Chinner lip = NULL; 2055b00f14fSDavid Chinner out: 20627d8d5feSDavid Chinner xfs_trans_ail_cursor_set(ailp, cur, lip); 207249a8c11SDavid Chinner return lip; 208249a8c11SDavid Chinner } 209535f6b37SJosef 'Jeff' Sipek 
210249a8c11SDavid Chinner /* 211*0bf6a5bdSDave Chinner * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself 212*0bf6a5bdSDave Chinner * to run at a later time if there is more work to do to complete the push. 213249a8c11SDavid Chinner */ 214*0bf6a5bdSDave Chinner STATIC void 215*0bf6a5bdSDave Chinner xfs_ail_worker( 216*0bf6a5bdSDave Chinner struct work_struct *work) 217249a8c11SDavid Chinner { 218*0bf6a5bdSDave Chinner struct xfs_ail *ailp = container_of(to_delayed_work(work), 219*0bf6a5bdSDave Chinner struct xfs_ail, xa_work); 220*0bf6a5bdSDave Chinner long tout; 22182fa9012SDavid Chinner xfs_lsn_t target = ailp->xa_target; 2221da177e4SLinus Torvalds xfs_lsn_t lsn; 2231da177e4SLinus Torvalds xfs_log_item_t *lip; 224249a8c11SDavid Chinner int flush_log, count, stuck; 22582fa9012SDavid Chinner xfs_mount_t *mp = ailp->xa_mount; 22627d8d5feSDavid Chinner struct xfs_ail_cursor *cur = &ailp->xa_cursors; 227d808f617SDave Chinner int push_xfsbufd = 0; 2281da177e4SLinus Torvalds 229c7e8f268SDavid Chinner spin_lock(&ailp->xa_lock); 23027d8d5feSDavid Chinner xfs_trans_ail_cursor_init(ailp, cur); 231*0bf6a5bdSDave Chinner lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); 232249a8c11SDavid Chinner if (!lip || XFS_FORCED_SHUTDOWN(mp)) { 2331da177e4SLinus Torvalds /* 234249a8c11SDavid Chinner * AIL is empty or our push has reached the end. 2351da177e4SLinus Torvalds */ 23627d8d5feSDavid Chinner xfs_trans_ail_cursor_done(ailp, cur); 237c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 238*0bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = 0; 239*0bf6a5bdSDave Chinner return; 2401da177e4SLinus Torvalds } 2411da177e4SLinus Torvalds 2421da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail); 2431da177e4SLinus Torvalds 2441da177e4SLinus Torvalds /* 2451da177e4SLinus Torvalds * While the item we are looking at is below the given threshold 246249a8c11SDavid Chinner * try to flush it out. 
We'd like not to stop until we've at least 2471da177e4SLinus Torvalds * tried to push on everything in the AIL with an LSN less than 248249a8c11SDavid Chinner * the given threshold. 2491da177e4SLinus Torvalds * 250249a8c11SDavid Chinner * However, we will stop after a certain number of pushes and wait 251249a8c11SDavid Chinner * for a reduced timeout to fire before pushing further. This 252249a8c11SDavid Chinner * prevents use from spinning when we can't do anything or there is 253249a8c11SDavid Chinner * lots of contention on the AIL lists. 254249a8c11SDavid Chinner */ 255249a8c11SDavid Chinner lsn = lip->li_lsn; 25627d8d5feSDavid Chinner flush_log = stuck = count = 0; 257249a8c11SDavid Chinner while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { 258249a8c11SDavid Chinner int lock_result; 259249a8c11SDavid Chinner /* 260249a8c11SDavid Chinner * If we can lock the item without sleeping, unlock the AIL 261249a8c11SDavid Chinner * lock and flush the item. Then re-grab the AIL lock so we 262249a8c11SDavid Chinner * can look for the next item on the AIL. List changes are 263249a8c11SDavid Chinner * handled by the AIL lookup functions internally 264249a8c11SDavid Chinner * 265249a8c11SDavid Chinner * If we can't lock the item, either its holder will flush it 266249a8c11SDavid Chinner * or it is already being flushed or it is being relogged. In 267249a8c11SDavid Chinner * any of these case it is being taken care of and we can just 268249a8c11SDavid Chinner * skip to the next item in the list. 
2691da177e4SLinus Torvalds */ 2701da177e4SLinus Torvalds lock_result = IOP_TRYLOCK(lip); 271c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 2721da177e4SLinus Torvalds switch (lock_result) { 2731da177e4SLinus Torvalds case XFS_ITEM_SUCCESS: 2741da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_success); 2751da177e4SLinus Torvalds IOP_PUSH(lip); 276*0bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = lsn; 2771da177e4SLinus Torvalds break; 2781da177e4SLinus Torvalds 2791da177e4SLinus Torvalds case XFS_ITEM_PUSHBUF: 2801da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_pushbuf); 2811da177e4SLinus Torvalds IOP_PUSHBUF(lip); 282*0bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = lsn; 283d808f617SDave Chinner push_xfsbufd = 1; 2841da177e4SLinus Torvalds break; 2851da177e4SLinus Torvalds 2861da177e4SLinus Torvalds case XFS_ITEM_PINNED: 2871da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_pinned); 288249a8c11SDavid Chinner stuck++; 2891da177e4SLinus Torvalds flush_log = 1; 2901da177e4SLinus Torvalds break; 2911da177e4SLinus Torvalds 2921da177e4SLinus Torvalds case XFS_ITEM_LOCKED: 2931da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_locked); 294*0bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = lsn; 295249a8c11SDavid Chinner stuck++; 2961da177e4SLinus Torvalds break; 2971da177e4SLinus Torvalds 2981da177e4SLinus Torvalds default: 2991da177e4SLinus Torvalds ASSERT(0); 3001da177e4SLinus Torvalds break; 3011da177e4SLinus Torvalds } 3021da177e4SLinus Torvalds 303c7e8f268SDavid Chinner spin_lock(&ailp->xa_lock); 304249a8c11SDavid Chinner /* should we bother continuing? */ 305249a8c11SDavid Chinner if (XFS_FORCED_SHUTDOWN(mp)) 3061da177e4SLinus Torvalds break; 307249a8c11SDavid Chinner ASSERT(mp->m_log); 3081da177e4SLinus Torvalds 309249a8c11SDavid Chinner count++; 310249a8c11SDavid Chinner 311249a8c11SDavid Chinner /* 312249a8c11SDavid Chinner * Are there too many items we can't do anything with? 
313249a8c11SDavid Chinner * If we we are skipping too many items because we can't flush 314249a8c11SDavid Chinner * them or they are already being flushed, we back off and 315249a8c11SDavid Chinner * given them time to complete whatever operation is being 316249a8c11SDavid Chinner * done. i.e. remove pressure from the AIL while we can't make 317249a8c11SDavid Chinner * progress so traversals don't slow down further inserts and 318249a8c11SDavid Chinner * removals to/from the AIL. 319249a8c11SDavid Chinner * 320249a8c11SDavid Chinner * The value of 100 is an arbitrary magic number based on 321249a8c11SDavid Chinner * observation. 322249a8c11SDavid Chinner */ 323249a8c11SDavid Chinner if (stuck > 100) 324249a8c11SDavid Chinner break; 325249a8c11SDavid Chinner 32627d8d5feSDavid Chinner lip = xfs_trans_ail_cursor_next(ailp, cur); 327249a8c11SDavid Chinner if (lip == NULL) 328249a8c11SDavid Chinner break; 329249a8c11SDavid Chinner lsn = lip->li_lsn; 3301da177e4SLinus Torvalds } 33127d8d5feSDavid Chinner xfs_trans_ail_cursor_done(ailp, cur); 332c7e8f268SDavid Chinner spin_unlock(&ailp->xa_lock); 3331da177e4SLinus Torvalds 3341da177e4SLinus Torvalds if (flush_log) { 3351da177e4SLinus Torvalds /* 3361da177e4SLinus Torvalds * If something we need to push out was pinned, then 3371da177e4SLinus Torvalds * push out the log so it will become unpinned and 3381da177e4SLinus Torvalds * move forward in the AIL. 
3391da177e4SLinus Torvalds */ 3401da177e4SLinus Torvalds XFS_STATS_INC(xs_push_ail_flush); 341a14a348bSChristoph Hellwig xfs_log_force(mp, 0); 3421da177e4SLinus Torvalds } 3431da177e4SLinus Torvalds 344d808f617SDave Chinner if (push_xfsbufd) { 345d808f617SDave Chinner /* we've got delayed write buffers to flush */ 346d808f617SDave Chinner wake_up_process(mp->m_ddev_targp->bt_task); 347d808f617SDave Chinner } 348d808f617SDave Chinner 349*0bf6a5bdSDave Chinner /* assume we have more work to do in a short while */ 350*0bf6a5bdSDave Chinner tout = 10; 35192d9cd10SDavid Chinner if (!count) { 35292d9cd10SDavid Chinner /* We're past our target or empty, so idle */ 353*0bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = 0; 354*0bf6a5bdSDave Chinner 355*0bf6a5bdSDave Chinner /* 356*0bf6a5bdSDave Chinner * Check for an updated push target before clearing the 357*0bf6a5bdSDave Chinner * XFS_AIL_PUSHING_BIT. If the target changed, we've got more 358*0bf6a5bdSDave Chinner * work to do. Wait a bit longer before starting that work. 359*0bf6a5bdSDave Chinner */ 360*0bf6a5bdSDave Chinner smp_rmb(); 361*0bf6a5bdSDave Chinner if (ailp->xa_target == target) { 362*0bf6a5bdSDave Chinner clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); 363*0bf6a5bdSDave Chinner return; 364*0bf6a5bdSDave Chinner } 365*0bf6a5bdSDave Chinner tout = 50; 36692d9cd10SDavid Chinner } else if (XFS_LSN_CMP(lsn, target) >= 0) { 367249a8c11SDavid Chinner /* 36892d9cd10SDavid Chinner * We reached the target so wait a bit longer for I/O to 36992d9cd10SDavid Chinner * complete and remove pushed items from the AIL before we 37092d9cd10SDavid Chinner * start the next scan from the start of the AIL. 
371249a8c11SDavid Chinner */ 372453eac8aSDave Chinner tout = 50; 373*0bf6a5bdSDave Chinner ailp->xa_last_pushed_lsn = 0; 37427d8d5feSDavid Chinner } else if ((stuck * 100) / count > 90) { 375249a8c11SDavid Chinner /* 376249a8c11SDavid Chinner * Either there is a lot of contention on the AIL or we 377249a8c11SDavid Chinner * are stuck due to operations in progress. "Stuck" in this 378249a8c11SDavid Chinner * case is defined as >90% of the items we tried to push 379249a8c11SDavid Chinner * were stuck. 380249a8c11SDavid Chinner * 381249a8c11SDavid Chinner * Backoff a bit more to allow some I/O to complete before 382249a8c11SDavid Chinner * continuing from where we were. 383249a8c11SDavid Chinner */ 384453eac8aSDave Chinner tout = 20; 385453eac8aSDave Chinner } 3861da177e4SLinus Torvalds 387*0bf6a5bdSDave Chinner /* There is more to do, requeue us. */ 388*0bf6a5bdSDave Chinner queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 389*0bf6a5bdSDave Chinner msecs_to_jiffies(tout)); 390*0bf6a5bdSDave Chinner } 391*0bf6a5bdSDave Chinner 392*0bf6a5bdSDave Chinner /* 393*0bf6a5bdSDave Chinner * This routine is called to move the tail of the AIL forward. It does this by 394*0bf6a5bdSDave Chinner * trying to flush items in the AIL whose lsns are below the given 395*0bf6a5bdSDave Chinner * threshold_lsn. 396*0bf6a5bdSDave Chinner * 397*0bf6a5bdSDave Chinner * The push is run asynchronously in a workqueue, which means the caller needs 398*0bf6a5bdSDave Chinner * to handle waiting on the async flush for space to become available. 399*0bf6a5bdSDave Chinner * We don't want to interrupt any push that is in progress, hence we only queue 400*0bf6a5bdSDave Chinner * work if we set the pushing bit approriately. 401*0bf6a5bdSDave Chinner * 402*0bf6a5bdSDave Chinner * We do this unlocked - we only need to know whether there is anything in the 403*0bf6a5bdSDave Chinner * AIL at the time we are called. 
We don't need to access the contents of 404*0bf6a5bdSDave Chinner * any of the objects, so the lock is not needed. 405*0bf6a5bdSDave Chinner */ 406*0bf6a5bdSDave Chinner void 407*0bf6a5bdSDave Chinner xfs_trans_ail_push( 408*0bf6a5bdSDave Chinner struct xfs_ail *ailp, 409*0bf6a5bdSDave Chinner xfs_lsn_t threshold_lsn) 410*0bf6a5bdSDave Chinner { 411*0bf6a5bdSDave Chinner xfs_log_item_t *lip; 412*0bf6a5bdSDave Chinner 413*0bf6a5bdSDave Chinner lip = xfs_ail_min(ailp); 414*0bf6a5bdSDave Chinner if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) || 415*0bf6a5bdSDave Chinner XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0) 416*0bf6a5bdSDave Chinner return; 417*0bf6a5bdSDave Chinner 418*0bf6a5bdSDave Chinner /* 419*0bf6a5bdSDave Chinner * Ensure that the new target is noticed in push code before it clears 420*0bf6a5bdSDave Chinner * the XFS_AIL_PUSHING_BIT. 421*0bf6a5bdSDave Chinner */ 422*0bf6a5bdSDave Chinner smp_wmb(); 423*0bf6a5bdSDave Chinner ailp->xa_target = threshold_lsn; 424*0bf6a5bdSDave Chinner if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) 425*0bf6a5bdSDave Chinner queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); 426*0bf6a5bdSDave Chinner } 4271da177e4SLinus Torvalds 4281da177e4SLinus Torvalds /* 4291da177e4SLinus Torvalds * This is to be called when an item is unlocked that may have 4301da177e4SLinus Torvalds * been in the AIL. It will wake up the first member of the AIL 4311da177e4SLinus Torvalds * wait list if this item's unlocking might allow it to progress. 4321da177e4SLinus Torvalds * If the item is in the AIL, then we need to get the AIL lock 4331da177e4SLinus Torvalds * while doing our checking so we don't race with someone going 4341da177e4SLinus Torvalds * to sleep waiting for this event in xfs_trans_push_ail(). 
4351da177e4SLinus Torvalds */ 4361da177e4SLinus Torvalds void 4371da177e4SLinus Torvalds xfs_trans_unlocked_item( 438783a2f65SDavid Chinner struct xfs_ail *ailp, 4391da177e4SLinus Torvalds xfs_log_item_t *lip) 4401da177e4SLinus Torvalds { 4411da177e4SLinus Torvalds xfs_log_item_t *min_lip; 4421da177e4SLinus Torvalds 4431da177e4SLinus Torvalds /* 4441da177e4SLinus Torvalds * If we're forcibly shutting down, we may have 4451da177e4SLinus Torvalds * unlocked log items arbitrarily. The last thing 4461da177e4SLinus Torvalds * we want to do is to move the tail of the log 4471da177e4SLinus Torvalds * over some potentially valid data. 4481da177e4SLinus Torvalds */ 4491da177e4SLinus Torvalds if (!(lip->li_flags & XFS_LI_IN_AIL) || 450783a2f65SDavid Chinner XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { 4511da177e4SLinus Torvalds return; 4521da177e4SLinus Torvalds } 4531da177e4SLinus Torvalds 4541da177e4SLinus Torvalds /* 4551da177e4SLinus Torvalds * This is the one case where we can call into xfs_ail_min() 4561da177e4SLinus Torvalds * without holding the AIL lock because we only care about the 4571da177e4SLinus Torvalds * case where we are at the tail of the AIL. If the object isn't 4581da177e4SLinus Torvalds * at the tail, it doesn't matter what result we get back. This 4591da177e4SLinus Torvalds * is slightly racy because since we were just unlocked, we could 4601da177e4SLinus Torvalds * go to sleep between the call to xfs_ail_min and the call to 4611da177e4SLinus Torvalds * xfs_log_move_tail, have someone else lock us, commit to us disk, 4621da177e4SLinus Torvalds * move us out of the tail of the AIL, and then we wake up. However, 4631da177e4SLinus Torvalds * the call to xfs_log_move_tail() doesn't do anything if there's 4641da177e4SLinus Torvalds * not enough free space to wake people up so we're safe calling it. 
4651da177e4SLinus Torvalds */ 466783a2f65SDavid Chinner min_lip = xfs_ail_min(ailp); 4671da177e4SLinus Torvalds 4681da177e4SLinus Torvalds if (min_lip == lip) 469783a2f65SDavid Chinner xfs_log_move_tail(ailp->xa_mount, 1); 4701da177e4SLinus Torvalds } /* xfs_trans_unlocked_item */ 4711da177e4SLinus Torvalds 4721da177e4SLinus Torvalds /* 4730e57f6a3SDave Chinner * xfs_trans_ail_update - bulk AIL insertion operation. 4740e57f6a3SDave Chinner * 4750e57f6a3SDave Chinner * @xfs_trans_ail_update takes an array of log items that all need to be 4760e57f6a3SDave Chinner * positioned at the same LSN in the AIL. If an item is not in the AIL, it will 4770e57f6a3SDave Chinner * be added. Otherwise, it will be repositioned by removing it and re-adding 4780e57f6a3SDave Chinner * it to the AIL. If we move the first item in the AIL, update the log tail to 4790e57f6a3SDave Chinner * match the new minimum LSN in the AIL. 4800e57f6a3SDave Chinner * 4810e57f6a3SDave Chinner * This function takes the AIL lock once to execute the update operations on 4820e57f6a3SDave Chinner * all the items in the array, and as such should not be called with the AIL 4830e57f6a3SDave Chinner * lock held. As a result, once we have the AIL lock, we need to check each log 4840e57f6a3SDave Chinner * item LSN to confirm it needs to be moved forward in the AIL. 4850e57f6a3SDave Chinner * 4860e57f6a3SDave Chinner * To optimise the insert operation, we delete all the items from the AIL in 4870e57f6a3SDave Chinner * the first pass, moving them into a temporary list, then splice the temporary 4880e57f6a3SDave Chinner * list into the correct position in the AIL. This avoids needing to do an 4890e57f6a3SDave Chinner * insert operation on every item. 4900e57f6a3SDave Chinner * 4910e57f6a3SDave Chinner * This function must be called with the AIL lock held. The lock is dropped 4920e57f6a3SDave Chinner * before returning. 
4930e57f6a3SDave Chinner */ 4940e57f6a3SDave Chinner void 4950e57f6a3SDave Chinner xfs_trans_ail_update_bulk( 4960e57f6a3SDave Chinner struct xfs_ail *ailp, 4970e57f6a3SDave Chinner struct xfs_log_item **log_items, 4980e57f6a3SDave Chinner int nr_items, 4990e57f6a3SDave Chinner xfs_lsn_t lsn) __releases(ailp->xa_lock) 5000e57f6a3SDave Chinner { 5010e57f6a3SDave Chinner xfs_log_item_t *mlip; 5020e57f6a3SDave Chinner xfs_lsn_t tail_lsn; 5030e57f6a3SDave Chinner int mlip_changed = 0; 5040e57f6a3SDave Chinner int i; 5050e57f6a3SDave Chinner LIST_HEAD(tmp); 5060e57f6a3SDave Chinner 5070e57f6a3SDave Chinner mlip = xfs_ail_min(ailp); 5080e57f6a3SDave Chinner 5090e57f6a3SDave Chinner for (i = 0; i < nr_items; i++) { 5100e57f6a3SDave Chinner struct xfs_log_item *lip = log_items[i]; 5110e57f6a3SDave Chinner if (lip->li_flags & XFS_LI_IN_AIL) { 5120e57f6a3SDave Chinner /* check if we really need to move the item */ 5130e57f6a3SDave Chinner if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0) 5140e57f6a3SDave Chinner continue; 5150e57f6a3SDave Chinner 5160e57f6a3SDave Chinner xfs_ail_delete(ailp, lip); 5170e57f6a3SDave Chinner if (mlip == lip) 5180e57f6a3SDave Chinner mlip_changed = 1; 5190e57f6a3SDave Chinner } else { 5200e57f6a3SDave Chinner lip->li_flags |= XFS_LI_IN_AIL; 5210e57f6a3SDave Chinner } 5220e57f6a3SDave Chinner lip->li_lsn = lsn; 5230e57f6a3SDave Chinner list_add(&lip->li_ail, &tmp); 5240e57f6a3SDave Chinner } 5250e57f6a3SDave Chinner 5260e57f6a3SDave Chinner xfs_ail_splice(ailp, &tmp, lsn); 5270e57f6a3SDave Chinner 5280e57f6a3SDave Chinner if (!mlip_changed) { 5290e57f6a3SDave Chinner spin_unlock(&ailp->xa_lock); 5300e57f6a3SDave Chinner return; 5310e57f6a3SDave Chinner } 5320e57f6a3SDave Chinner 5330e57f6a3SDave Chinner /* 5340e57f6a3SDave Chinner * It is not safe to access mlip after the AIL lock is dropped, so we 5350e57f6a3SDave Chinner * must get a copy of li_lsn before we do so. 
This is especially 5360e57f6a3SDave Chinner * important on 32-bit platforms where accessing and updating 64-bit 5370e57f6a3SDave Chinner * values like li_lsn is not atomic. 5380e57f6a3SDave Chinner */ 5390e57f6a3SDave Chinner mlip = xfs_ail_min(ailp); 5400e57f6a3SDave Chinner tail_lsn = mlip->li_lsn; 5410e57f6a3SDave Chinner spin_unlock(&ailp->xa_lock); 5420e57f6a3SDave Chinner xfs_log_move_tail(ailp->xa_mount, tail_lsn); 5430e57f6a3SDave Chinner } 5440e57f6a3SDave Chinner 5450e57f6a3SDave Chinner /* 54630136832SDave Chinner * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL 54730136832SDave Chinner * 54830136832SDave Chinner * @xfs_trans_ail_delete_bulk takes an array of log items that all need to 54930136832SDave Chinner * removed from the AIL. The caller is already holding the AIL lock, and done 55030136832SDave Chinner * all the checks necessary to ensure the items passed in via @log_items are 55130136832SDave Chinner * ready for deletion. This includes checking that the items are in the AIL. 55230136832SDave Chinner * 55330136832SDave Chinner * For each log item to be removed, unlink it from the AIL, clear the IN_AIL 55430136832SDave Chinner * flag from the item and reset the item's lsn to 0. If we remove the first 55530136832SDave Chinner * item in the AIL, update the log tail to match the new minimum LSN in the 55630136832SDave Chinner * AIL. 55730136832SDave Chinner * 55830136832SDave Chinner * This function will not drop the AIL lock until all items are removed from 55930136832SDave Chinner * the AIL to minimise the amount of lock traffic on the AIL. This does not 56030136832SDave Chinner * greatly increase the AIL hold time, but does significantly reduce the amount 56130136832SDave Chinner * of traffic on the lock, especially during IO completion. 56230136832SDave Chinner * 56330136832SDave Chinner * This function must be called with the AIL lock held. The lock is dropped 56430136832SDave Chinner * before returning. 
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;

	/*
	 * Called with ailp->xa_lock held; every exit path below drops it
	 * (hence the __releases() annotation).  Snapshot the current AIL
	 * minimum so we can tell afterwards whether the tail item moved.
	 */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			/*
			 * Deleting an item that is not in the AIL indicates
			 * in-core corruption: warn and shut the filesystem
			 * down unless a shutdown is already in progress.
			 * Note we bail out without processing the remaining
			 * items in log_items[].
			 */
			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
			return;
		}

		/* Unlink the item and reset its AIL state. */
		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	/* If the minimum (tail) item did not change, nothing more to do. */
	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so.  This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic.  It is possible we've emptied the
	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip ? mlip->li_lsn : 0;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}

/*
 * The active item list (AIL) is a doubly linked list of log
 * items sorted by ascending lsn.  The base of the list is
 * a forw/back pointer pair embedded in the xfs mount structure.
 * The base is initialized with both pointers pointing to the
 * base.  This case always needs to be distinguished, because
 * the base has no lsn to look at.  We almost always insert
 * at the end of the list, so on inserts we search from the
 * end of the list to find where the new item belongs.
 */

/*
 * Initialize the doubly linked list to point only to itself.
6321da177e4SLinus Torvalds */ 633249a8c11SDavid Chinner int 6341da177e4SLinus Torvalds xfs_trans_ail_init( 6351da177e4SLinus Torvalds xfs_mount_t *mp) 6361da177e4SLinus Torvalds { 63782fa9012SDavid Chinner struct xfs_ail *ailp; 63882fa9012SDavid Chinner 63982fa9012SDavid Chinner ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 64082fa9012SDavid Chinner if (!ailp) 64182fa9012SDavid Chinner return ENOMEM; 64282fa9012SDavid Chinner 64382fa9012SDavid Chinner ailp->xa_mount = mp; 64482fa9012SDavid Chinner INIT_LIST_HEAD(&ailp->xa_ail); 645c7e8f268SDavid Chinner spin_lock_init(&ailp->xa_lock); 646*0bf6a5bdSDave Chinner INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); 64727d8d5feSDavid Chinner mp->m_ail = ailp; 64827d8d5feSDavid Chinner return 0; 649249a8c11SDavid Chinner } 650249a8c11SDavid Chinner 651249a8c11SDavid Chinner void 652249a8c11SDavid Chinner xfs_trans_ail_destroy( 653249a8c11SDavid Chinner xfs_mount_t *mp) 654249a8c11SDavid Chinner { 65582fa9012SDavid Chinner struct xfs_ail *ailp = mp->m_ail; 65682fa9012SDavid Chinner 657*0bf6a5bdSDave Chinner cancel_delayed_work_sync(&ailp->xa_work); 65882fa9012SDavid Chinner kmem_free(ailp); 6591da177e4SLinus Torvalds } 6601da177e4SLinus Torvalds 6611da177e4SLinus Torvalds /* 6620e57f6a3SDave Chinner * splice the log item list into the AIL at the given LSN. 6630e57f6a3SDave Chinner */ 6640e57f6a3SDave Chinner STATIC void 6650e57f6a3SDave Chinner xfs_ail_splice( 6660e57f6a3SDave Chinner struct xfs_ail *ailp, 6670e57f6a3SDave Chinner struct list_head *list, 6680e57f6a3SDave Chinner xfs_lsn_t lsn) 6690e57f6a3SDave Chinner { 6700e57f6a3SDave Chinner xfs_log_item_t *next_lip; 6710e57f6a3SDave Chinner 6720e57f6a3SDave Chinner /* 6730e57f6a3SDave Chinner * If the list is empty, just insert the item. 
6740e57f6a3SDave Chinner */ 6750e57f6a3SDave Chinner if (list_empty(&ailp->xa_ail)) { 6760e57f6a3SDave Chinner list_splice(list, &ailp->xa_ail); 6770e57f6a3SDave Chinner return; 6780e57f6a3SDave Chinner } 6790e57f6a3SDave Chinner 6800e57f6a3SDave Chinner list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { 6810e57f6a3SDave Chinner if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0) 6820e57f6a3SDave Chinner break; 6830e57f6a3SDave Chinner } 6840e57f6a3SDave Chinner 6850e57f6a3SDave Chinner ASSERT((&next_lip->li_ail == &ailp->xa_ail) || 6860e57f6a3SDave Chinner (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)); 6870e57f6a3SDave Chinner 6880e57f6a3SDave Chinner list_splice_init(list, &next_lip->li_ail); 6890e57f6a3SDave Chinner return; 6900e57f6a3SDave Chinner } 6910e57f6a3SDave Chinner 6920e57f6a3SDave Chinner /* 6931da177e4SLinus Torvalds * Delete the given item from the AIL. Return a pointer to the item. 6941da177e4SLinus Torvalds */ 695eb3efa12SDave Chinner STATIC void 6961da177e4SLinus Torvalds xfs_ail_delete( 69782fa9012SDavid Chinner struct xfs_ail *ailp, 6981da177e4SLinus Torvalds xfs_log_item_t *lip) 6991da177e4SLinus Torvalds { 700535f6b37SJosef 'Jeff' Sipek xfs_ail_check(ailp, lip); 701535f6b37SJosef 'Jeff' Sipek list_del(&lip->li_ail); 702eb3efa12SDave Chinner xfs_trans_ail_cursor_clear(ailp, lip); 7031da177e4SLinus Torvalds } 7041da177e4SLinus Torvalds 7051da177e4SLinus Torvalds /* 7061da177e4SLinus Torvalds * Return a pointer to the first item in the AIL. 7071da177e4SLinus Torvalds * If the AIL is empty, then return NULL. 
7081da177e4SLinus Torvalds */ 7091da177e4SLinus Torvalds STATIC xfs_log_item_t * 7101da177e4SLinus Torvalds xfs_ail_min( 71182fa9012SDavid Chinner struct xfs_ail *ailp) 7121da177e4SLinus Torvalds { 713535f6b37SJosef 'Jeff' Sipek if (list_empty(&ailp->xa_ail)) 7141da177e4SLinus Torvalds return NULL; 715535f6b37SJosef 'Jeff' Sipek 716535f6b37SJosef 'Jeff' Sipek return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); 7171da177e4SLinus Torvalds } 7181da177e4SLinus Torvalds 7191da177e4SLinus Torvalds /* 7201da177e4SLinus Torvalds * Return a pointer to the item which follows 7211da177e4SLinus Torvalds * the given item in the AIL. If the given item 7221da177e4SLinus Torvalds * is the last item in the list, then return NULL. 7231da177e4SLinus Torvalds */ 7241da177e4SLinus Torvalds STATIC xfs_log_item_t * 7251da177e4SLinus Torvalds xfs_ail_next( 72682fa9012SDavid Chinner struct xfs_ail *ailp, 7271da177e4SLinus Torvalds xfs_log_item_t *lip) 7281da177e4SLinus Torvalds { 729535f6b37SJosef 'Jeff' Sipek if (lip->li_ail.next == &ailp->xa_ail) 7301da177e4SLinus Torvalds return NULL; 7311da177e4SLinus Torvalds 732535f6b37SJosef 'Jeff' Sipek return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail); 7331da177e4SLinus Torvalds } 7341da177e4SLinus Torvalds 7351da177e4SLinus Torvalds #ifdef DEBUG 7361da177e4SLinus Torvalds /* 7371da177e4SLinus Torvalds * Check that the list is sorted as it should be. 7381da177e4SLinus Torvalds */ 7391da177e4SLinus Torvalds STATIC void 7401da177e4SLinus Torvalds xfs_ail_check( 74182fa9012SDavid Chinner struct xfs_ail *ailp, 742de08dbc1SDavid Chinner xfs_log_item_t *lip) 7431da177e4SLinus Torvalds { 7441da177e4SLinus Torvalds xfs_log_item_t *prev_lip; 7451da177e4SLinus Torvalds 746535f6b37SJosef 'Jeff' Sipek if (list_empty(&ailp->xa_ail)) 7471da177e4SLinus Torvalds return; 7481da177e4SLinus Torvalds 7491da177e4SLinus Torvalds /* 750de08dbc1SDavid Chinner * Check the next and previous entries are valid. 
751de08dbc1SDavid Chinner */ 752de08dbc1SDavid Chinner ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); 753535f6b37SJosef 'Jeff' Sipek prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail); 754535f6b37SJosef 'Jeff' Sipek if (&prev_lip->li_ail != &ailp->xa_ail) 755de08dbc1SDavid Chinner ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); 756535f6b37SJosef 'Jeff' Sipek 757535f6b37SJosef 'Jeff' Sipek prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail); 758535f6b37SJosef 'Jeff' Sipek if (&prev_lip->li_ail != &ailp->xa_ail) 759de08dbc1SDavid Chinner ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); 760de08dbc1SDavid Chinner 761de08dbc1SDavid Chinner 762de08dbc1SDavid Chinner #ifdef XFS_TRANS_DEBUG 763de08dbc1SDavid Chinner /* 764535f6b37SJosef 'Jeff' Sipek * Walk the list checking lsn ordering, and that every entry has the 765535f6b37SJosef 'Jeff' Sipek * XFS_LI_IN_AIL flag set. This is really expensive, so only do it 766535f6b37SJosef 'Jeff' Sipek * when specifically debugging the transaction subsystem. 7671da177e4SLinus Torvalds */ 768535f6b37SJosef 'Jeff' Sipek prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); 769535f6b37SJosef 'Jeff' Sipek list_for_each_entry(lip, &ailp->xa_ail, li_ail) { 770535f6b37SJosef 'Jeff' Sipek if (&prev_lip->li_ail != &ailp->xa_ail) 7711da177e4SLinus Torvalds ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); 7721da177e4SLinus Torvalds ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); 7731da177e4SLinus Torvalds prev_lip = lip; 7741da177e4SLinus Torvalds } 775de08dbc1SDavid Chinner #endif /* XFS_TRANS_DEBUG */ 7761da177e4SLinus Torvalds } 7771da177e4SLinus Torvalds #endif /* DEBUG */ 778