/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"

struct workqueue_struct	*xfs_ail_wq;	/* AIL workqueue */

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*prev_lip;

	if (list_empty(&ailp->xa_ail))
		return;

	/*
	 * Check the next and previous entries are valid.
	 */
	ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
	prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);

	prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
	if (&prev_lip->li_ail != &ailp->xa_ail)
		ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

#ifdef XFS_TRANS_DEBUG
	/*
	 * Walk the list checking lsn ordering, and that every entry has the
	 * XFS_LI_IN_AIL flag set.  This is really expensive, so only do it
	 * when specifically debugging the transaction subsystem.
	 */
	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (&prev_lip->li_ail != &ailp->xa_ail)
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
	}
#endif /* XFS_TRANS_DEBUG */
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the first item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_min(
	struct xfs_ail	*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static xfs_log_item_t *
xfs_ail_max(
	struct xfs_ail	*ailp)
{
	if (list_empty(&ailp->xa_ail))
		return NULL;

	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static xfs_log_item_t *
xfs_ail_next(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	if (lip->li_ail.next == &ailp->xa_ail)
		return NULL;

	return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
}
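
/*
 * The list helpers above do no locking of their own.  Unless a caller
 * documents why an unlocked peek is safe (as xfs_ail_push() and
 * xfs_trans_unlocked_item() do further down), they are expected to be
 * called with ailp->xa_lock held so the list cannot change underneath
 * them.
 */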

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the last
 * item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	lsn = 0;
	xfs_log_item_t	*lip;

	spin_lock(&ailp->xa_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->xa_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us.  However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it.  Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->xa_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit being set in the item pointer),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((__psint_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item.  This is called when an
 * item is removed from the AIL.  Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object.  We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->xa_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((__psint_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with an LSN at or above @lsn by searching in
 * ascending LSN order and initialise the cursor to point to the next item for
 * an ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL.  Returns NULL if there is no such item.
 */
xfs_log_item_t *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	xfs_log_item_t		*lip;

	list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with that LSN, the cursor is set to the last item with an LSN lower
 * than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}
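
/*
 * A typical ascending traversal with the cursor API, sketched here for
 * reference (illustrative only - it mirrors how xfs_ail_worker() below
 * drives a push, rather than being an additional caller):
 *
 *	spin_lock(&ailp->xa_lock);
 *	lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *	while (lip != NULL) {
 *		... operate on lip, possibly dropping and retaking xa_lock ...
 *		lip = xfs_trans_ail_cursor_next(ailp, &cur);
 *	}
 *	xfs_trans_ail_cursor_done(ailp, &cur);
 *	spin_unlock(&ailp->xa_lock);
 *
 * Because deletion invalidates any cursor pointing at the removed item,
 * dropping xa_lock between steps is safe: a stale cursor simply restarts
 * the walk from the head of the AIL on the next _cursor_next() call.
 */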

/*
 * Splice the log item list into the AIL at the given LSN.  We splice to the
 * tail of the given LSN to maintain insert order for push traversals.  The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip = cur ? cur->item : NULL;
	struct xfs_log_item	*next_lip;

	/*
	 * Get a new cursor if we don't have a placeholder or the existing one
	 * has been invalidated.
	 */
	if (!lip || (__psint_t)lip & 1) {
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

		if (!lip) {
			/*
			 * Either the list is empty or every item in it has a
			 * higher LSN, so splice at the head and return.
			 */
			if (cur)
				cur->item = NULL;
			list_splice(list, &ailp->xa_ail);
			return;
		}
	}

	/*
	 * Our cursor points to the item we want to insert _after_, so we have
	 * to update the cursor to point to the end of the list we are splicing
	 * in so that it points to the correct location for the next splice.
	 * i.e. before the splice
	 *
	 *  lsn -> lsn -> lsn + x -> lsn + x ...
	 *          ^
	 *          | cursor points here
	 *
	 * After the splice we have:
	 *
	 *  lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
	 *          ^                            ^
	 *          | cursor points here         | needs to move here
	 *
	 * So we set the cursor to the last item in the list to be spliced
	 * before we execute the splice, resulting in the cursor pointing to
	 * the correct item after the splice occurs.
	 */
	if (cur) {
		next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
		cur->item = next_lip;
	}
	list_splice(list, &lip->li_ail);
}

/*
 * Delete the given item from the AIL.
 */
static void
xfs_ail_delete(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * xfs_ail_worker does the work of pushing on the AIL.  It will requeue itself
 * to run at a later time if there is more work to do to complete the push.
 */
STATIC void
xfs_ail_worker(
	struct work_struct	*work)
{
	struct xfs_ail		*ailp = container_of(to_delayed_work(work),
					struct xfs_ail, xa_work);
	xfs_mount_t		*mp = ailp->xa_mount;
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout = 10;
	int			flush_log = 0;
	int			stuck = 0;
	int			count = 0;
	int			push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	target = ailp->xa_target;
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * The AIL is empty or our push has reached the end.
		 */
		xfs_trans_ail_cursor_done(ailp, &cur);
		spin_unlock(&ailp->xa_lock);
		goto out_done;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold, try
	 * to flush it out.  We'd like not to stop until we've at least tried
	 * to push on everything in the AIL with an LSN less than the given
	 * threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait for
	 * a reduced timeout to fire before pushing further.  This prevents us
	 * from spinning when we can't do anything or there is lots of
	 * contention on the AIL lists.
	 */
	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;
		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item.  Then re-grab the AIL lock so we
		 * can look for the next item on the AIL.  List changes are
		 * handled by the AIL lookup functions internally.
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged.  In
		 * any of these cases it is being taken care of and we can just
		 * skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&ailp->xa_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			ailp->xa_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			ailp->xa_last_pushed_lsn = lsn;
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			ailp->xa_last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&ailp->xa_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done.  i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then push out
		 * the log so it will become unpinned and move forward in the
		 * AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	/* assume we have more work to do in a short while */
out_done:
	if (!count) {
		/* We're past our target or empty, so idle */
		ailp->xa_last_pushed_lsn = 0;

		/*
		 * We clear the XFS_AIL_PUSHING_BIT first before checking
		 * whether the target has changed.  If the target has changed,
		 * this pushes the requeue race directly onto the result of the
		 * atomic test/set bit, so we are guaranteed that either the
		 * pusher that changed the target or ourselves will requeue
		 * the work (but not both).
		 */
		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
		smp_rmb();
		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
			return;

		tout = 50;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->xa_last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress.  "Stuck" in this case
		 * is defined as >90% of the items we tried to push being
		 * stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout = 20;
	}

	/* There is more to do, requeue us. */
	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
					msecs_to_jiffies(tout));
}
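
/*
 * A short summary of the handshake between the worker above and
 * xfs_ail_push() below: a pusher updates xa_target and only queues the work
 * if it is the one that sets XFS_AIL_PUSHING_BIT; an idle worker clears that
 * bit and then re-checks xa_target, and if the target moved it races through
 * test_and_set_bit() so that exactly one of the two parties requeues the
 * work.  The smp_wmb()/smp_rmb() pair is there so that the worker's re-read
 * of xa_target sees the new target once the PUSHING bit has been observed
 * clear.
 */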

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a workqueue, which means the caller needs
 * to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only queue
 * work if we set the pushing bit appropriately.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called.  We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	threshold_lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before it clears
	 * the XFS_AIL_PUSHING_BIT.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}

/*
 * Push out all items in the AIL immediately.
 */
void
xfs_ail_push_all(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * This is to be called when an item is unlocked that may have been in the
 * AIL.  It will wake up the first member of the AIL wait list if this item's
 * unlocking might allow it to progress.  If the item is in the AIL, then we
 * need to get the AIL lock while doing our checking so we don't race with
 * someone going to sleep waiting for this event in xfs_trans_push_ail().
 */
void
xfs_trans_unlocked_item(
	struct xfs_ail	*ailp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*min_lip;

	/*
	 * If we're forcibly shutting down, we may have unlocked log items
	 * arbitrarily.  The last thing we want to do is to move the tail of
	 * the log over some potentially valid data.
	 */
	if (!(lip->li_flags & XFS_LI_IN_AIL) ||
	    XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
		return;
	}

	/*
	 * This is the one case where we can call into xfs_ail_min() without
	 * holding the AIL lock because we only care about the case where we
	 * are at the tail of the AIL.  If the object isn't at the tail, it
	 * doesn't matter what result we get back.  This is slightly racy
	 * because, since we were just unlocked, we could go to sleep between
	 * the call to xfs_ail_min() and the call to xfs_log_move_tail(), have
	 * someone else lock us, commit us to disk, move us out of the tail of
	 * the AIL, and then we wake up.  However, the call to
	 * xfs_log_move_tail() doesn't do anything if there's not enough free
	 * space to wake people up, so we're safe calling it.
	 */
	min_lip = xfs_ail_min(ailp);

	if (min_lip == lip)
		xfs_log_move_tail(ailp->xa_mount, 1);
}	/* xfs_trans_unlocked_item */

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL.  If an item is not in the AIL, it
 * will be added.  Otherwise, it will be repositioned by removing it and
 * re-adding it to the AIL.  If we move the first item in the AIL, update the
 * log tail to match the new minimum LSN in the AIL.
 *
 * This function processes all the items in the array under a single hold of
 * the AIL lock rather than taking and dropping it per item.  Once we have the
 * AIL lock, we need to check each log item's LSN to confirm it actually needs
 * to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL.  This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (lip->li_flags & XFS_LI_IN_AIL) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			lip->li_flags |= XFS_LI_IN_AIL;
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	xfs_ail_splice(ailp, cur, &tmp, lsn);

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so.  This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip->li_lsn;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}
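
/*
 * Locking contract, sketched for reference (illustrative only - the real
 * callers live outside this file, and commit_lsn is just a placeholder
 * name):
 *
 *	spin_lock(&ailp->xa_lock);
 *	xfs_trans_ail_update_bulk(ailp, NULL, log_items, nr_items, commit_lsn);
 *	(xa_lock has already been dropped for us at this point)
 *
 * The caller enters with xa_lock held and must not unlock it again
 * afterwards; the same contract applies to xfs_trans_ail_delete_bulk()
 * below.
 */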

/*
 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
 *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL.  The caller is already holding the AIL lock, and has
 * done all the checks necessary to ensure the items passed in via @log_items
 * are ready for deletion.  This includes checking that the items are in the
 * AIL.
 *
 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
 * flag from the item and reset the item's lsn to 0.  If we remove the first
 * item in the AIL, update the log tail to match the new minimum LSN in the
 * AIL.
 *
 * This function will not drop the AIL lock until all items are removed from
 * the AIL to minimise the amount of lock traffic on the AIL.  This does not
 * greatly increase the AIL hold time, but does significantly reduce the
 * amount of traffic on the lock, especially during IO completion.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**log_items,
	int			nr_items) __releases(ailp->xa_lock)
{
	xfs_log_item_t		*mlip;
	xfs_lsn_t		tail_lsn;
	int			mlip_changed = 0;
	int			i;

	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
			struct xfs_mount	*mp = ailp->xa_mount;

			spin_unlock(&ailp->xa_lock);
			if (!XFS_FORCED_SHUTDOWN(mp)) {
				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
		"%s: attempting to delete a log item that is not in the AIL",
						__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
			return;
		}

		xfs_ail_delete(ailp, lip);
		lip->li_flags &= ~XFS_LI_IN_AIL;
		lip->li_lsn = 0;
		if (mlip == lip)
			mlip_changed = 1;
	}

	if (!mlip_changed) {
		spin_unlock(&ailp->xa_lock);
		return;
	}

	/*
	 * It is not safe to access mlip after the AIL lock is dropped, so we
	 * must get a copy of li_lsn before we do so.  This is especially
	 * important on 32-bit platforms where accessing and updating 64-bit
	 * values like li_lsn is not atomic.  It is possible we've emptied the
	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
	 */
	mlip = xfs_ail_min(ailp);
	tail_lsn = mlip ? mlip->li_lsn : 0;
	spin_unlock(&ailp->xa_lock);
	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
}

/*
 * The active item list (AIL) is a doubly linked list of log items sorted by
 * ascending lsn.  The base of the list is a forw/back pointer pair embedded
 * in the per-mount struct xfs_ail.  The base is initialized with both
 * pointers pointing to the base.  This case always needs to be distinguished,
 * because the base has no lsn to look at.  We almost always insert at the end
 * of the list, so on inserts we search from the end of the list to find where
 * the new item belongs.
 */

/*
 * Initialize the doubly linked list to point only to itself.
 */
int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return ENOMEM;

	ailp->xa_mount = mp;
	INIT_LIST_HEAD(&ailp->xa_ail);
	INIT_LIST_HEAD(&ailp->xa_cursors);
	spin_lock_init(&ailp->xa_lock);
	INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
	mp->m_ail = ailp;
	return 0;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	cancel_delayed_work_sync(&ailp->xa_work);
	kmem_free(ailp);
}
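
/*
 * Setup/teardown pairing, sketched for reference (illustrative only; the
 * mount-time caller shown here is hypothetical and not part of this file):
 *
 *	error = xfs_trans_ail_init(mp);
 *	if (error)
 *		return error;
 *	...
 *	xfs_trans_ail_destroy(mp);
 *
 * xfs_trans_ail_destroy() cancels and waits for any queued AIL work before
 * freeing the structure, so no new pushes should be queued once teardown
 * has started.
 */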