/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans_priv.h"
#include "xfs_trans_space.h"


STATIC void	xfs_trans_apply_sb_deltas(xfs_trans_t *);
STATIC uint	xfs_trans_count_vecs(xfs_trans_t *);
STATIC void	xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *);
STATIC void	xfs_trans_uncommit(xfs_trans_t *, uint);
STATIC void	xfs_trans_committed(xfs_trans_t *, int);
STATIC void	xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
STATIC void	xfs_trans_free(xfs_trans_t *);

kmem_zone_t	*xfs_trans_zone;


/*
 * Reservation functions here avoid a huge stack in xfs_trans_init
 * due to register overflow from temporaries in the calculations.
 */

STATIC uint
xfs_calc_write_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_itruncate_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_rename_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_link_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_remove_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_symlink_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_create_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_mkdir_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_ifree_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_ichange_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_growdata_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWDATA_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtalloc_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTALLOC_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtzero_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTZERO_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtfree_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTFREE_LOG_RES(mp);
}

STATIC uint
xfs_calc_swrite_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_SWRITE_LOG_RES(mp);
}

STATIC uint
xfs_calc_writeid_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_WRITEID_LOG_RES(mp);
}

STATIC uint
xfs_calc_addafork_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_attrinval_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRINVAL_LOG_RES(mp);
}

STATIC uint
xfs_calc_attrset_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_attrrm_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
}

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	xfs_mount_t	*mp)
{
	xfs_trans_reservations_t	*resp;

	resp = &(mp->m_reservations);
	resp->tr_write = xfs_calc_write_reservation(mp);
	resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
	resp->tr_rename = xfs_calc_rename_reservation(mp);
	resp->tr_link = xfs_calc_link_reservation(mp);
	resp->tr_remove = xfs_calc_remove_reservation(mp);
	resp->tr_symlink = xfs_calc_symlink_reservation(mp);
	resp->tr_create = xfs_calc_create_reservation(mp);
	resp->tr_mkdir = xfs_calc_mkdir_reservation(mp);
	resp->tr_ifree = xfs_calc_ifree_reservation(mp);
	resp->tr_ichange = xfs_calc_ichange_reservation(mp);
	resp->tr_growdata = xfs_calc_growdata_reservation(mp);
	resp->tr_swrite = xfs_calc_swrite_reservation(mp);
	resp->tr_writeid = xfs_calc_writeid_reservation(mp);
	resp->tr_addafork = xfs_calc_addafork_reservation(mp);
	resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrset = xfs_calc_attrset_reservation(mp);
	resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
	resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
}
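
/*
 * Usage note (illustrative, not part of the original interface
 * documentation): the precomputed values filled in above are normally
 * reached through the XFS_*_LOG_RES() macros in xfs_trans.h -- for
 * example, XFS_WRITE_LOG_RES(mp) typically resolves to
 * mp->m_reservations.tr_write -- and the result is handed to
 * xfs_trans_reserve() as its logspace argument.
 */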

/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	vfs_wait_for_freeze(XFS_MTOVFS(mp), SB_FREEZE_TRANS);
	return _xfs_trans_alloc(mp, type);
}

xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_trans_t	*tp;

	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
	tp->t_magic = XFS_TRANS_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	tp->t_items_free = XFS_LIC_NUM_SLOTS;
	tp->t_busy_free = XFS_LBC_NUM_SLOTS;
	XFS_LIC_INIT(&(tp->t_items));
	XFS_LBC_INIT(&(tp->t_busy));
	return tp;
}
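
/*
 * Illustrative sketch of the usual caller pattern built on the two
 * allocation routines above.  The transaction type and reservation
 * macro below are placeholders; a real caller picks the values that
 * match the work it is about to do:
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
 *	error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp), 0, 0, 0);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 *	(lock and join items, log the changes)
 *	error = xfs_trans_commit(tp, 0);
 */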

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	ntp->t_items_free = XFS_LIC_NUM_SLOTS;
	ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
	XFS_LIC_INIT(&(ntp->t_items));
	XFS_LBC_INIT(&(ntp->t_busy));

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE);
	ntp->t_ticket = tp->t_ticket;
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
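
/*
 * Illustrative sketch (hypothetical caller): xfs_trans_dup() is how a
 * long-running operation "rolls" a transaction that holds a permanent
 * log reservation.  The original transaction is committed while the
 * duplicate keeps the ticket and the unused block/rt reservations, and
 * is then given a fresh log reservation for the next leg of the work:
 *
 *	ntp = xfs_trans_dup(tp);
 *	error = xfs_trans_commit(tp, 0);
 *	tp = ntp;
 *	if (!error)
 *		error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *					  XFS_TRANS_PERM_LOG_RES,
 *					  XFS_ITRUNCATE_LOG_COUNT);
 */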

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	xfs_trans_t	*tp,
	uint		blocks,
	uint		logspace,
	uint		rtextents,
	uint		flags,
	uint		logcount)
{
	int		log_flags;
	int		error = 0;
	int		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					  -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return (XFS_ERROR(ENOSPC));
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (logspace > 0) {
		ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace));
		ASSERT((tp->t_log_count == 0) ||
			(tp->t_log_count == logcount));
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_PERM_RESERV;
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
			log_flags = 0;
		}

		error = xfs_log_reserve(tp->t_mountp, logspace, logcount,
					&tp->t_ticket,
					XFS_TRANSACTION, log_flags, tp->t_type);
		if (error) {
			goto undo_blocks;
		}
		tp->t_log_res = logspace;
		tp->t_log_count = logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
					  -((int64_t)rtextents), rsvd);
		if (error) {
			error = XFS_ERROR(ENOSPC);
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (logspace > 0) {
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		(void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					 (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}


/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in
		 * the transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY);
}
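
/*
 * Illustrative example (hypothetical variable names): an allocation
 * path that reserved blocks via xfs_trans_reserve() reports the blocks
 * it actually consumed with a negative delta, which is charged against
 * t_blk_res above:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -((int64_t)alen));
 *
 * A growfs path, by contrast, would log a positive delta against a
 * field such as XFS_TRANS_SB_DBLOCKS.
 */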

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_sb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	if (tp->t_icount_delta != 0) {
		INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta);
	}
	if (tp->t_ifree_delta != 0) {
		INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta);
	}

	if (tp->t_fdblocks_delta != 0) {
		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta);
	}
	if (tp->t_res_fdblocks_delta != 0) {
		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta != 0) {
		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_frextents_delta);
	}
	if (tp->t_res_frextents_delta != 0) {
		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_res_frextents_delta);
	}
	if (tp->t_dblocks_delta != 0) {
		INT_MOD(sbp->sb_dblocks, ARCH_CONVERT, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta != 0) {
		INT_MOD(sbp->sb_agcount, ARCH_CONVERT, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta != 0) {
		INT_MOD(sbp->sb_imax_pct, ARCH_CONVERT, tp->t_imaxpct_delta);
		whole = 1;
	}
	if (tp->t_rextsize_delta != 0) {
		INT_MOD(sbp->sb_rextsize, ARCH_CONVERT, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta != 0) {
		INT_MOD(sbp->sb_rbmblocks, ARCH_CONVERT, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta != 0) {
		INT_MOD(sbp->sb_rblocks, ARCH_CONVERT, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta != 0) {
		INT_MOD(sbp->sb_rextents, ARCH_CONVERT, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta != 0) {
		INT_MOD(sbp->sb_rextslog, ARCH_CONVERT, tp->t_rextslog_delta);
		whole = 1;
	}

	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_sb_t, sb_icount),
				  offsetof(xfs_sb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);

	XFS_MTOVFS(tp->t_mountp)->vfs_super->s_dirt = 1;
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused
 * reservations and apply superblock counter changes to the in-core
 * superblock.
 *
 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 */
STATIC void
xfs_trans_unreserve_and_mod_sb(
	xfs_trans_t	*tp)
{
	xfs_mod_sb_t	msb[14];	/* If you add cases, add entries */
	xfs_mod_sb_t	*msbp;
	/* REFERENCED */
	int		error;
	int		rsvd;

	msbp = msb;
	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Release any reserved blocks.  Any that were allocated
	 * will be taken back again by fdblocks_delta below.
	 */
	if (tp->t_blk_res > 0) {
		msbp->msb_field = XFS_SBS_FDBLOCKS;
		msbp->msb_delta = tp->t_blk_res;
		msbp++;
	}

	/*
	 * Release any reserved real time extents.  Any that were
	 * allocated will be taken back again by frextents_delta below.
	 */
	if (tp->t_rtx_res > 0) {
		msbp->msb_field = XFS_SBS_FREXTENTS;
		msbp->msb_delta = tp->t_rtx_res;
		msbp++;
	}

	/*
	 * Apply any superblock modifications to the in-core version.
	 * The t_res_fdblocks_delta and t_res_frextents_delta fields are
	 * explicitly NOT applied to the in-core superblock.
	 * The idea is that that has already been done.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		if (tp->t_icount_delta != 0) {
			msbp->msb_field = XFS_SBS_ICOUNT;
			msbp->msb_delta = tp->t_icount_delta;
			msbp++;
		}
		if (tp->t_ifree_delta != 0) {
			msbp->msb_field = XFS_SBS_IFREE;
			msbp->msb_delta = tp->t_ifree_delta;
			msbp++;
		}
		if (tp->t_fdblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_FDBLOCKS;
			msbp->msb_delta = tp->t_fdblocks_delta;
			msbp++;
		}
		if (tp->t_frextents_delta != 0) {
			msbp->msb_field = XFS_SBS_FREXTENTS;
			msbp->msb_delta = tp->t_frextents_delta;
			msbp++;
		}
		if (tp->t_dblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_DBLOCKS;
			msbp->msb_delta = tp->t_dblocks_delta;
			msbp++;
		}
		if (tp->t_agcount_delta != 0) {
			msbp->msb_field = XFS_SBS_AGCOUNT;
			msbp->msb_delta = tp->t_agcount_delta;
			msbp++;
		}
		if (tp->t_imaxpct_delta != 0) {
			msbp->msb_field = XFS_SBS_IMAX_PCT;
			msbp->msb_delta = tp->t_imaxpct_delta;
			msbp++;
		}
		if (tp->t_rextsize_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSIZE;
			msbp->msb_delta = tp->t_rextsize_delta;
			msbp++;
		}
		if (tp->t_rbmblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBMBLOCKS;
			msbp->msb_delta = tp->t_rbmblocks_delta;
			msbp++;
		}
		if (tp->t_rblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBLOCKS;
			msbp->msb_delta = tp->t_rblocks_delta;
			msbp++;
		}
		if (tp->t_rextents_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTENTS;
			msbp->msb_delta = tp->t_rextents_delta;
			msbp++;
		}
		if (tp->t_rextslog_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSLOG;
			msbp->msb_delta = tp->t_rextslog_delta;
			msbp++;
		}
	}

	/*
	 * If we need to change anything, do it.
	 */
	if (msbp > msb) {
		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
			(uint)(msbp - msb), rsvd);
		ASSERT(error == 0);
	}
}
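
/*
 * Note on the msb[14] sizing above: one slot for each possible entry,
 * i.e. the two reservation give-backs (FDBLOCKS and FREXTENTS) plus
 * the twelve per-field deltas handled under XFS_TRANS_SB_DIRTY, which
 * is why the declaration asks that new cases come with new entries.
 */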


/*
 * xfs_trans_commit
 *
 * Commit the given transaction to the log a/synchronously.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism.  Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent.  In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
/*ARGSUSED*/
int
_xfs_trans_commit(
	xfs_trans_t	*tp,
	uint		flags,
	int		*log_flushed)
{
	xfs_log_iovec_t		*log_vector;
	int			nvec;
	xfs_mount_t		*mp;
	xfs_lsn_t		commit_lsn;
	/* REFERENCED */
	int			error;
	int			log_flags;
	int			sync;
#define	XFS_TRANS_LOGVEC_COUNT	16
	xfs_log_iovec_t		log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
	void			*commit_iclog;
	int			shutdown;

	commit_lsn = -1;

	/*
	 * Determine whether this commit is releasing a permanent
	 * log reservation or not.
	 */
	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		log_flags = XFS_LOG_REL_PERM_RESERV;
	} else {
		log_flags = 0;
	}
	mp = tp->t_mountp;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
shut_us_down:
	shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0;
	if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) {
		xfs_trans_unreserve_and_mod_sb(tp);
		/*
		 * It is indeed possible for the transaction to be
		 * not dirty but the dqinfo portion to be.  All that
		 * means is that we have some (non-persistent) quota
		 * reservations that need to be unreserved.
		 */
		XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
		if (tp->t_ticket) {
			commit_lsn = xfs_log_done(mp, tp->t_ticket,
							NULL, log_flags);
			if (commit_lsn == -1 && !shutdown)
				shutdown = XFS_ERROR(EIO);
		}
		current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
		xfs_trans_free_items(tp, shutdown ? XFS_TRANS_ABORT : 0);
		xfs_trans_free_busy(tp);
		xfs_trans_free(tp);
		XFS_STATS_INC(xs_trans_empty);
		return (shutdown);
	}
	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		xfs_trans_apply_sb_deltas(tp);
	}
	XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);

	/*
	 * Ask each log item how many log_vector entries it will
	 * need so we can figure out how many to allocate.
	 * Try to avoid the kmem_alloc() call in the common case
	 * by using a vector from the stack when it fits.
	 */
	nvec = xfs_trans_count_vecs(tp);
	if (nvec == 0) {
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
		goto shut_us_down;
	} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
		log_vector = log_vector_fast;
	} else {
		log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec *
						   sizeof(xfs_log_iovec_t),
						   KM_SLEEP);
	}

	/*
	 * Fill in the log_vector and pin the logged items, and
	 * then write the transaction to the log.
	 */
	xfs_trans_fill_vecs(tp, log_vector);

	error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));

	/*
	 * The transaction is committed incore here, and can go out to disk
	 * at any time after this call.  However, all the items associated
	 * with the transaction are still locked and pinned in memory.
	 */
	commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);

	tp->t_commit_lsn = commit_lsn;
	if (nvec > XFS_TRANS_LOGVEC_COUNT) {
		kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
	}

	/*
	 * If we got a log write error, unpin the log items that we had
	 * pinned, clean up, free the trans structure, and return the error.
	 */
	if (error || commit_lsn == -1) {
		current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
		xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
		return XFS_ERROR(EIO);
	}

	/*
	 * Once the transaction has committed, unused
	 * reservations need to be released and changes to
	 * the superblock need to be reflected in the in-core
	 * version.  Do that now.
	 */
	xfs_trans_unreserve_and_mod_sb(tp);

	sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * Tell the LM to call the transaction completion routine
	 * when the log write with LSN commit_lsn completes (e.g.
	 * when the transaction commit really hits the on-disk log).
	 * After this call we cannot reference tp, because the call
	 * can happen at any time and the call will free the transaction
	 * structure pointed to by tp.  The only case where we call
	 * the completion routine (xfs_trans_committed) directly is
	 * if the log is turned off on a debug kernel or we're
	 * running in simulation mode (the log is explicitly turned
	 * off).
	 */
	tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed;
	tp->t_logcb.cb_arg = tp;

	/*
	 * We need to pass the iclog buffer which was used for the
	 * transaction commit record into this function, and attach
	 * the callback to it.  The callback must be attached before
	 * the items are unlocked to avoid racing with other threads
	 * waiting for an item to unlock.
	 */
	shutdown = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));

	/*
	 * Mark this thread as no longer being in a transaction
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Once all the items of the transaction have been copied
	 * to the in core log and the callback is attached, the
	 * items can be unlocked.
	 *
	 * This will free descriptors pointing to items which were
	 * not logged since there is nothing more to do with them.
	 * For items which were logged, we will keep pointers to them
	 * so they can be unpinned after the transaction commits to disk.
	 * This will also stamp each modified meta-data item with
	 * the commit lsn of this transaction for dependency tracking
	 * purposes.
	 */
	xfs_trans_unlock_items(tp, commit_lsn);

	/*
	 * If we detected a log error earlier, finish committing
	 * the transaction now (unpin log items, etc).
	 *
	 * Order is critical here, to avoid using the transaction
	 * pointer after it's been freed (by xfs_trans_committed
	 * either here now, or as a callback).  We cannot do this
	 * step inside xfs_log_notify as was done earlier because
	 * of this issue.
	 */
	if (shutdown)
		xfs_trans_committed(tp, XFS_LI_ABORTED);

	/*
	 * Now that the xfs_trans_committed callback has been attached,
	 * and the items are released we can finally allow the iclog to
	 * go to disk.
	 */
	error = xfs_log_release_iclog(mp, commit_iclog);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		if (!error) {
			error = _xfs_log_force(mp, commit_lsn,
				      XFS_LOG_FORCE | XFS_LOG_SYNC,
				      log_flushed);
		}
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return (error);
}
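
/*
 * Usage note (illustrative, relies on definitions outside this file):
 * most callers go through the xfs_trans_commit() wrapper in
 * xfs_trans.h, which typically passes a NULL log_flushed pointer to
 * _xfs_trans_commit().  A caller that needs the commit to be durable
 * before returning sets XFS_TRANS_SYNC on the transaction first (e.g.
 * via xfs_trans_set_sync(tp)), so that the log force at the end of
 * this routine is performed and waited for.
 */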


/*
 * Total up the number of log iovecs needed to commit this
 * transaction.  The transaction itself needs one for the
 * transaction header.  Ask each dirty item in turn how many
 * it needs to get the total.
 */
STATIC uint
xfs_trans_count_vecs(
	xfs_trans_t	*tp)
{
	int			nvecs;
	xfs_log_item_desc_t	*lidp;

	nvecs = 1;
	lidp = xfs_trans_first_item(tp);
	ASSERT(lidp != NULL);

	/*
	 * In the non-debug case we need to bail out gracefully: if we
	 * didn't find a log item here, return zero and let trans_commit
	 * deal with it.
	 */
	if (lidp == NULL)
		return 0;

	while (lidp != NULL) {
		/*
		 * Skip items which aren't dirty in this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
			lidp = xfs_trans_next_item(tp, lidp);
			continue;
		}
		lidp->lid_size = IOP_SIZE(lidp->lid_item);
		nvecs += lidp->lid_size;
		lidp = xfs_trans_next_item(tp, lidp);
	}

	return nvecs;
}

/*
 * Called from the trans_commit code when we notice that
 * the filesystem is in the middle of a forced shutdown.
 */
STATIC void
xfs_trans_uncommit(
	xfs_trans_t	*tp,
	uint		flags)
{
	xfs_log_item_desc_t	*lidp;

	for (lidp = xfs_trans_first_item(tp);
	     lidp != NULL;
	     lidp = xfs_trans_next_item(tp, lidp)) {
		/*
		 * Unpin only the items that are dirty.
		 */
		if (lidp->lid_flags & XFS_LID_DIRTY)
			IOP_UNPIN_REMOVE(lidp->lid_item, tp);
	}

	xfs_trans_unreserve_and_mod_sb(tp);
	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);

	xfs_trans_free_items(tp, flags);
	xfs_trans_free_busy(tp);
	xfs_trans_free(tp);
}

/*
 * Fill in the vector with pointers to data to be logged
 * by this transaction.  The transaction header takes
 * the first vector, and then each dirty item takes the
 * number of vectors it indicated it needed in xfs_trans_count_vecs().
 *
 * As each item fills in the entries it needs, also pin the item
 * so that it cannot be flushed out until the log write completes.
 */
STATIC void
xfs_trans_fill_vecs(
	xfs_trans_t		*tp,
	xfs_log_iovec_t		*log_vector)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_iovec_t		*vecp;
	uint			nitems;

	/*
	 * Skip over the entry for the transaction header, we'll
	 * fill that in at the end.
	 */
	vecp = log_vector + 1;		/* pointer arithmetic */

	nitems = 0;
	lidp = xfs_trans_first_item(tp);
	ASSERT(lidp != NULL);
	while (lidp != NULL) {
		/*
		 * Skip items which aren't dirty in this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
			lidp = xfs_trans_next_item(tp, lidp);
			continue;
		}
		/*
		 * The item may be marked dirty but not actually log
		 * anything.  This can be used to get called when a
		 * transaction is committed.
		 */
		if (lidp->lid_size) {
			nitems++;
		}
		IOP_FORMAT(lidp->lid_item, vecp);
		vecp += lidp->lid_size;		/* pointer arithmetic */
		IOP_PIN(lidp->lid_item);
		lidp = xfs_trans_next_item(tp, lidp);
	}

	/*
	 * Now that we've counted the number of items in this
	 * transaction, fill in the transaction header.
	 */
	tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_header.th_type = tp->t_type;
	tp->t_header.th_num_items = nitems;
	log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
	log_vector->i_len = sizeof(xfs_trans_header_t);
	XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
}
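
/*
 * Layout sketch (illustrative) of the vector filled in above, for a
 * transaction with two dirty items needing two regions and one region
 * respectively:
 *
 *	log_vector[0]		transaction header (filled in last)
 *	log_vector[1..2]	regions formatted by the first item
 *	log_vector[3]		region formatted by the second item
 *
 * xfs_trans_count_vecs() sized the array as 1 + sum of lid_size, so
 * the header always owns slot 0.
 */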


/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	xfs_trans_t		*tp,
	int			flags)
{
	int			log_flags;
#ifdef DEBUG
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	int			i;
#endif
	xfs_mount_t		*mp = tp->t_mountp;

	/*
	 * See if the caller is being too lazy to figure out if
	 * the transaction really needs an abort.
	 */
	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
		flags &= ~XFS_TRANS_ABORT;
	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!(flags & XFS_TRANS_ABORT)) {
		licp = &(tp->t_items);
		while (licp != NULL) {
			lidp = licp->lic_descs;
			for (i = 0; i < licp->lic_unused; i++, lidp++) {
				if (XFS_LIC_ISFREE(licp, i)) {
					continue;
				}

				lip = lidp->lid_item;
				if (!XFS_FORCED_SHUTDOWN(mp))
					ASSERT(!(lip->li_type == XFS_LI_EFD));
			}
			licp = licp->lic_next;
		}
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);

	if (tp->t_ticket) {
		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, flags);
	xfs_trans_free_busy(tp);
	xfs_trans_free(tp);
}


/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	xfs_trans_t	*tp)
{
	atomic_dec(&tp->t_mountp->m_active_trans);
	XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
	kmem_zone_free(xfs_trans_zone, tp);
}


/*
 * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
 *
 * This is typically called by the LM when a transaction has been fully
 * committed to disk.  It needs to unpin the items which have
 * been logged by the transaction and update their positions
 * in the AIL if necessary.
 * This also gets called when the transaction didn't get written out
 * because of an I/O error; in that case abortflag & XFS_LI_ABORTED is set.
 *
 * Call xfs_trans_chunk_committed() to process the items in
 * each chunk.
 */
STATIC void
xfs_trans_committed(
	xfs_trans_t	*tp,
	int		abortflag)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_chunk_t	*next_licp;
	xfs_log_busy_chunk_t	*lbcp;
	xfs_log_busy_slot_t	*lbsp;
	int			i;

	/*
	 * Call the transaction's completion callback if there
	 * is one.
	 */
	if (tp->t_callback != NULL) {
		tp->t_callback(tp, tp->t_callarg);
	}

	/*
	 * Special case the chunk embedded in the transaction.
	 */
	licp = &(tp->t_items);
	if (!(XFS_LIC_ARE_ALL_FREE(licp))) {
		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
	}

	/*
	 * Process the items in each chunk in turn.
	 */
	licp = licp->lic_next;
	while (licp != NULL) {
		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
		next_licp = licp->lic_next;
		kmem_free(licp, sizeof(xfs_log_item_chunk_t));
		licp = next_licp;
	}

	/*
	 * Clear all the per-AG busy list items listed in this transaction
	 */
	lbcp = &tp->t_busy;
	while (lbcp != NULL) {
		for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused;
		     i++, lbsp++) {
			if (!XFS_LBC_ISFREE(lbcp, i)) {
				xfs_alloc_clear_busy(tp, lbsp->lbc_ag,
						     lbsp->lbc_idx);
			}
		}
		lbcp = lbcp->lbc_next;
	}
	xfs_trans_free_busy(tp);

	/*
	 * That's it for the transaction structure.  Free it.
	 */
	xfs_trans_free(tp);
}

/*
 * This is called to perform the commit processing for each
 * item described by the given chunk.
 *
 * The commit processing consists of unlocking items which were
 * held locked with the SYNC_UNLOCK attribute, calling the committed
 * routine of each logged item, updating the item's position in the AIL
 * if necessary, and unpinning each item.  If the committed routine
 * returns -1, then do nothing further with the item because it
 * may have been freed.
 *
 * Since items are unlocked when they are copied to the incore
 * log, it is possible for two transactions to be completing
 * and manipulating the same item simultaneously.  The AIL lock
 * will protect the lsn field of each item.  The value of this
 * field can never go backwards.
 *
 * We unpin the items after repositioning them in the AIL, because
 * otherwise they could be immediately flushed and we'd have to race
 * with the flusher trying to pull the item from the AIL as we add it.
 */
STATIC void
xfs_trans_chunk_committed(
	xfs_log_item_chunk_t	*licp,
	xfs_lsn_t		lsn,
	int			aborted)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	xfs_lsn_t		item_lsn;
	struct xfs_mount	*mp;
	int			i;
	SPLDECL(s);

	lidp = licp->lic_descs;
	for (i = 0; i < licp->lic_unused; i++, lidp++) {
		if (XFS_LIC_ISFREE(licp, i)) {
			continue;
		}

		lip = lidp->lid_item;
		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;

		/*
		 * Send in the ABORTED flag to the COMMITTED routine
		 * so that it knows whether the transaction was aborted
		 * or not.
		 */
		item_lsn = IOP_COMMITTED(lip, lsn);

		/*
		 * If the committed routine returns -1, make
		 * no more references to the item.
		 */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
			continue;
		}

		/*
		 * If the returned lsn is greater than what it
		 * contained before, update the location of the
		 * item in the AIL.  If it is not, then do nothing.
		 * Items can never move backwards in the AIL.
		 *
		 * While the new lsn should usually be greater, it
		 * is possible that a later transaction completing
		 * simultaneously with an earlier one using the
		 * same item could complete first with a higher lsn.
		 * This would cause the earlier transaction to fail
		 * the test below.
		 */
		mp = lip->li_mountp;
		AIL_LOCK(mp, s);
		if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
			/*
			 * This will set the item's lsn to item_lsn
			 * and update the position of the item in
			 * the AIL.
			 *
			 * xfs_trans_update_ail() drops the AIL lock.
			 */
			xfs_trans_update_ail(mp, lip, item_lsn, s);
		} else {
			AIL_UNLOCK(mp, s);
		}

		/*
		 * Now that we've repositioned the item in the AIL,
		 * unpin it so it can be flushed.  Pass information
		 * about buffer stale state down from the log item
		 * flags, if anyone else stales the buffer we do not
		 * want to pay any attention to it.
		 */
		IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
	}
}