/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"

kmem_zone_t     *xfs_trans_zone;
kmem_zone_t     *xfs_log_item_desc_zone;

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
        struct xfs_mount        *mp)
{
        xfs_trans_resv_calc(mp, M_RES(mp));
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
        struct xfs_trans        *tp)
{
        xfs_extent_busy_sort(&tp->t_busy);
        xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

        atomic_dec(&tp->t_mountp->m_active_trans);
        if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
                sb_end_intwrite(tp->t_mountp->m_super);
        xfs_trans_free_dqinfo(tp);
        kmem_zone_free(xfs_trans_zone, tp);
}
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC xfs_trans_t *
xfs_trans_dup(
        xfs_trans_t     *tp)
{
        xfs_trans_t     *ntp;

        ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

        /*
         * Initialize the new transaction structure.
         */
        ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
        ntp->t_mountp = tp->t_mountp;
        INIT_LIST_HEAD(&ntp->t_items);
        INIT_LIST_HEAD(&ntp->t_busy);

        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(tp->t_ticket != NULL);

        ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
                       (tp->t_flags & XFS_TRANS_RESERVE) |
                       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
        /* We gave our writer reference to the new transaction */
        tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
        ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
        ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
        tp->t_blk_res = tp->t_blk_res_used;
        ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
        tp->t_rtx_res = tp->t_rtx_res_used;
        ntp->t_pflags = tp->t_pflags;

        xfs_trans_dup_dqinfo(tp, ntp);

        atomic_inc(&tp->t_mountp->m_active_trans);
        return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
        struct xfs_trans        *tp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents)
{
        int             error = 0;
        bool            rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

        /* Mark this thread as being in a transaction */
        current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

        /*
         * Attempt to reserve the needed disk blocks by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (blocks > 0) {
                error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
                if (error != 0) {
                        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
                        return -ENOSPC;
                }
                tp->t_blk_res += blocks;
        }

        /*
         * Reserve the log space needed for this transaction.
         */
        if (resp->tr_logres > 0) {
                bool    permanent = false;

                ASSERT(tp->t_log_res == 0 ||
                       tp->t_log_res == resp->tr_logres);
                ASSERT(tp->t_log_count == 0 ||
                       tp->t_log_count == resp->tr_logcount);

                if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
                        tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
                        permanent = true;
                } else {
                        ASSERT(tp->t_ticket == NULL);
                        ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
                }

                if (tp->t_ticket != NULL) {
                        ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
                        error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
                } else {
                        error = xfs_log_reserve(tp->t_mountp,
                                                resp->tr_logres,
                                                resp->tr_logcount,
                                                &tp->t_ticket, XFS_TRANSACTION,
                                                permanent);
                }

                if (error)
                        goto undo_blocks;

                tp->t_log_res = resp->tr_logres;
                tp->t_log_count = resp->tr_logcount;
        }

        /*
         * Attempt to reserve the needed realtime extents by decrementing
         * the number needed from the number available.  This will
         * fail if the count would go below zero.
         */
        if (rtextents > 0) {
                error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
                if (error) {
                        error = -ENOSPC;
                        goto undo_log;
                }
                tp->t_rtx_res += rtextents;
        }

        return 0;

        /*
         * Error cases jump to one of these labels to undo any
         * reservations which have already been performed.
         */
undo_log:
        if (resp->tr_logres > 0) {
                xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
                tp->t_ticket = NULL;
                tp->t_log_res = 0;
                tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
        }

undo_blocks:
        if (blocks > 0) {
                xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
                tp->t_blk_res = 0;
        }

        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

        return error;
}
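/*
 * Allocate a transaction and reserve the disk blocks, log space and rt
 * extents it will need.  Takes a write reference on the superblock unless
 * XFS_TRANS_NO_WRITECOUNT is set.  On failure the transaction is cancelled
 * here; on success *tpp is set and the caller must finish the transaction
 * with xfs_trans_commit() or xfs_trans_cancel().
 */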
int
xfs_trans_alloc(
        struct xfs_mount        *mp,
        struct xfs_trans_res    *resp,
        uint                    blocks,
        uint                    rtextents,
        uint                    flags,
        struct xfs_trans        **tpp)
{
        struct xfs_trans        *tp;
        int                     error;

        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);

        WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);

        tp = kmem_zone_zalloc(xfs_trans_zone,
                (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
        tp->t_magic = XFS_TRANS_HEADER_MAGIC;
        tp->t_flags = flags;
        tp->t_mountp = mp;
        INIT_LIST_HEAD(&tp->t_items);
        INIT_LIST_HEAD(&tp->t_busy);

        error = xfs_trans_reserve(tp, resp, blocks, rtextents);
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        *tpp = tp;
        return 0;
}
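/*
 * Illustrative sketch of the usual calling sequence.  The tr_itruncate
 * reservation and the inode locking steps below are examples only, not
 * taken from this file:
 *
 *      struct xfs_trans        *tp;
 *      int                     error;
 *
 *      error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *      if (error)
 *              return error;
 *
 *      xfs_ilock(ip, XFS_ILOCK_EXCL);
 *      xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *      xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *
 *      return xfs_trans_commit(tp);
 */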
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
        xfs_trans_t     *tp,
        uint            field,
        int64_t         delta)
{
        uint32_t        flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
        xfs_mount_t     *mp = tp->t_mountp;

        switch (field) {
        case XFS_TRANS_SB_ICOUNT:
                tp->t_icount_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_IFREE:
                tp->t_ifree_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FDBLOCKS:
                /*
                 * Track the number of blocks allocated in the
                 * transaction.  Make sure it does not exceed the
                 * number reserved.
                 */
                if (delta < 0) {
                        tp->t_blk_res_used += (uint)-delta;
                        ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
                }
                tp->t_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_RES_FDBLOCKS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                tp->t_res_fdblocks_delta += delta;
                if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                        flags &= ~XFS_TRANS_SB_DIRTY;
                break;
        case XFS_TRANS_SB_FREXTENTS:
                /*
                 * Track the number of realtime extents allocated in the
                 * transaction.  Make sure it does not exceed the
                 * number reserved.
                 */
                if (delta < 0) {
                        tp->t_rtx_res_used += (uint)-delta;
                        ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
                }
                tp->t_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_RES_FREXTENTS:
                /*
                 * The allocation has already been applied to the
                 * in-core superblock's counter.  This should only
                 * be applied to the on-disk superblock.
                 */
                ASSERT(delta < 0);
                tp->t_res_frextents_delta += delta;
                break;
        case XFS_TRANS_SB_DBLOCKS:
                ASSERT(delta > 0);
                tp->t_dblocks_delta += delta;
                break;
        case XFS_TRANS_SB_AGCOUNT:
                ASSERT(delta > 0);
                tp->t_agcount_delta += delta;
                break;
        case XFS_TRANS_SB_IMAXPCT:
                tp->t_imaxpct_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSIZE:
                tp->t_rextsize_delta += delta;
                break;
        case XFS_TRANS_SB_RBMBLOCKS:
                tp->t_rbmblocks_delta += delta;
                break;
        case XFS_TRANS_SB_RBLOCKS:
                tp->t_rblocks_delta += delta;
                break;
        case XFS_TRANS_SB_REXTENTS:
                tp->t_rextents_delta += delta;
                break;
        case XFS_TRANS_SB_REXTSLOG:
                tp->t_rextslog_delta += delta;
                break;
        default:
                ASSERT(0);
                return;
        }

        tp->t_flags |= flags;
}
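/*
 * For example, a transaction that has just allocated four filesystem
 * blocks would account for them with (illustrative call only):
 *
 *      xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -4);
 *
 * The negative delta is charged against the block reservation taken at
 * xfs_trans_reserve() time through t_blk_res_used, and the net change
 * is applied to the superblock when the transaction commits.
 */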
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
        xfs_trans_t     *tp)
{
        xfs_dsb_t       *sbp;
        xfs_buf_t       *bp;
        int             whole = 0;

        bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
        sbp = XFS_BUF_TO_SBP(bp);

        /*
         * Check that superblock mods match the mods made to AGF counters.
         */
        ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
               (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
                tp->t_ag_btree_delta));

        /*
         * Only update the superblock counters if we are logging them
         */
        if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
                if (tp->t_icount_delta)
                        be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
                if (tp->t_ifree_delta)
                        be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
                if (tp->t_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
                if (tp->t_res_fdblocks_delta)
                        be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
        }

        if (tp->t_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
        if (tp->t_res_frextents_delta)
                be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

        if (tp->t_dblocks_delta) {
                be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
                whole = 1;
        }
        if (tp->t_agcount_delta) {
                be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
                whole = 1;
        }
        if (tp->t_imaxpct_delta) {
                sbp->sb_imax_pct += tp->t_imaxpct_delta;
                whole = 1;
        }
        if (tp->t_rextsize_delta) {
                be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
                whole = 1;
        }
        if (tp->t_rbmblocks_delta) {
                be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
                whole = 1;
        }
        if (tp->t_rblocks_delta) {
                be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
                whole = 1;
        }
        if (tp->t_rextents_delta) {
                be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
                whole = 1;
        }
        if (tp->t_rextslog_delta) {
                sbp->sb_rextslog += tp->t_rextslog_delta;
                whole = 1;
        }

        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
        if (whole)
                /*
                 * Log the whole thing, the fields are noncontiguous.
                 */
                xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
        else
                /*
                 * Since all the modifiable fields are contiguous, we
                 * can get away with this.
                 */
                xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
                                  offsetof(xfs_dsb_t, sb_frextents) +
                                  sizeof(sbp->sb_frextents) - 1);
}
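/*
 * The three helpers below apply a signed delta to an unsigned in-core
 * superblock counter of the matching width.  If the result would go
 * negative they assert, return -EINVAL and leave the counter unchanged.
 */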
STATIC int
xfs_sb_mod8(
        uint8_t                 *field,
        int8_t                  delta)
{
        int8_t                  counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

STATIC int
xfs_sb_mod32(
        uint32_t                *field,
        int32_t                 delta)
{
        int32_t                 counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

STATIC int
xfs_sb_mod64(
        uint64_t                *field,
        int64_t                 delta)
{
        int64_t                 counter = *field;

        counter += delta;
        if (counter < 0) {
                ASSERT(0);
                return -EINVAL;
        }
        *field = counter;
        return 0;
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock; those deltas have already been applied.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
        int64_t                 blkdelta = 0;
        int64_t                 rtxdelta = 0;
        int64_t                 idelta = 0;
        int64_t                 ifreedelta = 0;
        int                     error;

        /* calculate deltas */
        if (tp->t_blk_res > 0)
                blkdelta = tp->t_blk_res;
        if ((tp->t_fdblocks_delta != 0) &&
            (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
             (tp->t_flags & XFS_TRANS_SB_DIRTY)))
                blkdelta += tp->t_fdblocks_delta;

        if (tp->t_rtx_res > 0)
                rtxdelta = tp->t_rtx_res;
        if ((tp->t_frextents_delta != 0) &&
            (tp->t_flags & XFS_TRANS_SB_DIRTY))
                rtxdelta += tp->t_frextents_delta;

        if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
            (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
                idelta = tp->t_icount_delta;
                ifreedelta = tp->t_ifree_delta;
        }

        /* apply the per-cpu counters */
        if (blkdelta) {
                error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
                if (error)
                        goto out;
        }

        if (idelta) {
                error = xfs_mod_icount(mp, idelta);
                if (error)
                        goto out_undo_fdblocks;
        }

        if (ifreedelta) {
                error = xfs_mod_ifree(mp, ifreedelta);
                if (error)
                        goto out_undo_icount;
        }

        if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
                return;

        /* apply remaining deltas */
        spin_lock(&mp->m_sb_lock);
        if (rtxdelta) {
                error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
                if (error)
                        goto out_undo_ifree;
        }

        if (tp->t_dblocks_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
                if (error)
                        goto out_undo_frextents;
        }
        if (tp->t_agcount_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
                if (error)
                        goto out_undo_dblocks;
        }
        if (tp->t_imaxpct_delta != 0) {
                error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
                if (error)
                        goto out_undo_agcount;
        }
        if (tp->t_rextsize_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
                                     tp->t_rextsize_delta);
                if (error)
                        goto out_undo_imaxpct;
        }
        if (tp->t_rbmblocks_delta != 0) {
                error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
                                     tp->t_rbmblocks_delta);
                if (error)
                        goto out_undo_rextsize;
        }
        if (tp->t_rblocks_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
                if (error)
                        goto out_undo_rbmblocks;
        }
        if (tp->t_rextents_delta != 0) {
                error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
                                     tp->t_rextents_delta);
                if (error)
                        goto out_undo_rblocks;
        }
        if (tp->t_rextslog_delta != 0) {
                error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
                                    tp->t_rextslog_delta);
                if (error)
                        goto out_undo_rextents;
        }
        spin_unlock(&mp->m_sb_lock);
        return;
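        /*
         * The labels below unwind the superblock deltas in the reverse
         * order they were applied above; each one backs out a single
         * counter change and falls through to the next.
         */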
out_undo_rextents:
        if (tp->t_rextents_delta)
                xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
        if (tp->t_rblocks_delta)
                xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
        if (tp->t_rbmblocks_delta)
                xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
        if (tp->t_rextsize_delta)
                xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
        if (tp->t_imaxpct_delta)
                xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
        if (tp->t_agcount_delta)
                xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
        if (tp->t_dblocks_delta)
                xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
        if (rtxdelta)
                xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
        spin_unlock(&mp->m_sb_lock);
        if (ifreedelta)
                xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
        if (idelta)
                xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
        if (blkdelta)
                xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
        ASSERT(error == 0);
        return;
}

/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
        struct xfs_trans        *tp,
        struct xfs_log_item     *lip)
{
        struct xfs_log_item_desc *lidp;

        ASSERT(lip->li_mountp == tp->t_mountp);
        ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

        lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

        lidp->lid_item = lip;
        lidp->lid_flags = 0;
        list_add_tail(&lidp->lid_trans, &tp->t_items);

        lip->li_desc = lidp;
}

STATIC void
xfs_trans_free_item_desc(
        struct xfs_log_item_desc *lidp)
{
        list_del_init(&lidp->lid_trans);
        kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
        struct xfs_log_item     *lip)
{
        xfs_trans_free_item_desc(lip->li_desc);
        lip->li_desc = NULL;
}

/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
        struct xfs_trans        *tp,
        xfs_lsn_t               commit_lsn,
        bool                    abort)
{
        struct xfs_log_item_desc *lidp, *next;

        list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
                struct xfs_log_item     *lip = lidp->lid_item;

                lip->li_desc = NULL;

                if (commit_lsn != NULLCOMMITLSN)
                        lip->li_ops->iop_committing(lip, commit_lsn);
                if (abort)
                        lip->li_flags |= XFS_LI_ABORTED;
                lip->li_ops->iop_unlock(lip);

                xfs_trans_free_item_desc(lidp);
        }
}
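/*
 * Insert a batch of log items into the AIL at commit_lsn in a single
 * pass and then unpin them.  xfs_trans_ail_update_bulk() drops the AIL
 * lock for us, so the unpin loop runs unlocked.
 */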
static inline void
xfs_log_item_batch_insert(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               commit_lsn)
{
        int     i;

        spin_lock(&ailp->xa_lock);
        /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
        xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];

                lip->li_ops->iop_unpin(lip, 0);
        }
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
        struct xfs_ail          *ailp,
        struct xfs_log_vec      *log_vector,
        xfs_lsn_t               commit_lsn,
        int                     aborted)
{
#define LOG_ITEM_BATCH_SIZE     32
        struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
        struct xfs_log_vec      *lv;
        struct xfs_ail_cursor   cur;
        int                     i = 0;

        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
        spin_unlock(&ailp->xa_lock);

        /* unpin all the log items */
        for (lv = log_vector; lv; lv = lv->lv_next) {
                struct xfs_log_item     *lip = lv->lv_item;
                xfs_lsn_t               item_lsn;

                if (aborted)
                        lip->li_flags |= XFS_LI_ABORTED;
                item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

                /* item_lsn of -1 means the item needs no further processing */
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;

                /*
                 * if we are aborting the operation, no point in inserting the
                 * object into the AIL as we are in a shutdown situation.
                 */
                if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
                        lip->li_ops->iop_unpin(lip, 1);
                        continue;
                }

                if (item_lsn != commit_lsn) {

                        /*
                         * Not a bulk update option due to unusual item_lsn.
                         * Push into AIL immediately, rechecking the lsn once
                         * we have the ail lock. Then unpin the item. This does
                         * not affect the AIL cursor the bulk insert path is
                         * using.
                         */
                        spin_lock(&ailp->xa_lock);
                        if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
                                xfs_trans_ail_update(ailp, lip, item_lsn);
                        else
                                spin_unlock(&ailp->xa_lock);
                        lip->li_ops->iop_unpin(lip, 0);
                        continue;
                }

                /* Item is a candidate for bulk AIL insert.  */
                log_items[i++] = lv->lv_item;
                if (i >= LOG_ITEM_BATCH_SIZE) {
                        xfs_log_item_batch_insert(ailp, &cur, log_items,
                                        LOG_ITEM_BATCH_SIZE, commit_lsn);
                        i = 0;
                }
        }

        /* make sure we insert the remainder! */
        if (i)
                xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->xa_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
        struct xfs_trans        *tp,
        bool                    regrant)
{
        struct xfs_mount        *mp = tp->t_mountp;
        xfs_lsn_t               commit_lsn = -1;
        int                     error = 0;
        int                     sync = tp->t_flags & XFS_TRANS_SYNC;

        /*
         * If there is nothing to be logged by the transaction,
         * then unlock all of the items associated with the
         * transaction and free the transaction structure.
         * Also make sure to return any reserved blocks to
         * the free pool.
         */
        if (!(tp->t_flags & XFS_TRANS_DIRTY))
                goto out_unreserve;

        if (XFS_FORCED_SHUTDOWN(mp)) {
                error = -EIO;
                goto out_unreserve;
        }

        ASSERT(tp->t_ticket != NULL);

        /*
         * If we need to update the superblock, then do it now.
         */
        if (tp->t_flags & XFS_TRANS_SB_DIRTY)
                xfs_trans_apply_sb_deltas(tp);
        xfs_trans_apply_dquot_deltas(tp);

        xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
        xfs_trans_free(tp);

        /*
         * If the transaction needs to be synchronous, then force the
         * log out now and wait for it.
         */
        if (sync) {
                error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
                XFS_STATS_INC(mp, xs_trans_sync);
        } else {
                XFS_STATS_INC(mp, xs_trans_async);
        }

        return error;

out_unreserve:
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * It is indeed possible for the transaction to be not dirty but
         * the dqinfo portion to be.  All that means is that we have some
         * (non-persistent) quota reservations that need to be unreserved.
         */
        xfs_trans_unreserve_and_mod_dquots(tp);
        if (tp->t_ticket) {
                commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
                if (commit_lsn == -1 && !error)
                        error = -EIO;
        }
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
        xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
        xfs_trans_free(tp);

        XFS_STATS_INC(mp, xs_trans_empty);
        return error;
}
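/*
 * Commit the transaction and release its log reservation for good.
 * Callers that need to keep a permanent reservation alive across the
 * commit go through __xfs_trans_roll(), which commits with regrant set.
 */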
int
xfs_trans_commit(
        struct xfs_trans        *tp)
{
        return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        bool                    dirty = (tp->t_flags & XFS_TRANS_DIRTY);

        /*
         * See if the caller is relying on us to shut down the
         * filesystem.  This happens in paths where we detect
         * corruption and decide to give up.
         */
        if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        }
#ifdef DEBUG
        if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
                struct xfs_log_item_desc *lidp;

                list_for_each_entry(lidp, &tp->t_items, lid_trans)
                        ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
        }
#endif
        xfs_trans_unreserve_and_mod_sb(tp);
        xfs_trans_unreserve_and_mod_dquots(tp);

        if (tp->t_ticket)
                xfs_log_done(mp, tp->t_ticket, NULL, false);

        /* mark this thread as no longer being in a transaction */
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

        xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
        xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
__xfs_trans_roll(
        struct xfs_trans        **tpp,
        struct xfs_inode        *dp,
        int                     *committed)
{
        struct xfs_trans        *trans;
        struct xfs_trans_res    tres;
        int                     error;

        *committed = 0;

        /*
         * Ensure that the inode is always logged.
         */
        trans = *tpp;
        if (dp)
                xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

        /*
         * Copy the critical parameters from one trans to the next.
         */
        tres.tr_logres = trans->t_log_res;
        tres.tr_logcount = trans->t_log_count;
        *tpp = xfs_trans_dup(trans);

        /*
         * Commit the current transaction.
         * If this commit failed, then it'd just unlock those items that
         * are not marked ihold. That also means that a filesystem shutdown
         * is in progress. The caller takes the responsibility to cancel
         * the duplicate transaction that gets returned.
         */
        error = __xfs_trans_commit(trans, true);
        if (error)
                return error;

        *committed = 1;
        trans = *tpp;

        /*
         * Reserve space in the log for the next transaction.
         * This also pushes items in the "AIL", the list of logged items,
         * out to disk if they are taking up space at the tail of the log
         * that we want to use. This requires that either nothing be locked
         * across this call, or that anything that is locked be logged in
         * the prior and the next transactions.
         */
        tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
        error = xfs_trans_reserve(trans, &tres, 0, 0);
        /*
         * Ensure that the inode is in the new transaction and locked.
         */
        if (error)
                return error;

        if (dp)
                xfs_trans_ijoin(trans, dp, 0);
        return 0;
}

int
xfs_trans_roll(
        struct xfs_trans        **tpp,
        struct xfs_inode        *dp)
{
        int                     committed;

        return __xfs_trans_roll(tpp, dp, &committed);
}
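/*
 * Illustrative sketch of a rolling-transaction loop; the loop condition
 * and the per-iteration work are examples only, not taken from this file:
 *
 *      while (more_work) {
 *              ... dirty some of ip's metadata under the permanent
 *                  reservation ...
 *              error = xfs_trans_roll(&tp, ip);
 *              if (error)
 *                      break;
 *      }
 *
 * Each successful roll commits the dirty chunk and returns with a fresh
 * transaction holding the same permanent log reservation, with the inode
 * relogged and rejoined by __xfs_trans_roll().
 */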