/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"

kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
}

/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_trans_t	*tp;

	sb_start_intwrite(mp->m_super);
	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
	return tp;
}

xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type,
	xfs_km_flags_t	memflags)
{
	xfs_trans_t	*tp;

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	return tp;
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}
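/*
 * A minimal sketch of the allocate/reserve/commit lifecycle, assuming a
 * caller that modifies a single inode.  The transaction type and the
 * tr_ichange reservation are illustrative, not prescriptive:
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
 *	if (error) {
 *		xfs_trans_cancel(tp);
 *		return error;
 *	}
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_commit(tp);
 */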
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
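/*
 * Worked example of the reservation handoff above (numbers are
 * illustrative): if the old transaction reserved 10 blocks
 * (t_blk_res == 10) and has consumed 4 of them (t_blk_res_used == 4),
 * the duplicate starts with t_blk_res == 6 and the original is clamped
 * to t_blk_res == 4, so between the two transactions no more than the
 * original 10 reserved blocks can ever be allocated.
 */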
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the tr_logflags field of the reservation is
 * XFS_TRANS_PERM_LOG_RES, which is used by long running transactions.
 * If any one of the reservations fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent, tp->t_type);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}
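/*
 * Sketch of a permanent (rolling) reservation, assuming a typical
 * truncate-style caller; the block count nblks is illustrative.
 * Because tr_itruncate carries XFS_TRANS_PERM_LOG_RES in tr_logflags,
 * a later xfs_trans_reserve() on the rolled transaction takes the
 * xfs_log_regrant() path above instead of a fresh xfs_log_reserve():
 *
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, nblks, 0);
 */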
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in
		 * the transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
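/*
 * A minimal usage sketch (nblocks is illustrative): an allocator that
 * takes nblocks free blocks out of the filesystem inside a transaction
 * records the change as
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)nblocks);
 *
 * and the accumulated delta is applied to the superblock at commit
 * time, charged against the blocks reserved by xfs_trans_reserve().
 */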
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}
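/*
 * Behaviour sketch for the helpers above (values illustrative): with
 * *field == 2, a delta of -3 drives the signed copy negative, so the
 * helper leaves *field untouched and returns -EINVAL, while a delta of
 * -2 succeeds and stores 0.  The caller below relies on this to detect
 * underflow of the on-disk geometry counters before committing to the
 * change.
 */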
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because those changes have already been
 * applied.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock.  In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				    tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;
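	/*
	 * The labels below unwind in exactly the reverse order of the
	 * modifications above: each is reached only when a later counter
	 * failed to apply, and the m_sb_lock stays held until
	 * out_undo_ifree drops it before backing out the per-cpu
	 * counters.
	 */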
out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}

/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}

STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}

/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}
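/*
 * Pairing sketch for the two helpers above (the callers named here are
 * illustrative): join helpers such as xfs_trans_ijoin() call
 * xfs_trans_add_item() when an object enters the transaction, and paths
 * that release an object early, e.g. xfs_trans_brelse(), call
 * xfs_trans_del_item() so the descriptor is unhooked before commit.
 */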
/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	bool			abort)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (abort)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL.  This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed.  In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet.  Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process.  If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 * find the insertion point on every xfs_log_item_batch_insert() call.  This
 * saves a lot of needless list walking and is a net win, even though it
 * slightly increases the amount of AIL lock traffic to set it up and tear it
 * down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock.  Then unpin the item.  This
			 * does not affect the AIL cursor the bulk insert path
			 * is using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}
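/*
 * Sketch of the error contract described below, from a hypothetical
 * caller's point of view: on failure the items joined to the
 * transaction have already been unlocked and the transaction freed, so
 * the caller only propagates the error:
 *
 *	error = xfs_trans_commit(tp);
 *	if (error)
 *		return error;	/- tp is gone; do not touch it -/
 */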
/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism.  Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent.  In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
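/*
 * A caller that needs the commit to be durable before proceeding can
 * mark the transaction synchronous first (a sketch; xfs_trans_set_sync()
 * lives in xfs_trans.h and simply sets XFS_TRANS_SYNC, which makes the
 * commit path above force and wait for the log):
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp);
 */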
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket)
		xfs_log_done(mp, tp->t_ticket, NULL, false);

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible.  So we commit the chunk
 * we've been working on and get a new transaction to continue.
 */
int
__xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp,
	int			*committed)
{
	struct xfs_trans	*trans;
	struct xfs_trans_res	tres;
	int			error;

	/*
	 * Ensure that the inode is always logged.
	 */
	trans = *tpp;
	if (dp)
		xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold.  That also means that a filesystem shutdown
	 * is in progress.  The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	*committed = 1;
	trans = *tpp;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(trans, &tres, 0, 0);
	/*
	 * Ensure that the inode is in the new transaction and locked.
	 */
	if (error)
		return error;

	if (dp)
		xfs_trans_ijoin(trans, dp, 0);
	return 0;
}

int
xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp)
{
	int			committed = 0;
	return __xfs_trans_roll(tpp, dp, &committed);
}
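/*
 * Sketch of a rolling caller, assuming a long-running operation that
 * changes more metadata than one log reservation covers (the loop
 * shape is illustrative): each pass logs a bounded chunk of changes,
 * then rolls so the inode stays locked while log space is recycled:
 *
 *	while (more_work) {
 *		... log a bounded chunk of changes against ip ...
 *		error = xfs_trans_roll(&tp, ip);
 *		if (error)
 *			break;
 *	}
 */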