1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2016 Oracle. All Rights Reserved. 4 * Author: Darrick J. Wong <darrick.wong@oracle.com> 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_defer.h" 14 #include "xfs_trans.h" 15 #include "xfs_buf_item.h" 16 #include "xfs_inode.h" 17 #include "xfs_inode_item.h" 18 #include "xfs_trace.h" 19 #include "xfs_icache.h" 20 #include "xfs_log.h" 21 #include "xfs_rmap.h" 22 #include "xfs_refcount.h" 23 #include "xfs_bmap.h" 24 #include "xfs_alloc.h" 25 26 static struct kmem_cache *xfs_defer_pending_cache; 27 28 /* 29 * Deferred Operations in XFS 30 * 31 * Due to the way locking rules work in XFS, certain transactions (block 32 * mapping and unmapping, typically) have permanent reservations so that 33 * we can roll the transaction to adhere to AG locking order rules and 34 * to unlock buffers between metadata updates. Prior to rmap/reflink, 35 * the mapping code had a mechanism to perform these deferrals for 36 * extents that were going to be freed; this code makes that facility 37 * more generic. 38 * 39 * When adding the reverse mapping and reflink features, it became 40 * necessary to perform complex remapping multi-transactions to comply 41 * with AG locking order rules, and to be able to spread a single 42 * refcount update operation (an operation on an n-block extent can 43 * update as many as n records!) among multiple transactions. XFS can 44 * roll a transaction to facilitate this, but using this facility 45 * requires us to log "intent" items in case log recovery needs to 46 * redo the operation, and to log "done" items to indicate that redo 47 * is not necessary. 48 * 49 * Deferred work is tracked in xfs_defer_pending items. Each pending 50 * item tracks one type of deferred work. Incoming work items (which 51 * have not yet had an intent logged) are attached to a pending item 52 * on the dop_intake list, where they wait for the caller to finish 53 * the deferred operations. 54 * 55 * Finishing a set of deferred operations is an involved process. To 56 * start, we define "rolling a deferred-op transaction" as follows: 57 * 58 * > For each xfs_defer_pending item on the dop_intake list, 59 * - Sort the work items in AG order. XFS locking 60 * order rules require us to lock buffers in AG order. 61 * - Create a log intent item for that type. 62 * - Attach it to the pending item. 63 * - Move the pending item from the dop_intake list to the 64 * dop_pending list. 65 * > Roll the transaction. 66 * 67 * NOTE: To avoid exceeding the transaction reservation, we limit the 68 * number of items that we attach to a given xfs_defer_pending. 69 * 70 * The actual finishing process looks like this: 71 * 72 * > For each xfs_defer_pending in the dop_pending list, 73 * - Roll the deferred-op transaction as above. 74 * - Create a log done item for that type, and attach it to the 75 * log intent item. 76 * - For each work item attached to the log intent item, 77 * * Perform the described action. 78 * * Attach the work item to the log done item. 79 * * If the result of doing the work was -EAGAIN, ->finish work 80 * wants a new transaction. See the "Requesting a Fresh 81 * Transaction while Finishing Deferred Work" section below for 82 * details. 83 * 84 * The key here is that we must log an intent item for all pending 85 * work items every time we roll the transaction, and that we must log 86 * a done item as soon as the work is completed. With this mechanism 87 * we can perform complex remapping operations, chaining intent items 88 * as needed. 89 * 90 * Requesting a Fresh Transaction while Finishing Deferred Work 91 * 92 * If ->finish_item decides that it needs a fresh transaction to 93 * finish the work, it must ask its caller (xfs_defer_finish) for a 94 * continuation. The most likely cause of this circumstance are the 95 * refcount adjust functions deciding that they've logged enough items 96 * to be at risk of exceeding the transaction reservation. 97 * 98 * To get a fresh transaction, we want to log the existing log done 99 * item to prevent the log intent item from replaying, immediately log 100 * a new log intent item with the unfinished work items, roll the 101 * transaction, and re-call ->finish_item wherever it left off. The 102 * log done item and the new log intent item must be in the same 103 * transaction or atomicity cannot be guaranteed; defer_finish ensures 104 * that this happens. 105 * 106 * This requires some coordination between ->finish_item and 107 * defer_finish. Upon deciding to request a new transaction, 108 * ->finish_item should update the current work item to reflect the 109 * unfinished work. Next, it should reset the log done item's list 110 * count to the number of items finished, and return -EAGAIN. 111 * defer_finish sees the -EAGAIN, logs the new log intent item 112 * with the remaining work items, and leaves the xfs_defer_pending 113 * item at the head of the dop_work queue. Then it rolls the 114 * transaction and picks up processing where it left off. It is 115 * required that ->finish_item must be careful to leave enough 116 * transaction reservation to fit the new log intent item. 117 * 118 * This is an example of remapping the extent (E, E+B) into file X at 119 * offset A and dealing with the extent (C, C+B) already being mapped 120 * there: 121 * +-------------------------------------------------+ 122 * | Unmap file X startblock C offset A length B | t0 123 * | Intent to reduce refcount for extent (C, B) | 124 * | Intent to remove rmap (X, C, A, B) | 125 * | Intent to free extent (D, 1) (bmbt block) | 126 * | Intent to map (X, A, B) at startblock E | 127 * +-------------------------------------------------+ 128 * | Map file X startblock E offset A length B | t1 129 * | Done mapping (X, E, A, B) | 130 * | Intent to increase refcount for extent (E, B) | 131 * | Intent to add rmap (X, E, A, B) | 132 * +-------------------------------------------------+ 133 * | Reduce refcount for extent (C, B) | t2 134 * | Done reducing refcount for extent (C, 9) | 135 * | Intent to reduce refcount for extent (C+9, B-9) | 136 * | (ran out of space after 9 refcount updates) | 137 * +-------------------------------------------------+ 138 * | Reduce refcount for extent (C+9, B+9) | t3 139 * | Done reducing refcount for extent (C+9, B-9) | 140 * | Increase refcount for extent (E, B) | 141 * | Done increasing refcount for extent (E, B) | 142 * | Intent to free extent (C, B) | 143 * | Intent to free extent (F, 1) (refcountbt block) | 144 * | Intent to remove rmap (F, 1, REFC) | 145 * +-------------------------------------------------+ 146 * | Remove rmap (X, C, A, B) | t4 147 * | Done removing rmap (X, C, A, B) | 148 * | Add rmap (X, E, A, B) | 149 * | Done adding rmap (X, E, A, B) | 150 * | Remove rmap (F, 1, REFC) | 151 * | Done removing rmap (F, 1, REFC) | 152 * +-------------------------------------------------+ 153 * | Free extent (C, B) | t5 154 * | Done freeing extent (C, B) | 155 * | Free extent (D, 1) | 156 * | Done freeing extent (D, 1) | 157 * | Free extent (F, 1) | 158 * | Done freeing extent (F, 1) | 159 * +-------------------------------------------------+ 160 * 161 * If we should crash before t2 commits, log recovery replays 162 * the following intent items: 163 * 164 * - Intent to reduce refcount for extent (C, B) 165 * - Intent to remove rmap (X, C, A, B) 166 * - Intent to free extent (D, 1) (bmbt block) 167 * - Intent to increase refcount for extent (E, B) 168 * - Intent to add rmap (X, E, A, B) 169 * 170 * In the process of recovering, it should also generate and take care 171 * of these intent items: 172 * 173 * - Intent to free extent (C, B) 174 * - Intent to free extent (F, 1) (refcountbt block) 175 * - Intent to remove rmap (F, 1, REFC) 176 * 177 * Note that the continuation requested between t2 and t3 is likely to 178 * reoccur. 179 */ 180 181 static const struct xfs_defer_op_type *defer_op_types[] = { 182 [XFS_DEFER_OPS_TYPE_BMAP] = &xfs_bmap_update_defer_type, 183 [XFS_DEFER_OPS_TYPE_REFCOUNT] = &xfs_refcount_update_defer_type, 184 [XFS_DEFER_OPS_TYPE_RMAP] = &xfs_rmap_update_defer_type, 185 [XFS_DEFER_OPS_TYPE_FREE] = &xfs_extent_free_defer_type, 186 [XFS_DEFER_OPS_TYPE_AGFL_FREE] = &xfs_agfl_free_defer_type, 187 }; 188 189 static bool 190 xfs_defer_create_intent( 191 struct xfs_trans *tp, 192 struct xfs_defer_pending *dfp, 193 bool sort) 194 { 195 const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type]; 196 197 if (!dfp->dfp_intent) 198 dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work, 199 dfp->dfp_count, sort); 200 return dfp->dfp_intent != NULL; 201 } 202 203 /* 204 * For each pending item in the intake list, log its intent item and the 205 * associated extents, then add the entire intake list to the end of 206 * the pending list. 207 */ 208 static bool 209 xfs_defer_create_intents( 210 struct xfs_trans *tp) 211 { 212 struct xfs_defer_pending *dfp; 213 bool ret = false; 214 215 list_for_each_entry(dfp, &tp->t_dfops, dfp_list) { 216 trace_xfs_defer_create_intent(tp->t_mountp, dfp); 217 ret |= xfs_defer_create_intent(tp, dfp, true); 218 } 219 return ret; 220 } 221 222 /* Abort all the intents that were committed. */ 223 STATIC void 224 xfs_defer_trans_abort( 225 struct xfs_trans *tp, 226 struct list_head *dop_pending) 227 { 228 struct xfs_defer_pending *dfp; 229 const struct xfs_defer_op_type *ops; 230 231 trace_xfs_defer_trans_abort(tp, _RET_IP_); 232 233 /* Abort intent items that don't have a done item. */ 234 list_for_each_entry(dfp, dop_pending, dfp_list) { 235 ops = defer_op_types[dfp->dfp_type]; 236 trace_xfs_defer_pending_abort(tp->t_mountp, dfp); 237 if (dfp->dfp_intent && !dfp->dfp_done) { 238 ops->abort_intent(dfp->dfp_intent); 239 dfp->dfp_intent = NULL; 240 } 241 } 242 } 243 244 /* 245 * Capture resources that the caller said not to release ("held") when the 246 * transaction commits. Caller is responsible for zero-initializing @dres. 247 */ 248 static int 249 xfs_defer_save_resources( 250 struct xfs_defer_resources *dres, 251 struct xfs_trans *tp) 252 { 253 struct xfs_buf_log_item *bli; 254 struct xfs_inode_log_item *ili; 255 struct xfs_log_item *lip; 256 257 BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS); 258 259 list_for_each_entry(lip, &tp->t_items, li_trans) { 260 switch (lip->li_type) { 261 case XFS_LI_BUF: 262 bli = container_of(lip, struct xfs_buf_log_item, 263 bli_item); 264 if (bli->bli_flags & XFS_BLI_HOLD) { 265 if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) { 266 ASSERT(0); 267 return -EFSCORRUPTED; 268 } 269 if (bli->bli_flags & XFS_BLI_ORDERED) 270 dres->dr_ordered |= 271 (1U << dres->dr_bufs); 272 else 273 xfs_trans_dirty_buf(tp, bli->bli_buf); 274 dres->dr_bp[dres->dr_bufs++] = bli->bli_buf; 275 } 276 break; 277 case XFS_LI_INODE: 278 ili = container_of(lip, struct xfs_inode_log_item, 279 ili_item); 280 if (ili->ili_lock_flags == 0) { 281 if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) { 282 ASSERT(0); 283 return -EFSCORRUPTED; 284 } 285 xfs_trans_log_inode(tp, ili->ili_inode, 286 XFS_ILOG_CORE); 287 dres->dr_ip[dres->dr_inos++] = ili->ili_inode; 288 } 289 break; 290 default: 291 break; 292 } 293 } 294 295 return 0; 296 } 297 298 /* Attach the held resources to the transaction. */ 299 static void 300 xfs_defer_restore_resources( 301 struct xfs_trans *tp, 302 struct xfs_defer_resources *dres) 303 { 304 unsigned short i; 305 306 /* Rejoin the joined inodes. */ 307 for (i = 0; i < dres->dr_inos; i++) 308 xfs_trans_ijoin(tp, dres->dr_ip[i], 0); 309 310 /* Rejoin the buffers and dirty them so the log moves forward. */ 311 for (i = 0; i < dres->dr_bufs; i++) { 312 xfs_trans_bjoin(tp, dres->dr_bp[i]); 313 if (dres->dr_ordered & (1U << i)) 314 xfs_trans_ordered_buf(tp, dres->dr_bp[i]); 315 xfs_trans_bhold(tp, dres->dr_bp[i]); 316 } 317 } 318 319 /* Roll a transaction so we can do some deferred op processing. */ 320 STATIC int 321 xfs_defer_trans_roll( 322 struct xfs_trans **tpp) 323 { 324 struct xfs_defer_resources dres = { }; 325 int error; 326 327 error = xfs_defer_save_resources(&dres, *tpp); 328 if (error) 329 return error; 330 331 trace_xfs_defer_trans_roll(*tpp, _RET_IP_); 332 333 /* 334 * Roll the transaction. Rolling always given a new transaction (even 335 * if committing the old one fails!) to hand back to the caller, so we 336 * join the held resources to the new transaction so that we always 337 * return with the held resources joined to @tpp, no matter what 338 * happened. 339 */ 340 error = xfs_trans_roll(tpp); 341 342 xfs_defer_restore_resources(*tpp, &dres); 343 344 if (error) 345 trace_xfs_defer_trans_roll_error(*tpp, error); 346 return error; 347 } 348 349 /* 350 * Free up any items left in the list. 351 */ 352 static void 353 xfs_defer_cancel_list( 354 struct xfs_mount *mp, 355 struct list_head *dop_list) 356 { 357 struct xfs_defer_pending *dfp; 358 struct xfs_defer_pending *pli; 359 struct list_head *pwi; 360 struct list_head *n; 361 const struct xfs_defer_op_type *ops; 362 363 /* 364 * Free the pending items. Caller should already have arranged 365 * for the intent items to be released. 366 */ 367 list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) { 368 ops = defer_op_types[dfp->dfp_type]; 369 trace_xfs_defer_cancel_list(mp, dfp); 370 list_del(&dfp->dfp_list); 371 list_for_each_safe(pwi, n, &dfp->dfp_work) { 372 list_del(pwi); 373 dfp->dfp_count--; 374 ops->cancel_item(pwi); 375 } 376 ASSERT(dfp->dfp_count == 0); 377 kmem_cache_free(xfs_defer_pending_cache, dfp); 378 } 379 } 380 381 /* 382 * Prevent a log intent item from pinning the tail of the log by logging a 383 * done item to release the intent item; and then log a new intent item. 384 * The caller should provide a fresh transaction and roll it after we're done. 385 */ 386 static int 387 xfs_defer_relog( 388 struct xfs_trans **tpp, 389 struct list_head *dfops) 390 { 391 struct xlog *log = (*tpp)->t_mountp->m_log; 392 struct xfs_defer_pending *dfp; 393 xfs_lsn_t threshold_lsn = NULLCOMMITLSN; 394 395 396 ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES); 397 398 list_for_each_entry(dfp, dfops, dfp_list) { 399 /* 400 * If the log intent item for this deferred op is not a part of 401 * the current log checkpoint, relog the intent item to keep 402 * the log tail moving forward. We're ok with this being racy 403 * because an incorrect decision means we'll be a little slower 404 * at pushing the tail. 405 */ 406 if (dfp->dfp_intent == NULL || 407 xfs_log_item_in_current_chkpt(dfp->dfp_intent)) 408 continue; 409 410 /* 411 * Figure out where we need the tail to be in order to maintain 412 * the minimum required free space in the log. Only sample 413 * the log threshold once per call. 414 */ 415 if (threshold_lsn == NULLCOMMITLSN) { 416 threshold_lsn = xlog_grant_push_threshold(log, 0); 417 if (threshold_lsn == NULLCOMMITLSN) 418 break; 419 } 420 if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0) 421 continue; 422 423 trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp); 424 XFS_STATS_INC((*tpp)->t_mountp, defer_relog); 425 dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp); 426 } 427 428 if ((*tpp)->t_flags & XFS_TRANS_DIRTY) 429 return xfs_defer_trans_roll(tpp); 430 return 0; 431 } 432 433 /* 434 * Log an intent-done item for the first pending intent, and finish the work 435 * items. 436 */ 437 static int 438 xfs_defer_finish_one( 439 struct xfs_trans *tp, 440 struct xfs_defer_pending *dfp) 441 { 442 const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type]; 443 struct xfs_btree_cur *state = NULL; 444 struct list_head *li, *n; 445 int error; 446 447 trace_xfs_defer_pending_finish(tp->t_mountp, dfp); 448 449 dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count); 450 list_for_each_safe(li, n, &dfp->dfp_work) { 451 list_del(li); 452 dfp->dfp_count--; 453 error = ops->finish_item(tp, dfp->dfp_done, li, &state); 454 if (error == -EAGAIN) { 455 /* 456 * Caller wants a fresh transaction; put the work item 457 * back on the list and log a new log intent item to 458 * replace the old one. See "Requesting a Fresh 459 * Transaction while Finishing Deferred Work" above. 460 */ 461 list_add(li, &dfp->dfp_work); 462 dfp->dfp_count++; 463 dfp->dfp_done = NULL; 464 dfp->dfp_intent = NULL; 465 xfs_defer_create_intent(tp, dfp, false); 466 } 467 468 if (error) 469 goto out; 470 } 471 472 /* Done with the dfp, free it. */ 473 list_del(&dfp->dfp_list); 474 kmem_cache_free(xfs_defer_pending_cache, dfp); 475 out: 476 if (ops->finish_cleanup) 477 ops->finish_cleanup(tp, state, error); 478 return error; 479 } 480 481 /* 482 * Finish all the pending work. This involves logging intent items for 483 * any work items that wandered in since the last transaction roll (if 484 * one has even happened), rolling the transaction, and finishing the 485 * work items in the first item on the logged-and-pending list. 486 * 487 * If an inode is provided, relog it to the new transaction. 488 */ 489 int 490 xfs_defer_finish_noroll( 491 struct xfs_trans **tp) 492 { 493 struct xfs_defer_pending *dfp = NULL; 494 int error = 0; 495 LIST_HEAD(dop_pending); 496 497 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); 498 499 trace_xfs_defer_finish(*tp, _RET_IP_); 500 501 /* Until we run out of pending work to finish... */ 502 while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) { 503 /* 504 * Deferred items that are created in the process of finishing 505 * other deferred work items should be queued at the head of 506 * the pending list, which puts them ahead of the deferred work 507 * that was created by the caller. This keeps the number of 508 * pending work items to a minimum, which decreases the amount 509 * of time that any one intent item can stick around in memory, 510 * pinning the log tail. 511 */ 512 bool has_intents = xfs_defer_create_intents(*tp); 513 514 list_splice_init(&(*tp)->t_dfops, &dop_pending); 515 516 if (has_intents || dfp) { 517 error = xfs_defer_trans_roll(tp); 518 if (error) 519 goto out_shutdown; 520 521 /* Relog intent items to keep the log moving. */ 522 error = xfs_defer_relog(tp, &dop_pending); 523 if (error) 524 goto out_shutdown; 525 } 526 527 dfp = list_first_entry(&dop_pending, struct xfs_defer_pending, 528 dfp_list); 529 error = xfs_defer_finish_one(*tp, dfp); 530 if (error && error != -EAGAIN) 531 goto out_shutdown; 532 } 533 534 trace_xfs_defer_finish_done(*tp, _RET_IP_); 535 return 0; 536 537 out_shutdown: 538 xfs_defer_trans_abort(*tp, &dop_pending); 539 xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE); 540 trace_xfs_defer_finish_error(*tp, error); 541 xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending); 542 xfs_defer_cancel(*tp); 543 return error; 544 } 545 546 int 547 xfs_defer_finish( 548 struct xfs_trans **tp) 549 { 550 int error; 551 552 /* 553 * Finish and roll the transaction once more to avoid returning to the 554 * caller with a dirty transaction. 555 */ 556 error = xfs_defer_finish_noroll(tp); 557 if (error) 558 return error; 559 if ((*tp)->t_flags & XFS_TRANS_DIRTY) { 560 error = xfs_defer_trans_roll(tp); 561 if (error) { 562 xfs_force_shutdown((*tp)->t_mountp, 563 SHUTDOWN_CORRUPT_INCORE); 564 return error; 565 } 566 } 567 568 /* Reset LOWMODE now that we've finished all the dfops. */ 569 ASSERT(list_empty(&(*tp)->t_dfops)); 570 (*tp)->t_flags &= ~XFS_TRANS_LOWMODE; 571 return 0; 572 } 573 574 void 575 xfs_defer_cancel( 576 struct xfs_trans *tp) 577 { 578 struct xfs_mount *mp = tp->t_mountp; 579 580 trace_xfs_defer_cancel(tp, _RET_IP_); 581 xfs_defer_cancel_list(mp, &tp->t_dfops); 582 } 583 584 /* Add an item for later deferred processing. */ 585 void 586 xfs_defer_add( 587 struct xfs_trans *tp, 588 enum xfs_defer_ops_type type, 589 struct list_head *li) 590 { 591 struct xfs_defer_pending *dfp = NULL; 592 const struct xfs_defer_op_type *ops; 593 594 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 595 BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX); 596 597 /* 598 * Add the item to a pending item at the end of the intake list. 599 * If the last pending item has the same type, reuse it. Else, 600 * create a new pending item at the end of the intake list. 601 */ 602 if (!list_empty(&tp->t_dfops)) { 603 dfp = list_last_entry(&tp->t_dfops, 604 struct xfs_defer_pending, dfp_list); 605 ops = defer_op_types[dfp->dfp_type]; 606 if (dfp->dfp_type != type || 607 (ops->max_items && dfp->dfp_count >= ops->max_items)) 608 dfp = NULL; 609 } 610 if (!dfp) { 611 dfp = kmem_cache_zalloc(xfs_defer_pending_cache, 612 GFP_NOFS | __GFP_NOFAIL); 613 dfp->dfp_type = type; 614 dfp->dfp_intent = NULL; 615 dfp->dfp_done = NULL; 616 dfp->dfp_count = 0; 617 INIT_LIST_HEAD(&dfp->dfp_work); 618 list_add_tail(&dfp->dfp_list, &tp->t_dfops); 619 } 620 621 list_add_tail(li, &dfp->dfp_work); 622 dfp->dfp_count++; 623 } 624 625 /* 626 * Move deferred ops from one transaction to another and reset the source to 627 * initial state. This is primarily used to carry state forward across 628 * transaction rolls with pending dfops. 629 */ 630 void 631 xfs_defer_move( 632 struct xfs_trans *dtp, 633 struct xfs_trans *stp) 634 { 635 list_splice_init(&stp->t_dfops, &dtp->t_dfops); 636 637 /* 638 * Low free space mode was historically controlled by a dfops field. 639 * This meant that low mode state potentially carried across multiple 640 * transaction rolls. Transfer low mode on a dfops move to preserve 641 * that behavior. 642 */ 643 dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE); 644 stp->t_flags &= ~XFS_TRANS_LOWMODE; 645 } 646 647 /* 648 * Prepare a chain of fresh deferred ops work items to be completed later. Log 649 * recovery requires the ability to put off until later the actual finishing 650 * work so that it can process unfinished items recovered from the log in 651 * correct order. 652 * 653 * Create and log intent items for all the work that we're capturing so that we 654 * can be assured that the items will get replayed if the system goes down 655 * before log recovery gets a chance to finish the work it put off. The entire 656 * deferred ops state is transferred to the capture structure and the 657 * transaction is then ready for the caller to commit it. If there are no 658 * intent items to capture, this function returns NULL. 659 * 660 * If capture_ip is not NULL, the capture structure will obtain an extra 661 * reference to the inode. 662 */ 663 static struct xfs_defer_capture * 664 xfs_defer_ops_capture( 665 struct xfs_trans *tp) 666 { 667 struct xfs_defer_capture *dfc; 668 unsigned short i; 669 int error; 670 671 if (list_empty(&tp->t_dfops)) 672 return NULL; 673 674 /* Create an object to capture the defer ops. */ 675 dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS); 676 INIT_LIST_HEAD(&dfc->dfc_list); 677 INIT_LIST_HEAD(&dfc->dfc_dfops); 678 679 xfs_defer_create_intents(tp); 680 681 /* Move the dfops chain and transaction state to the capture struct. */ 682 list_splice_init(&tp->t_dfops, &dfc->dfc_dfops); 683 dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE; 684 tp->t_flags &= ~XFS_TRANS_LOWMODE; 685 686 /* Capture the remaining block reservations along with the dfops. */ 687 dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used; 688 dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used; 689 690 /* Preserve the log reservation size. */ 691 dfc->dfc_logres = tp->t_log_res; 692 693 error = xfs_defer_save_resources(&dfc->dfc_held, tp); 694 if (error) { 695 /* 696 * Resource capture should never fail, but if it does, we 697 * still have to shut down the log and release things 698 * properly. 699 */ 700 xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE); 701 } 702 703 /* 704 * Grab extra references to the inodes and buffers because callers are 705 * expected to release their held references after we commit the 706 * transaction. 707 */ 708 for (i = 0; i < dfc->dfc_held.dr_inos; i++) { 709 ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL)); 710 ihold(VFS_I(dfc->dfc_held.dr_ip[i])); 711 } 712 713 for (i = 0; i < dfc->dfc_held.dr_bufs; i++) 714 xfs_buf_hold(dfc->dfc_held.dr_bp[i]); 715 716 return dfc; 717 } 718 719 /* Release all resources that we used to capture deferred ops. */ 720 void 721 xfs_defer_ops_capture_free( 722 struct xfs_mount *mp, 723 struct xfs_defer_capture *dfc) 724 { 725 unsigned short i; 726 727 xfs_defer_cancel_list(mp, &dfc->dfc_dfops); 728 729 for (i = 0; i < dfc->dfc_held.dr_bufs; i++) 730 xfs_buf_relse(dfc->dfc_held.dr_bp[i]); 731 732 for (i = 0; i < dfc->dfc_held.dr_inos; i++) 733 xfs_irele(dfc->dfc_held.dr_ip[i]); 734 735 kmem_free(dfc); 736 } 737 738 /* 739 * Capture any deferred ops and commit the transaction. This is the last step 740 * needed to finish a log intent item that we recovered from the log. If any 741 * of the deferred ops operate on an inode, the caller must pass in that inode 742 * so that the reference can be transferred to the capture structure. The 743 * caller must hold ILOCK_EXCL on the inode, and must unlock it before calling 744 * xfs_defer_ops_continue. 745 */ 746 int 747 xfs_defer_ops_capture_and_commit( 748 struct xfs_trans *tp, 749 struct list_head *capture_list) 750 { 751 struct xfs_mount *mp = tp->t_mountp; 752 struct xfs_defer_capture *dfc; 753 int error; 754 755 /* If we don't capture anything, commit transaction and exit. */ 756 dfc = xfs_defer_ops_capture(tp); 757 if (!dfc) 758 return xfs_trans_commit(tp); 759 760 /* Commit the transaction and add the capture structure to the list. */ 761 error = xfs_trans_commit(tp); 762 if (error) { 763 xfs_defer_ops_capture_free(mp, dfc); 764 return error; 765 } 766 767 list_add_tail(&dfc->dfc_list, capture_list); 768 return 0; 769 } 770 771 /* 772 * Attach a chain of captured deferred ops to a new transaction and free the 773 * capture structure. If an inode was captured, it will be passed back to the 774 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0. 775 * The caller now owns the inode reference. 776 */ 777 void 778 xfs_defer_ops_continue( 779 struct xfs_defer_capture *dfc, 780 struct xfs_trans *tp, 781 struct xfs_defer_resources *dres) 782 { 783 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 784 ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY)); 785 786 /* Lock and join the captured inode to the new transaction. */ 787 if (dfc->dfc_held.dr_inos == 2) 788 xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL, 789 dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL); 790 else if (dfc->dfc_held.dr_inos == 1) 791 xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL); 792 xfs_defer_restore_resources(tp, &dfc->dfc_held); 793 memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources)); 794 795 /* Move captured dfops chain and state to the transaction. */ 796 list_splice_init(&dfc->dfc_dfops, &tp->t_dfops); 797 tp->t_flags |= dfc->dfc_tpflags; 798 799 kmem_free(dfc); 800 } 801 802 /* Release the resources captured and continued during recovery. */ 803 void 804 xfs_defer_resources_rele( 805 struct xfs_defer_resources *dres) 806 { 807 unsigned short i; 808 809 for (i = 0; i < dres->dr_inos; i++) { 810 xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL); 811 xfs_irele(dres->dr_ip[i]); 812 dres->dr_ip[i] = NULL; 813 } 814 815 for (i = 0; i < dres->dr_bufs; i++) { 816 xfs_buf_relse(dres->dr_bp[i]); 817 dres->dr_bp[i] = NULL; 818 } 819 820 dres->dr_inos = 0; 821 dres->dr_bufs = 0; 822 dres->dr_ordered = 0; 823 } 824 825 static inline int __init 826 xfs_defer_init_cache(void) 827 { 828 xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending", 829 sizeof(struct xfs_defer_pending), 830 0, 0, NULL); 831 832 return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM; 833 } 834 835 static inline void 836 xfs_defer_destroy_cache(void) 837 { 838 kmem_cache_destroy(xfs_defer_pending_cache); 839 xfs_defer_pending_cache = NULL; 840 } 841 842 /* Set up caches for deferred work items. */ 843 int __init 844 xfs_defer_init_item_caches(void) 845 { 846 int error; 847 848 error = xfs_defer_init_cache(); 849 if (error) 850 return error; 851 error = xfs_rmap_intent_init_cache(); 852 if (error) 853 goto err; 854 error = xfs_refcount_intent_init_cache(); 855 if (error) 856 goto err; 857 error = xfs_bmap_intent_init_cache(); 858 if (error) 859 goto err; 860 error = xfs_extfree_intent_init_cache(); 861 if (error) 862 goto err; 863 864 return 0; 865 err: 866 xfs_defer_destroy_item_caches(); 867 return error; 868 } 869 870 /* Destroy all the deferred work item caches, if they've been allocated. */ 871 void 872 xfs_defer_destroy_item_caches(void) 873 { 874 xfs_extfree_intent_destroy_cache(); 875 xfs_bmap_intent_destroy_cache(); 876 xfs_refcount_intent_destroy_cache(); 877 xfs_rmap_intent_destroy_cache(); 878 xfs_defer_destroy_cache(); 879 } 880