// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"

static struct kmem_cache	*xfs_defer_pending_cache;

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish work
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
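 *
 * A hedged caller-side sketch of that flow (the transaction setup and
 * the work item shown here are invented for illustration; xfs_defer_add()
 * and xfs_defer_finish() are the real entry points defined below):
 *
 *	tp = <allocate a transaction with a permanent log reservation>;
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new_item->list);
 *	error = xfs_defer_finish(&tp);
 *
 * xfs_defer_finish() logs the intents, rolls the transaction, finishes
 * the work items, and logs the done items, repeating until both t_dfops
 * and the pending list are empty.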
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue.  Then it rolls the
 * transaction and picks up processing where it left off.  It is
 * required that ->finish_item leave enough transaction reservation
 * to fit the new log intent item.
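 *
 * A minimal sketch of that handshake from the ->finish_item side (the
 * bracketed steps are placeholders invented for illustration; only the
 * -EAGAIN return and the done-item list count belong to the real
 * protocol described above):
 *
 *	while (<work item has unfinished units>) {
 *		if (<not enough reservation left for another unit>) {
 *			<update work item to reflect remaining work>;
 *			<set done item list count to units finished>;
 *			return -EAGAIN;
 *		}
 *		<finish one unit of work>;
 *	}
 *	return 0;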
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */

static const struct xfs_defer_op_type *defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
};

static void
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];

	if (!dfp->dfp_intent)
		dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
						     dfp->dfp_count, sort);
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_create_intents(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		xfs_defer_create_intent(tp, dfp, true);
	}
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	struct xfs_defer_pending	*dfp;
	const struct xfs_defer_op_type	*ops;

	trace_xfs_defer_trans_abort(tp, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_pending, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			ops->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}
}

/*
 * Capture resources that the caller said not to release ("held") when the
 * transaction commits.  Caller is responsible for zero-initializing @dres.
 */
static int
xfs_defer_save_resources(
	struct xfs_defer_resources	*dres,
	struct xfs_trans		*tp)
{
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;

	BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					   bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					dres->dr_ordered |=
							(1U << dres->dr_bufs);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					   ili_item);
			if (ili->ili_lock_flags == 0) {
				if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						    XFS_ILOG_CORE);
				dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Attach the held resources to the transaction. */
static void
xfs_defer_restore_resources(
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	/* Rejoin the joined inodes. */
	for (i = 0; i < dres->dr_inos; i++)
		xfs_trans_ijoin(tp, dres->dr_ip[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_trans_bjoin(tp, dres->dr_bp[i]);
		if (dres->dr_ordered & (1U << i))
			xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
		xfs_trans_bhold(tp, dres->dr_bp[i]);
	}
}
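
/*
 * Sketch of the hold/rejoin pattern that the save/restore helpers above
 * generalize.  xfs_trans_bhold(), xfs_trans_roll() and xfs_trans_bjoin()
 * are the real interfaces; the surrounding buffer setup is assumed:
 *
 *	xfs_trans_bhold(tp, bp);	keep bp locked when tp commits
 *	error = xfs_trans_roll(&tp);	bp is not joined to the new tp
 *	xfs_trans_bjoin(tp, bp);	rejoin bp to the new transaction
 */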

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_defer_resources	dres = { };
	int				error;

	error = xfs_defer_save_resources(&dres, *tpp);
	if (error)
		return error;

	trace_xfs_defer_trans_roll(*tpp, _RET_IP_);

	/*
	 * Roll the transaction.  Rolling always hands back a new transaction
	 * (even if committing the old one fails!) to the caller, so we join
	 * the held resources to the new transaction so that we always return
	 * with the held resources joined to @tpp, no matter what happened.
	 */
	error = xfs_trans_roll(tpp);

	xfs_defer_restore_resources(*tpp, &dres);

	if (error)
		trace_xfs_defer_trans_roll_error(*tpp, error);
	return error;
}

/*
 * Free up any items left in the list.
 */
static void
xfs_defer_cancel_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;
	const struct xfs_defer_op_type	*ops;

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_cancel_list(mp, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			ops->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_cache_free(xfs_defer_pending_cache, dfp);
	}
}

/*
 * Prevent a log intent item from pinning the tail of the log by logging a
 * done item to release the intent item; and then log a new intent item.
 * The caller should provide a fresh transaction and roll it after we're done.
 */
static int
xfs_defer_relog(
	struct xfs_trans		**tpp,
	struct list_head		*dfops)
{
	struct xlog			*log = (*tpp)->t_mountp->m_log;
	struct xfs_defer_pending	*dfp;
	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	list_for_each_entry(dfp, dfops, dfp_list) {
		/*
		 * If the log intent item for this deferred op is not a part
		 * of the current log checkpoint, relog the intent item to
		 * keep the log tail moving forward.  We're ok with this
		 * being racy because an incorrect decision means we'll be a
		 * little slower at pushing the tail.
		 */
		if (dfp->dfp_intent == NULL ||
		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
			continue;

		/*
		 * Figure out where we need the tail to be in order to
		 * maintain the minimum required free space in the log.
		 * Only sample the log threshold once per call.
		 */
		if (threshold_lsn == NULLCOMMITLSN) {
			threshold_lsn = xlog_grant_push_threshold(log, 0);
			if (threshold_lsn == NULLCOMMITLSN)
				break;
		}
		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
			continue;

		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
		dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
	}

	if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
		return xfs_defer_trans_roll(tpp);
	return 0;
}
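
/*
 * Worked illustration of the relog decision above (the LSN values are
 * invented for the example): if the intent committed at li_lsn
 * (cycle 7, block 100) and xlog_grant_push_threshold() says the tail
 * must advance past (cycle 7, block 5000), then XFS_LSN_CMP() < 0 and
 * the intent is relogged so that it no longer pins the old log tail.
 */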

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
static int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_btree_cur		*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one.  See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			xfs_defer_create_intent(tp, dfp, false);
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}
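
/*
 * The per-type hooks used above come from struct xfs_defer_op_type.  A
 * summary of the contract, in the order this file invokes it (the field
 * names are the real ones used in this file; the flow is a paraphrase):
 *
 *	->create_intent()	log an intent for up to max_items items
 *	->create_done()		log a done item paired with the intent
 *	->finish_item()		do one unit of work; returns -EAGAIN to
 *				request a fresh transaction
 *	->finish_cleanup()	optional; releases btree cursor state
 *	->abort_intent()	on error, release a committed intent
 *	->cancel_item()		drop an unprocessed work item
 */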

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp;
	int				error = 0;
	LIST_HEAD(dop_pending);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred
		 * work that was created by the caller.  This keeps the number
		 * of pending work items to a minimum, which decreases the
		 * amount of time that any one intent item can stick around in
		 * memory, pinning the log tail.
		 */
		xfs_defer_create_intents(*tp);
		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		error = xfs_defer_trans_roll(tp);
		if (error)
			goto out_shutdown;

		/* Possibly relog intent items to keep the log moving. */
		error = xfs_defer_relog(tp, &dop_pending);
		if (error)
			goto out_shutdown;

		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
				       dfp_list);
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					   SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}

	/* Reset LOWMODE now that we've finished all the dfops. */
	ASSERT(list_empty(&(*tp)->t_dfops));
	(*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_trans		*tp,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;
	const struct xfs_defer_op_type	*ops;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&tp->t_dfops)) {
		dfp = list_last_entry(&tp->t_dfops,
				      struct xfs_defer_pending, dfp_list);
		ops = defer_op_types[dfp->dfp_type];
		if (dfp->dfp_type != type ||
		    (ops->max_items && dfp->dfp_count >= ops->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
					GFP_NOFS | __GFP_NOFAIL);
		dfp->dfp_type = type;
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state.  This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls.  Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
	stp->t_flags &= ~XFS_TRANS_LOWMODE;
}
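
/*
 * Illustration of the intake batching done by xfs_defer_add() above
 * (the item names are invented): consecutive same-type adds share one
 * xfs_defer_pending until max_items is reached, so a single intent
 * item covers all three work items below:
 *
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &a->list);
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &b->list);
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &c->list);
 */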

/*
 * Prepare a chain of fresh deferred ops work items to be completed later.  Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off.  The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it.  If there are no
 * intent items to capture, this function returns NULL.
 *
 * If inodes or buffers were held in the transaction, the capture structure
 * obtains extra references to them so that they survive the commit.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp)
{
	struct xfs_defer_capture	*dfc;
	unsigned short			i;
	int				error;

	if (list_empty(&tp->t_dfops))
		return NULL;

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	xfs_defer_create_intents(tp);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	error = xfs_defer_save_resources(&dfc->dfc_held, tp);
	if (error) {
		/*
		 * Resource capture should never fail, but if it does, we
		 * still have to shut down the log and release things
		 * properly.
		 */
		xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	}

	/*
	 * Grab extra references to the inodes and buffers because callers are
	 * expected to release their held references after we commit the
	 * transaction.
	 */
	for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
		ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
		ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
	}

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_hold(dfc->dfc_held.dr_bp[i]);

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_capture_free(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	unsigned short			i;

	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_relse(dfc->dfc_held.dr_bp[i]);

	for (i = 0; i < dfc->dfc_held.dr_inos; i++)
		xfs_irele(dfc->dfc_held.dr_ip[i]);

	kmem_free(dfc);
}
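
/*
 * Sketch of the recovery flow that ties the capture helpers together
 * (the loop, variable names, and transaction allocation are invented
 * for illustration; the functions called are the real ones here and in
 * xfs_trans.c):
 *
 *	error = xfs_defer_ops_capture_and_commit(tp, &capture_list);
 *	...
 *	list_for_each_entry_safe(dfc, next, &capture_list, dfc_list) {
 *		list_del_init(&dfc->dfc_list);
 *		<allocate tp using dfc->dfc_logres, dfc->dfc_blkres>;
 *		xfs_defer_ops_continue(dfc, tp, &dres);
 *		error = xfs_trans_commit(tp);
 *		xfs_defer_resources_rele(&dres);
 *	}
 */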

/*
 * Capture any deferred ops and commit the transaction.  This is the last step
 * needed to finish a log intent item that we recovered from the log.  If any
 * of the deferred ops operate on an inode, the caller must have joined that
 * inode to the transaction so that the reference can be transferred to the
 * capture structure.  The caller must hold ILOCK_EXCL on the inode, and must
 * unlock it before calling xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp);
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_capture_free(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure.  If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock and join the captured inode to the new transaction. */
	if (dfc->dfc_held.dr_inos == 2)
		xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
				    dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
	else if (dfc->dfc_held.dr_inos == 1)
		xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);
	xfs_defer_restore_resources(tp, &dfc->dfc_held);
	memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}

/* Release the resources captured and continued during recovery. */
void
xfs_defer_resources_rele(
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	for (i = 0; i < dres->dr_inos; i++) {
		xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
		xfs_irele(dres->dr_ip[i]);
		dres->dr_ip[i] = NULL;
	}

	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_buf_relse(dres->dr_bp[i]);
		dres->dr_bp[i] = NULL;
	}

	dres->dr_inos = 0;
	dres->dr_bufs = 0;
	dres->dr_ordered = 0;
}

static inline int __init
xfs_defer_init_cache(void)
{
	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
			sizeof(struct xfs_defer_pending),
			0, 0, NULL);

	return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
}

static inline void
xfs_defer_destroy_cache(void)
{
	kmem_cache_destroy(xfs_defer_pending_cache);
	xfs_defer_pending_cache = NULL;
}

/* Set up caches for deferred work items. */
int __init
xfs_defer_init_item_caches(void)
{
	int				error;

	error = xfs_defer_init_cache();
	if (error)
		return error;
	error = xfs_rmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_refcount_intent_init_cache();
	if (error)
		goto err;
	error = xfs_bmap_intent_init_cache();
	if (error)
		goto err;

	return 0;
err:
	xfs_defer_destroy_item_caches();
	return error;
}
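
/*
 * Sketch of the expected pairing at module setup/teardown (the call
 * sites are assumed, not shown in this file).  Note that the error
 * path above unwinds through xfs_defer_destroy_item_caches(), which is
 * safe for caches that were never created because kmem_cache_destroy()
 * of a NULL cache is a no-op:
 *
 *	error = xfs_defer_init_item_caches();
 *	if (error)
 *		return error;
 *	...
 *	xfs_defer_destroy_item_caches();
 */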

/* Destroy all the deferred work item caches, if they've been allocated. */
void
xfs_defer_destroy_item_caches(void)
{
	xfs_bmap_intent_destroy_cache();
	xfs_refcount_intent_destroy_cache();
	xfs_rmap_intent_destroy_cache();
	xfs_defer_destroy_cache();
}