1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright (C) 2016 Oracle. All Rights Reserved. 4 * Author: Darrick J. Wong <darrick.wong@oracle.com> 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_defer.h" 14 #include "xfs_trans.h" 15 #include "xfs_buf_item.h" 16 #include "xfs_inode.h" 17 #include "xfs_inode_item.h" 18 #include "xfs_trace.h" 19 #include "xfs_icache.h" 20 #include "xfs_log.h" 21 #include "xfs_rmap.h" 22 #include "xfs_refcount.h" 23 #include "xfs_bmap.h" 24 #include "xfs_alloc.h" 25 #include "xfs_buf.h" 26 #include "xfs_da_format.h" 27 #include "xfs_da_btree.h" 28 #include "xfs_attr.h" 29 30 static struct kmem_cache *xfs_defer_pending_cache; 31 32 /* 33 * Deferred Operations in XFS 34 * 35 * Due to the way locking rules work in XFS, certain transactions (block 36 * mapping and unmapping, typically) have permanent reservations so that 37 * we can roll the transaction to adhere to AG locking order rules and 38 * to unlock buffers between metadata updates. Prior to rmap/reflink, 39 * the mapping code had a mechanism to perform these deferrals for 40 * extents that were going to be freed; this code makes that facility 41 * more generic. 42 * 43 * When adding the reverse mapping and reflink features, it became 44 * necessary to perform complex remapping multi-transactions to comply 45 * with AG locking order rules, and to be able to spread a single 46 * refcount update operation (an operation on an n-block extent can 47 * update as many as n records!) among multiple transactions. XFS can 48 * roll a transaction to facilitate this, but using this facility 49 * requires us to log "intent" items in case log recovery needs to 50 * redo the operation, and to log "done" items to indicate that redo 51 * is not necessary. 52 * 53 * Deferred work is tracked in xfs_defer_pending items. Each pending 54 * item tracks one type of deferred work. Incoming work items (which 55 * have not yet had an intent logged) are attached to a pending item 56 * on the dop_intake list, where they wait for the caller to finish 57 * the deferred operations. 58 * 59 * Finishing a set of deferred operations is an involved process. To 60 * start, we define "rolling a deferred-op transaction" as follows: 61 * 62 * > For each xfs_defer_pending item on the dop_intake list, 63 * - Sort the work items in AG order. XFS locking 64 * order rules require us to lock buffers in AG order. 65 * - Create a log intent item for that type. 66 * - Attach it to the pending item. 67 * - Move the pending item from the dop_intake list to the 68 * dop_pending list. 69 * > Roll the transaction. 70 * 71 * NOTE: To avoid exceeding the transaction reservation, we limit the 72 * number of items that we attach to a given xfs_defer_pending. 73 * 74 * The actual finishing process looks like this: 75 * 76 * > For each xfs_defer_pending in the dop_pending list, 77 * - Roll the deferred-op transaction as above. 78 * - Create a log done item for that type, and attach it to the 79 * log intent item. 80 * - For each work item attached to the log intent item, 81 * * Perform the described action. 82 * * Attach the work item to the log done item. 83 * * If the result of doing the work was -EAGAIN, ->finish work 84 * wants a new transaction. See the "Requesting a Fresh 85 * Transaction while Finishing Deferred Work" section below for 86 * details. 87 * 88 * The key here is that we must log an intent item for all pending 89 * work items every time we roll the transaction, and that we must log 90 * a done item as soon as the work is completed. With this mechanism 91 * we can perform complex remapping operations, chaining intent items 92 * as needed. 93 * 94 * Requesting a Fresh Transaction while Finishing Deferred Work 95 * 96 * If ->finish_item decides that it needs a fresh transaction to 97 * finish the work, it must ask its caller (xfs_defer_finish) for a 98 * continuation. The most likely cause of this circumstance are the 99 * refcount adjust functions deciding that they've logged enough items 100 * to be at risk of exceeding the transaction reservation. 101 * 102 * To get a fresh transaction, we want to log the existing log done 103 * item to prevent the log intent item from replaying, immediately log 104 * a new log intent item with the unfinished work items, roll the 105 * transaction, and re-call ->finish_item wherever it left off. The 106 * log done item and the new log intent item must be in the same 107 * transaction or atomicity cannot be guaranteed; defer_finish ensures 108 * that this happens. 109 * 110 * This requires some coordination between ->finish_item and 111 * defer_finish. Upon deciding to request a new transaction, 112 * ->finish_item should update the current work item to reflect the 113 * unfinished work. Next, it should reset the log done item's list 114 * count to the number of items finished, and return -EAGAIN. 115 * defer_finish sees the -EAGAIN, logs the new log intent item 116 * with the remaining work items, and leaves the xfs_defer_pending 117 * item at the head of the dop_work queue. Then it rolls the 118 * transaction and picks up processing where it left off. It is 119 * required that ->finish_item must be careful to leave enough 120 * transaction reservation to fit the new log intent item. 121 * 122 * This is an example of remapping the extent (E, E+B) into file X at 123 * offset A and dealing with the extent (C, C+B) already being mapped 124 * there: 125 * +-------------------------------------------------+ 126 * | Unmap file X startblock C offset A length B | t0 127 * | Intent to reduce refcount for extent (C, B) | 128 * | Intent to remove rmap (X, C, A, B) | 129 * | Intent to free extent (D, 1) (bmbt block) | 130 * | Intent to map (X, A, B) at startblock E | 131 * +-------------------------------------------------+ 132 * | Map file X startblock E offset A length B | t1 133 * | Done mapping (X, E, A, B) | 134 * | Intent to increase refcount for extent (E, B) | 135 * | Intent to add rmap (X, E, A, B) | 136 * +-------------------------------------------------+ 137 * | Reduce refcount for extent (C, B) | t2 138 * | Done reducing refcount for extent (C, 9) | 139 * | Intent to reduce refcount for extent (C+9, B-9) | 140 * | (ran out of space after 9 refcount updates) | 141 * +-------------------------------------------------+ 142 * | Reduce refcount for extent (C+9, B+9) | t3 143 * | Done reducing refcount for extent (C+9, B-9) | 144 * | Increase refcount for extent (E, B) | 145 * | Done increasing refcount for extent (E, B) | 146 * | Intent to free extent (C, B) | 147 * | Intent to free extent (F, 1) (refcountbt block) | 148 * | Intent to remove rmap (F, 1, REFC) | 149 * +-------------------------------------------------+ 150 * | Remove rmap (X, C, A, B) | t4 151 * | Done removing rmap (X, C, A, B) | 152 * | Add rmap (X, E, A, B) | 153 * | Done adding rmap (X, E, A, B) | 154 * | Remove rmap (F, 1, REFC) | 155 * | Done removing rmap (F, 1, REFC) | 156 * +-------------------------------------------------+ 157 * | Free extent (C, B) | t5 158 * | Done freeing extent (C, B) | 159 * | Free extent (D, 1) | 160 * | Done freeing extent (D, 1) | 161 * | Free extent (F, 1) | 162 * | Done freeing extent (F, 1) | 163 * +-------------------------------------------------+ 164 * 165 * If we should crash before t2 commits, log recovery replays 166 * the following intent items: 167 * 168 * - Intent to reduce refcount for extent (C, B) 169 * - Intent to remove rmap (X, C, A, B) 170 * - Intent to free extent (D, 1) (bmbt block) 171 * - Intent to increase refcount for extent (E, B) 172 * - Intent to add rmap (X, E, A, B) 173 * 174 * In the process of recovering, it should also generate and take care 175 * of these intent items: 176 * 177 * - Intent to free extent (C, B) 178 * - Intent to free extent (F, 1) (refcountbt block) 179 * - Intent to remove rmap (F, 1, REFC) 180 * 181 * Note that the continuation requested between t2 and t3 is likely to 182 * reoccur. 183 */ 184 185 static const struct xfs_defer_op_type *defer_op_types[] = { 186 [XFS_DEFER_OPS_TYPE_BMAP] = &xfs_bmap_update_defer_type, 187 [XFS_DEFER_OPS_TYPE_REFCOUNT] = &xfs_refcount_update_defer_type, 188 [XFS_DEFER_OPS_TYPE_RMAP] = &xfs_rmap_update_defer_type, 189 [XFS_DEFER_OPS_TYPE_FREE] = &xfs_extent_free_defer_type, 190 [XFS_DEFER_OPS_TYPE_AGFL_FREE] = &xfs_agfl_free_defer_type, 191 [XFS_DEFER_OPS_TYPE_ATTR] = &xfs_attr_defer_type, 192 }; 193 194 static bool 195 xfs_defer_create_intent( 196 struct xfs_trans *tp, 197 struct xfs_defer_pending *dfp, 198 bool sort) 199 { 200 const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type]; 201 202 if (!dfp->dfp_intent) 203 dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work, 204 dfp->dfp_count, sort); 205 return dfp->dfp_intent != NULL; 206 } 207 208 /* 209 * For each pending item in the intake list, log its intent item and the 210 * associated extents, then add the entire intake list to the end of 211 * the pending list. 212 */ 213 static bool 214 xfs_defer_create_intents( 215 struct xfs_trans *tp) 216 { 217 struct xfs_defer_pending *dfp; 218 bool ret = false; 219 220 list_for_each_entry(dfp, &tp->t_dfops, dfp_list) { 221 trace_xfs_defer_create_intent(tp->t_mountp, dfp); 222 ret |= xfs_defer_create_intent(tp, dfp, true); 223 } 224 return ret; 225 } 226 227 /* Abort all the intents that were committed. */ 228 STATIC void 229 xfs_defer_trans_abort( 230 struct xfs_trans *tp, 231 struct list_head *dop_pending) 232 { 233 struct xfs_defer_pending *dfp; 234 const struct xfs_defer_op_type *ops; 235 236 trace_xfs_defer_trans_abort(tp, _RET_IP_); 237 238 /* Abort intent items that don't have a done item. */ 239 list_for_each_entry(dfp, dop_pending, dfp_list) { 240 ops = defer_op_types[dfp->dfp_type]; 241 trace_xfs_defer_pending_abort(tp->t_mountp, dfp); 242 if (dfp->dfp_intent && !dfp->dfp_done) { 243 ops->abort_intent(dfp->dfp_intent); 244 dfp->dfp_intent = NULL; 245 } 246 } 247 } 248 249 /* 250 * Capture resources that the caller said not to release ("held") when the 251 * transaction commits. Caller is responsible for zero-initializing @dres. 252 */ 253 static int 254 xfs_defer_save_resources( 255 struct xfs_defer_resources *dres, 256 struct xfs_trans *tp) 257 { 258 struct xfs_buf_log_item *bli; 259 struct xfs_inode_log_item *ili; 260 struct xfs_log_item *lip; 261 262 BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS); 263 264 list_for_each_entry(lip, &tp->t_items, li_trans) { 265 switch (lip->li_type) { 266 case XFS_LI_BUF: 267 bli = container_of(lip, struct xfs_buf_log_item, 268 bli_item); 269 if (bli->bli_flags & XFS_BLI_HOLD) { 270 if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) { 271 ASSERT(0); 272 return -EFSCORRUPTED; 273 } 274 if (bli->bli_flags & XFS_BLI_ORDERED) 275 dres->dr_ordered |= 276 (1U << dres->dr_bufs); 277 else 278 xfs_trans_dirty_buf(tp, bli->bli_buf); 279 dres->dr_bp[dres->dr_bufs++] = bli->bli_buf; 280 } 281 break; 282 case XFS_LI_INODE: 283 ili = container_of(lip, struct xfs_inode_log_item, 284 ili_item); 285 if (ili->ili_lock_flags == 0) { 286 if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) { 287 ASSERT(0); 288 return -EFSCORRUPTED; 289 } 290 xfs_trans_log_inode(tp, ili->ili_inode, 291 XFS_ILOG_CORE); 292 dres->dr_ip[dres->dr_inos++] = ili->ili_inode; 293 } 294 break; 295 default: 296 break; 297 } 298 } 299 300 return 0; 301 } 302 303 /* Attach the held resources to the transaction. */ 304 static void 305 xfs_defer_restore_resources( 306 struct xfs_trans *tp, 307 struct xfs_defer_resources *dres) 308 { 309 unsigned short i; 310 311 /* Rejoin the joined inodes. */ 312 for (i = 0; i < dres->dr_inos; i++) 313 xfs_trans_ijoin(tp, dres->dr_ip[i], 0); 314 315 /* Rejoin the buffers and dirty them so the log moves forward. */ 316 for (i = 0; i < dres->dr_bufs; i++) { 317 xfs_trans_bjoin(tp, dres->dr_bp[i]); 318 if (dres->dr_ordered & (1U << i)) 319 xfs_trans_ordered_buf(tp, dres->dr_bp[i]); 320 xfs_trans_bhold(tp, dres->dr_bp[i]); 321 } 322 } 323 324 /* Roll a transaction so we can do some deferred op processing. */ 325 STATIC int 326 xfs_defer_trans_roll( 327 struct xfs_trans **tpp) 328 { 329 struct xfs_defer_resources dres = { }; 330 int error; 331 332 error = xfs_defer_save_resources(&dres, *tpp); 333 if (error) 334 return error; 335 336 trace_xfs_defer_trans_roll(*tpp, _RET_IP_); 337 338 /* 339 * Roll the transaction. Rolling always given a new transaction (even 340 * if committing the old one fails!) to hand back to the caller, so we 341 * join the held resources to the new transaction so that we always 342 * return with the held resources joined to @tpp, no matter what 343 * happened. 344 */ 345 error = xfs_trans_roll(tpp); 346 347 xfs_defer_restore_resources(*tpp, &dres); 348 349 if (error) 350 trace_xfs_defer_trans_roll_error(*tpp, error); 351 return error; 352 } 353 354 /* 355 * Free up any items left in the list. 356 */ 357 static void 358 xfs_defer_cancel_list( 359 struct xfs_mount *mp, 360 struct list_head *dop_list) 361 { 362 struct xfs_defer_pending *dfp; 363 struct xfs_defer_pending *pli; 364 struct list_head *pwi; 365 struct list_head *n; 366 const struct xfs_defer_op_type *ops; 367 368 /* 369 * Free the pending items. Caller should already have arranged 370 * for the intent items to be released. 371 */ 372 list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) { 373 ops = defer_op_types[dfp->dfp_type]; 374 trace_xfs_defer_cancel_list(mp, dfp); 375 list_del(&dfp->dfp_list); 376 list_for_each_safe(pwi, n, &dfp->dfp_work) { 377 list_del(pwi); 378 dfp->dfp_count--; 379 ops->cancel_item(pwi); 380 } 381 ASSERT(dfp->dfp_count == 0); 382 kmem_cache_free(xfs_defer_pending_cache, dfp); 383 } 384 } 385 386 /* 387 * Prevent a log intent item from pinning the tail of the log by logging a 388 * done item to release the intent item; and then log a new intent item. 389 * The caller should provide a fresh transaction and roll it after we're done. 390 */ 391 static int 392 xfs_defer_relog( 393 struct xfs_trans **tpp, 394 struct list_head *dfops) 395 { 396 struct xlog *log = (*tpp)->t_mountp->m_log; 397 struct xfs_defer_pending *dfp; 398 xfs_lsn_t threshold_lsn = NULLCOMMITLSN; 399 400 401 ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES); 402 403 list_for_each_entry(dfp, dfops, dfp_list) { 404 /* 405 * If the log intent item for this deferred op is not a part of 406 * the current log checkpoint, relog the intent item to keep 407 * the log tail moving forward. We're ok with this being racy 408 * because an incorrect decision means we'll be a little slower 409 * at pushing the tail. 410 */ 411 if (dfp->dfp_intent == NULL || 412 xfs_log_item_in_current_chkpt(dfp->dfp_intent)) 413 continue; 414 415 /* 416 * Figure out where we need the tail to be in order to maintain 417 * the minimum required free space in the log. Only sample 418 * the log threshold once per call. 419 */ 420 if (threshold_lsn == NULLCOMMITLSN) { 421 threshold_lsn = xlog_grant_push_threshold(log, 0); 422 if (threshold_lsn == NULLCOMMITLSN) 423 break; 424 } 425 if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0) 426 continue; 427 428 trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp); 429 XFS_STATS_INC((*tpp)->t_mountp, defer_relog); 430 dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp); 431 } 432 433 if ((*tpp)->t_flags & XFS_TRANS_DIRTY) 434 return xfs_defer_trans_roll(tpp); 435 return 0; 436 } 437 438 /* 439 * Log an intent-done item for the first pending intent, and finish the work 440 * items. 441 */ 442 static int 443 xfs_defer_finish_one( 444 struct xfs_trans *tp, 445 struct xfs_defer_pending *dfp) 446 { 447 const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type]; 448 struct xfs_btree_cur *state = NULL; 449 struct list_head *li, *n; 450 int error; 451 452 trace_xfs_defer_pending_finish(tp->t_mountp, dfp); 453 454 dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count); 455 list_for_each_safe(li, n, &dfp->dfp_work) { 456 list_del(li); 457 dfp->dfp_count--; 458 error = ops->finish_item(tp, dfp->dfp_done, li, &state); 459 if (error == -EAGAIN) { 460 /* 461 * Caller wants a fresh transaction; put the work item 462 * back on the list and log a new log intent item to 463 * replace the old one. See "Requesting a Fresh 464 * Transaction while Finishing Deferred Work" above. 465 */ 466 list_add(li, &dfp->dfp_work); 467 dfp->dfp_count++; 468 dfp->dfp_done = NULL; 469 dfp->dfp_intent = NULL; 470 xfs_defer_create_intent(tp, dfp, false); 471 } 472 473 if (error) 474 goto out; 475 } 476 477 /* Done with the dfp, free it. */ 478 list_del(&dfp->dfp_list); 479 kmem_cache_free(xfs_defer_pending_cache, dfp); 480 out: 481 if (ops->finish_cleanup) 482 ops->finish_cleanup(tp, state, error); 483 return error; 484 } 485 486 /* 487 * Finish all the pending work. This involves logging intent items for 488 * any work items that wandered in since the last transaction roll (if 489 * one has even happened), rolling the transaction, and finishing the 490 * work items in the first item on the logged-and-pending list. 491 * 492 * If an inode is provided, relog it to the new transaction. 493 */ 494 int 495 xfs_defer_finish_noroll( 496 struct xfs_trans **tp) 497 { 498 struct xfs_defer_pending *dfp = NULL; 499 int error = 0; 500 LIST_HEAD(dop_pending); 501 502 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); 503 504 trace_xfs_defer_finish(*tp, _RET_IP_); 505 506 /* Until we run out of pending work to finish... */ 507 while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) { 508 /* 509 * Deferred items that are created in the process of finishing 510 * other deferred work items should be queued at the head of 511 * the pending list, which puts them ahead of the deferred work 512 * that was created by the caller. This keeps the number of 513 * pending work items to a minimum, which decreases the amount 514 * of time that any one intent item can stick around in memory, 515 * pinning the log tail. 516 */ 517 bool has_intents = xfs_defer_create_intents(*tp); 518 519 list_splice_init(&(*tp)->t_dfops, &dop_pending); 520 521 if (has_intents || dfp) { 522 error = xfs_defer_trans_roll(tp); 523 if (error) 524 goto out_shutdown; 525 526 /* Relog intent items to keep the log moving. */ 527 error = xfs_defer_relog(tp, &dop_pending); 528 if (error) 529 goto out_shutdown; 530 } 531 532 dfp = list_first_entry(&dop_pending, struct xfs_defer_pending, 533 dfp_list); 534 error = xfs_defer_finish_one(*tp, dfp); 535 if (error && error != -EAGAIN) 536 goto out_shutdown; 537 } 538 539 trace_xfs_defer_finish_done(*tp, _RET_IP_); 540 return 0; 541 542 out_shutdown: 543 xfs_defer_trans_abort(*tp, &dop_pending); 544 xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE); 545 trace_xfs_defer_finish_error(*tp, error); 546 xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending); 547 xfs_defer_cancel(*tp); 548 return error; 549 } 550 551 int 552 xfs_defer_finish( 553 struct xfs_trans **tp) 554 { 555 int error; 556 557 /* 558 * Finish and roll the transaction once more to avoid returning to the 559 * caller with a dirty transaction. 560 */ 561 error = xfs_defer_finish_noroll(tp); 562 if (error) 563 return error; 564 if ((*tp)->t_flags & XFS_TRANS_DIRTY) { 565 error = xfs_defer_trans_roll(tp); 566 if (error) { 567 xfs_force_shutdown((*tp)->t_mountp, 568 SHUTDOWN_CORRUPT_INCORE); 569 return error; 570 } 571 } 572 573 /* Reset LOWMODE now that we've finished all the dfops. */ 574 ASSERT(list_empty(&(*tp)->t_dfops)); 575 (*tp)->t_flags &= ~XFS_TRANS_LOWMODE; 576 return 0; 577 } 578 579 void 580 xfs_defer_cancel( 581 struct xfs_trans *tp) 582 { 583 struct xfs_mount *mp = tp->t_mountp; 584 585 trace_xfs_defer_cancel(tp, _RET_IP_); 586 xfs_defer_cancel_list(mp, &tp->t_dfops); 587 } 588 589 /* Add an item for later deferred processing. */ 590 void 591 xfs_defer_add( 592 struct xfs_trans *tp, 593 enum xfs_defer_ops_type type, 594 struct list_head *li) 595 { 596 struct xfs_defer_pending *dfp = NULL; 597 const struct xfs_defer_op_type *ops; 598 599 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 600 BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX); 601 602 /* 603 * Add the item to a pending item at the end of the intake list. 604 * If the last pending item has the same type, reuse it. Else, 605 * create a new pending item at the end of the intake list. 606 */ 607 if (!list_empty(&tp->t_dfops)) { 608 dfp = list_last_entry(&tp->t_dfops, 609 struct xfs_defer_pending, dfp_list); 610 ops = defer_op_types[dfp->dfp_type]; 611 if (dfp->dfp_type != type || 612 (ops->max_items && dfp->dfp_count >= ops->max_items)) 613 dfp = NULL; 614 } 615 if (!dfp) { 616 dfp = kmem_cache_zalloc(xfs_defer_pending_cache, 617 GFP_NOFS | __GFP_NOFAIL); 618 dfp->dfp_type = type; 619 dfp->dfp_intent = NULL; 620 dfp->dfp_done = NULL; 621 dfp->dfp_count = 0; 622 INIT_LIST_HEAD(&dfp->dfp_work); 623 list_add_tail(&dfp->dfp_list, &tp->t_dfops); 624 } 625 626 list_add_tail(li, &dfp->dfp_work); 627 dfp->dfp_count++; 628 } 629 630 /* 631 * Move deferred ops from one transaction to another and reset the source to 632 * initial state. This is primarily used to carry state forward across 633 * transaction rolls with pending dfops. 634 */ 635 void 636 xfs_defer_move( 637 struct xfs_trans *dtp, 638 struct xfs_trans *stp) 639 { 640 list_splice_init(&stp->t_dfops, &dtp->t_dfops); 641 642 /* 643 * Low free space mode was historically controlled by a dfops field. 644 * This meant that low mode state potentially carried across multiple 645 * transaction rolls. Transfer low mode on a dfops move to preserve 646 * that behavior. 647 */ 648 dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE); 649 stp->t_flags &= ~XFS_TRANS_LOWMODE; 650 } 651 652 /* 653 * Prepare a chain of fresh deferred ops work items to be completed later. Log 654 * recovery requires the ability to put off until later the actual finishing 655 * work so that it can process unfinished items recovered from the log in 656 * correct order. 657 * 658 * Create and log intent items for all the work that we're capturing so that we 659 * can be assured that the items will get replayed if the system goes down 660 * before log recovery gets a chance to finish the work it put off. The entire 661 * deferred ops state is transferred to the capture structure and the 662 * transaction is then ready for the caller to commit it. If there are no 663 * intent items to capture, this function returns NULL. 664 * 665 * If capture_ip is not NULL, the capture structure will obtain an extra 666 * reference to the inode. 667 */ 668 static struct xfs_defer_capture * 669 xfs_defer_ops_capture( 670 struct xfs_trans *tp) 671 { 672 struct xfs_defer_capture *dfc; 673 unsigned short i; 674 int error; 675 676 if (list_empty(&tp->t_dfops)) 677 return NULL; 678 679 /* Create an object to capture the defer ops. */ 680 dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS); 681 INIT_LIST_HEAD(&dfc->dfc_list); 682 INIT_LIST_HEAD(&dfc->dfc_dfops); 683 684 xfs_defer_create_intents(tp); 685 686 /* Move the dfops chain and transaction state to the capture struct. */ 687 list_splice_init(&tp->t_dfops, &dfc->dfc_dfops); 688 dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE; 689 tp->t_flags &= ~XFS_TRANS_LOWMODE; 690 691 /* Capture the remaining block reservations along with the dfops. */ 692 dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used; 693 dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used; 694 695 /* Preserve the log reservation size. */ 696 dfc->dfc_logres = tp->t_log_res; 697 698 error = xfs_defer_save_resources(&dfc->dfc_held, tp); 699 if (error) { 700 /* 701 * Resource capture should never fail, but if it does, we 702 * still have to shut down the log and release things 703 * properly. 704 */ 705 xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE); 706 } 707 708 /* 709 * Grab extra references to the inodes and buffers because callers are 710 * expected to release their held references after we commit the 711 * transaction. 712 */ 713 for (i = 0; i < dfc->dfc_held.dr_inos; i++) { 714 ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL)); 715 ihold(VFS_I(dfc->dfc_held.dr_ip[i])); 716 } 717 718 for (i = 0; i < dfc->dfc_held.dr_bufs; i++) 719 xfs_buf_hold(dfc->dfc_held.dr_bp[i]); 720 721 return dfc; 722 } 723 724 /* Release all resources that we used to capture deferred ops. */ 725 void 726 xfs_defer_ops_capture_free( 727 struct xfs_mount *mp, 728 struct xfs_defer_capture *dfc) 729 { 730 unsigned short i; 731 732 xfs_defer_cancel_list(mp, &dfc->dfc_dfops); 733 734 for (i = 0; i < dfc->dfc_held.dr_bufs; i++) 735 xfs_buf_relse(dfc->dfc_held.dr_bp[i]); 736 737 for (i = 0; i < dfc->dfc_held.dr_inos; i++) 738 xfs_irele(dfc->dfc_held.dr_ip[i]); 739 740 kmem_free(dfc); 741 } 742 743 /* 744 * Capture any deferred ops and commit the transaction. This is the last step 745 * needed to finish a log intent item that we recovered from the log. If any 746 * of the deferred ops operate on an inode, the caller must pass in that inode 747 * so that the reference can be transferred to the capture structure. The 748 * caller must hold ILOCK_EXCL on the inode, and must unlock it before calling 749 * xfs_defer_ops_continue. 750 */ 751 int 752 xfs_defer_ops_capture_and_commit( 753 struct xfs_trans *tp, 754 struct list_head *capture_list) 755 { 756 struct xfs_mount *mp = tp->t_mountp; 757 struct xfs_defer_capture *dfc; 758 int error; 759 760 /* If we don't capture anything, commit transaction and exit. */ 761 dfc = xfs_defer_ops_capture(tp); 762 if (!dfc) 763 return xfs_trans_commit(tp); 764 765 /* Commit the transaction and add the capture structure to the list. */ 766 error = xfs_trans_commit(tp); 767 if (error) { 768 xfs_defer_ops_capture_free(mp, dfc); 769 return error; 770 } 771 772 list_add_tail(&dfc->dfc_list, capture_list); 773 return 0; 774 } 775 776 /* 777 * Attach a chain of captured deferred ops to a new transaction and free the 778 * capture structure. If an inode was captured, it will be passed back to the 779 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0. 780 * The caller now owns the inode reference. 781 */ 782 void 783 xfs_defer_ops_continue( 784 struct xfs_defer_capture *dfc, 785 struct xfs_trans *tp, 786 struct xfs_defer_resources *dres) 787 { 788 unsigned int i; 789 790 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); 791 ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY)); 792 793 /* Lock the captured resources to the new transaction. */ 794 if (dfc->dfc_held.dr_inos == 2) 795 xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL, 796 dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL); 797 else if (dfc->dfc_held.dr_inos == 1) 798 xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL); 799 800 for (i = 0; i < dfc->dfc_held.dr_bufs; i++) 801 xfs_buf_lock(dfc->dfc_held.dr_bp[i]); 802 803 /* Join the captured resources to the new transaction. */ 804 xfs_defer_restore_resources(tp, &dfc->dfc_held); 805 memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources)); 806 dres->dr_bufs = 0; 807 808 /* Move captured dfops chain and state to the transaction. */ 809 list_splice_init(&dfc->dfc_dfops, &tp->t_dfops); 810 tp->t_flags |= dfc->dfc_tpflags; 811 812 kmem_free(dfc); 813 } 814 815 /* Release the resources captured and continued during recovery. */ 816 void 817 xfs_defer_resources_rele( 818 struct xfs_defer_resources *dres) 819 { 820 unsigned short i; 821 822 for (i = 0; i < dres->dr_inos; i++) { 823 xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL); 824 xfs_irele(dres->dr_ip[i]); 825 dres->dr_ip[i] = NULL; 826 } 827 828 for (i = 0; i < dres->dr_bufs; i++) { 829 xfs_buf_relse(dres->dr_bp[i]); 830 dres->dr_bp[i] = NULL; 831 } 832 833 dres->dr_inos = 0; 834 dres->dr_bufs = 0; 835 dres->dr_ordered = 0; 836 } 837 838 static inline int __init 839 xfs_defer_init_cache(void) 840 { 841 xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending", 842 sizeof(struct xfs_defer_pending), 843 0, 0, NULL); 844 845 return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM; 846 } 847 848 static inline void 849 xfs_defer_destroy_cache(void) 850 { 851 kmem_cache_destroy(xfs_defer_pending_cache); 852 xfs_defer_pending_cache = NULL; 853 } 854 855 /* Set up caches for deferred work items. */ 856 int __init 857 xfs_defer_init_item_caches(void) 858 { 859 int error; 860 861 error = xfs_defer_init_cache(); 862 if (error) 863 return error; 864 error = xfs_rmap_intent_init_cache(); 865 if (error) 866 goto err; 867 error = xfs_refcount_intent_init_cache(); 868 if (error) 869 goto err; 870 error = xfs_bmap_intent_init_cache(); 871 if (error) 872 goto err; 873 error = xfs_extfree_intent_init_cache(); 874 if (error) 875 goto err; 876 error = xfs_attri_init_cache(); 877 if (error) 878 goto err; 879 error = xfs_attrd_init_cache(); 880 if (error) 881 goto err; 882 return 0; 883 err: 884 xfs_defer_destroy_item_caches(); 885 return error; 886 } 887 888 /* Destroy all the deferred work item caches, if they've been allocated. */ 889 void 890 xfs_defer_destroy_item_caches(void) 891 { 892 xfs_attri_destroy_cache(); 893 xfs_attrd_destroy_cache(); 894 xfs_extfree_intent_destroy_cache(); 895 xfs_bmap_intent_destroy_cache(); 896 xfs_refcount_intent_destroy_cache(); 897 xfs_rmap_intent_destroy_cache(); 898 xfs_defer_destroy_cache(); 899 } 900