// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction; see the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
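 *
 * From the caller's point of view the mechanism is simple: queue work
 * with xfs_defer_add() and finish it before committing.  A minimal
 * sketch (error handling elided; the surrounding caller is hypothetical,
 * but the xfs_defer_* calls are the ones defined in this file):
 *
 *	struct xfs_defer_ops	dfops;
 *
 *	xfs_defer_init(tp, &dfops);
 *	// ...metadata updates that call xfs_defer_add() to queue
 *	// deferred work items on dfops...
 *	error = xfs_defer_finish(&tp);	// log intents, roll, finish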
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_pending list.  Then it rolls the
 * transaction and picks up processing where it left off.  Note that
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * recur.
 */
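
/*
 * Each type of deferred work is described by an xfs_defer_op_type and
 * registered in defer_op_types[] below.  As a rough map of the callbacks
 * this file invokes (see xfs_defer.h for the authoritative definition;
 * the sample type below is illustrative, not a real registration):
 *
 *	const struct xfs_defer_op_type xfs_example_defer_type = {
 *		.type		= XFS_DEFER_OPS_TYPE_EXAMPLE,	// hypothetical
 *		.max_items	= 16,	// cap on work items per intent
 *		.diff_items	= ...,	// sort key; enforces AG order
 *		.create_intent	= ...,	// log an intent item
 *		.log_item	= ...,	// attach one work item to the intent
 *		.create_done	= ...,	// log the matching done item
 *		.finish_item	= ...,	// do the work; may return -EAGAIN
 *		.finish_cleanup	= ...,	// dispose of per-batch state
 *		.abort_intent	= ...,	// release the intent on abort
 *		.cancel_item	= ...,	// free an unprocessed work item
 *	};
 */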

static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_intake_work(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct list_head		*li;
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
				dfp->dfp_count);
		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
		list_sort(tp->t_mountp, &dfp->dfp_work,
				dfp->dfp_type->diff_items);
		list_for_each(li, &dfp->dfp_work)
			dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
	}

	list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	int				error)
{
	struct xfs_defer_pending	*dfp;

	trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			dfp->dfp_type->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}

	/* Shut down FS. */
	xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
			SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_ops		*dop = (*tp)->t_dfops;
	struct xfs_buf_log_item		*bli;
	struct xfs_log_item		*lip;
	struct xfs_buf			*bplist[XFS_DEFER_OPS_NR_BUFS];
	int				bpcount = 0;
	int				i;
	int				error;

	/* Log all the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);

	list_for_each_entry(lip, &(*tp)->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					   bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (bpcount >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_dirty_buf(*tp, bli->bli_buf);
				bplist[bpcount++] = bli->bli_buf;
			}
			break;
		default:
			break;
		}
	}

	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);

	/* Roll the transaction. */
	error = xfs_trans_roll(tp);
	dop = (*tp)->t_dfops;
	if (error) {
		trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
		xfs_defer_trans_abort(*tp, dop, error);
		return error;
	}

	/* Rejoin the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < bpcount; i++) {
		xfs_trans_bjoin(*tp, bplist[i]);
		xfs_trans_bhold(*tp, bplist[i]);
	}

	return error;
}
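
/*
 * Note that xfs_defer_trans_roll() keeps buffers marked with
 * xfs_trans_bhold() locked and joined across the roll.  A rough sketch
 * of the calling pattern this enables (the AGF buffer is illustrative):
 *
 *	xfs_trans_bhold(tp, agbp);	// sets XFS_BLI_HOLD
 *	error = xfs_defer_finish(&tp);	// agbp stays locked across rolls
 */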

/* Do we have any work items to finish? */
bool
xfs_defer_has_unfinished_work(
	struct xfs_defer_ops		*dop)
{
	return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
}

/*
 * Add this inode to the deferred op.  Each joined inode is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_ijoin(
	struct xfs_defer_ops		*dop,
	struct xfs_inode		*ip)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
		if (dop->dop_inodes[i] == ip)
			return 0;
		else if (dop->dop_inodes[i] == NULL) {
			dop->dop_inodes[i] = ip;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Reset an already used dfops after finish.
 */
static void
xfs_defer_reset(
	struct xfs_trans	*tp)
{
	struct xfs_defer_ops	*dop = tp->t_dfops;

	ASSERT(!xfs_defer_has_unfinished_work(dop));

	memset(dop->dop_inodes, 0, sizeof(dop->dop_inodes));

	/*
	 * Low mode state transfers across transaction rolls to mirror dfops
	 * lifetime.  Clear it now that dfops is reset.
	 */
	tp->t_flags &= ~XFS_TRANS_LOWMODE;
}

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 *
 * Any inodes joined via xfs_defer_ijoin() are relogged to each new
 * transaction.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_ops		*dop = (*tp)->t_dfops;
	struct xfs_defer_pending	*dfp;
	struct list_head		*li;
	struct list_head		*n;
	void				*state;
	int				error = 0;
	void				(*cleanup_fn)(struct xfs_trans *, void *, int);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish((*tp)->t_mountp, dop, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (xfs_defer_has_unfinished_work(dop)) {
		/* Log intents for work items sitting in the intake. */
		xfs_defer_intake_work(*tp, dop);

		/*
		 * Roll the transaction and update dop in case dfops was
		 * embedded in the transaction.
		 */
		error = xfs_defer_trans_roll(tp);
		if (error)
			goto out;
		dop = (*tp)->t_dfops;

		/* Log an intent-done item for the first pending item. */
		dfp = list_first_entry(&dop->dop_pending,
				struct xfs_defer_pending, dfp_list);
		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
		dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
				dfp->dfp_count);
		cleanup_fn = dfp->dfp_type->finish_cleanup;

		/* Finish the work items. */
		state = NULL;
		list_for_each_safe(li, n, &dfp->dfp_work) {
			list_del(li);
			dfp->dfp_count--;
			error = dfp->dfp_type->finish_item(*tp, dop, li,
					dfp->dfp_done, &state);
			if (error == -EAGAIN) {
				/*
				 * Caller wants a fresh transaction;
				 * put the work item back on the list
				 * and jump out.
				 */
				list_add(li, &dfp->dfp_work);
				dfp->dfp_count++;
				break;
			} else if (error) {
				/*
				 * Clean up after ourselves and jump out.
				 * xfs_defer_cancel will take care of freeing
				 * all these lists and stuff.
				 */
				if (cleanup_fn)
					cleanup_fn(*tp, state, error);
				xfs_defer_trans_abort(*tp, dop, error);
				goto out;
			}
		}
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction, so log a
			 * new log intent item to replace the old one
			 * and roll the transaction.  See "Requesting
			 * a Fresh Transaction while Finishing
			 * Deferred Work" above.
			 */
			dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
					dfp->dfp_count);
			dfp->dfp_done = NULL;
			list_for_each(li, &dfp->dfp_work)
				dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
						li);
		} else {
			/* Done with the dfp, free it. */
			list_del(&dfp->dfp_list);
			kmem_free(dfp);
		}

		if (cleanup_fn)
			cleanup_fn(*tp, state, error);
	}

out:
	if (error)
		trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
	else
		trace_xfs_defer_finish_done((*tp)->t_mountp, dop, _RET_IP_);

	return error;
}
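
/*
 * The -EAGAIN handling above is the defer_finish half of the continuation
 * protocol.  From ->finish_item's side it looks roughly like this
 * (hypothetical callback; the real implementations live with each work
 * item type):
 *
 *	STATIC int
 *	xfs_example_finish_item(...)
 *	{
 *		// ...do as much of the work item as reservation allows...
 *		if (running_low_on_reservation) {
 *			// Trim the work item to the unfinished remainder,
 *			// reset the done item's count to what was actually
 *			// finished, and ask defer_finish for a fresh tp.
 *			return -EAGAIN;
 *		}
 *		return error;
 *	}
 */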

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error)
			return error;
	}
	xfs_defer_reset(*tp);
	return 0;
}

/*
 * Free up any items left in the list.
 */
void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_defer_ops		*dop = tp->t_dfops;
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel(NULL, dop, _RET_IP_);

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
		trace_xfs_defer_intake_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
	list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_defer_ops		*dop,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type and hasn't hit its
	 * item limit, reuse it.  Else, create a new pending item at the
	 * end of the intake list.
	 */
	if (!list_empty(&dop->dop_intake)) {
		dfp = list_last_entry(&dop->dop_intake,
				struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type->type != type ||
		    (dfp->dfp_type->max_items &&
		     dfp->dfp_count >= dfp->dfp_type->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_SLEEP | KM_NOFS);
		dfp->dfp_type = defer_op_types[type];
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &dop->dop_intake);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}
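
/*
 * A sketch of queueing one work item, assuming a hypothetical work item
 * type that embeds the required list_head (real callers follow the same
 * shape with their own structures and defer type):
 *
 *	struct xfs_example_work		*work;
 *
 *	work = kmem_alloc(sizeof(*work), KM_SLEEP | KM_NOFS);
 *	work->ew_startblock = bno;
 *	work->ew_blockcount = len;
 *	xfs_defer_add(tp->t_dfops, XFS_DEFER_OPS_TYPE_EXAMPLE,
 *			&work->ew_list);
 */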

/* Register a deferred operation type. */
void
xfs_defer_init_op_type(
	const struct xfs_defer_op_type	*type)
{
	defer_op_types[type->type] = type;
}

/* Initialize a deferred operation. */
void
xfs_defer_init(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_mount		*mp = NULL;

	memset(dop, 0, sizeof(struct xfs_defer_ops));
	INIT_LIST_HEAD(&dop->dop_intake);
	INIT_LIST_HEAD(&dop->dop_pending);
	if (tp) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		tp->t_dfops = dop;
		mp = tp->t_mountp;
	}
	trace_xfs_defer_init(mp, dop, _RET_IP_);
}

/*
 * Move state from one xfs_defer_ops to another and reset the source to
 * initial state.  This is primarily used to carry state forward across
 * transaction rolls with an internal dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	struct xfs_defer_ops	*dst = dtp->t_dfops;
	struct xfs_defer_ops	*src = stp->t_dfops;

	ASSERT(dst != src);

	list_splice_init(&src->dop_intake, &dst->dop_intake);
	list_splice_init(&src->dop_pending, &dst->dop_pending);

	memcpy(dst->dop_inodes, src->dop_inodes, sizeof(dst->dop_inodes));

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls.  Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);

	xfs_defer_reset(stp);
}
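
/*
 * Op types register themselves once, early in initialization, before any
 * work of that type can be deferred.  Sketch, using the hypothetical
 * example type from the comment near the top of this file:
 *
 *	xfs_defer_init_op_type(&xfs_example_defer_type);
 */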