// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trace.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates. Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions. XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items. Each pending
 * item tracks one type of deferred work. Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process. To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order. XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction. See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed. With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed. (A sketch of the typical caller pattern follows this
 * comment.)
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation. The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off. The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish. Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work. Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_pending queue. Then it rolls the
 * transaction and picks up processing where it left off. Note that
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item. (An illustrative
 * ->finish_item sketch follows xfs_defer_finish() below.)
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */
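
/*
 * Illustrative sketch, not used anywhere: the typical shape of a caller
 * of this interface, assuming @tpp points to a transaction that already
 * carries a permanent log reservation and that the inode @ip has been
 * ijoin'd to it. The function name is hypothetical; real callers queue
 * work through type-specific deferral helpers rather than open-coding
 * xfs_defer_add() here.
 */
static int
xfs_defer_example_caller(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip)
{
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	int			error;

	/* Set up empty intake/pending lists and the first-block cursor. */
	xfs_defer_init(&dfops, &firstfsb);

	/* ... queue deferred work items against &dfops here ... */

	/* Relog the inode core each time the transaction rolls. */
	error = xfs_defer_ijoin(&dfops, ip);
	if (error)
		goto out_cancel;

	/* Log intents, roll the transaction, and finish all queued work. */
	error = xfs_defer_finish(tpp, &dfops);
	if (error)
		goto out_cancel;
	return 0;

out_cancel:
	/* Drop any work items that never had intents logged. */
	xfs_defer_cancel(&dfops);
	return error;
}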

static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_intake_work(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct list_head		*li;
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
				dfp->dfp_count);
		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
		list_sort(tp->t_mountp, &dfp->dfp_work,
				dfp->dfp_type->diff_items);
		list_for_each(li, &dfp->dfp_work)
			dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
	}

	list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	int				error)
{
	struct xfs_defer_pending	*dfp;

	trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			dfp->dfp_type->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}

	/* Shut down FS. */
	xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
			SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	int				i;
	int				error;

	/* Log all the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);

	/* Hold the (previously bjoin'd) buffer locked across the roll. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);

	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);

	/* Roll the transaction. */
	error = xfs_trans_roll(tp);
	if (error) {
		trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
		xfs_defer_trans_abort(*tp, dop, error);
		return error;
	}
	dop->dop_committed = true;

	/* Rejoin the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
	}

	return error;
}

/* Do we have any work items to finish? */
bool
xfs_defer_has_unfinished_work(
	struct xfs_defer_ops		*dop)
{
	return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
}

/*
 * Add this inode to the deferred op. Each joined inode is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_ijoin(
	struct xfs_defer_ops		*dop,
	struct xfs_inode		*ip)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
		if (dop->dop_inodes[i] == ip)
			return 0;
		else if (dop->dop_inodes[i] == NULL) {
			dop->dop_inodes[i] = ip;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Add this buffer to the deferred op. Each joined buffer is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_bjoin(
	struct xfs_defer_ops		*dop,
	struct xfs_buf			*bp)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
		if (dop->dop_bufs[i] == bp)
			return 0;
		else if (dop->dop_bufs[i] == NULL) {
			dop->dop_bufs[i] = bp;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Finish all the pending work. This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 *
 * Any inodes or buffers joined to this deferred op are relogged and
 * rejoined each time the transaction rolls.
 */
int
xfs_defer_finish(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct list_head		*li;
	struct list_head		*n;
	void				*state;
	int				error = 0;
	void				(*cleanup_fn)(struct xfs_trans *, void *, int);
	struct xfs_defer_ops		*orig_dop;

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish((*tp)->t_mountp, dop, _RET_IP_);

	/*
	 * Attach dfops to the transaction during deferred ops processing. This
	 * explicitly causes calls into the allocator to defer AGFL block frees.
	 * Note that this code can go away once all dfops users attach to the
	 * associated tp.
	 */
	ASSERT(!(*tp)->t_agfl_dfops || ((*tp)->t_agfl_dfops == dop));
	orig_dop = (*tp)->t_agfl_dfops;
	(*tp)->t_agfl_dfops = dop;

	/* Until we run out of pending work to finish... */
	while (xfs_defer_has_unfinished_work(dop)) {
		/* Log intents for work items sitting in the intake. */
		xfs_defer_intake_work(*tp, dop);

		/* Roll the transaction. */
		error = xfs_defer_trans_roll(tp, dop);
		if (error)
			goto out;

		/* Log an intent-done item for the first pending item. */
		dfp = list_first_entry(&dop->dop_pending,
				struct xfs_defer_pending, dfp_list);
		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
		dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
				dfp->dfp_count);
		cleanup_fn = dfp->dfp_type->finish_cleanup;

		/* Finish the work items. */
		state = NULL;
		list_for_each_safe(li, n, &dfp->dfp_work) {
			list_del(li);
			dfp->dfp_count--;
			error = dfp->dfp_type->finish_item(*tp, dop, li,
					dfp->dfp_done, &state);
			if (error == -EAGAIN) {
				/*
				 * Caller wants a fresh transaction;
				 * put the work item back on the list
				 * and jump out.
				 */
				list_add(li, &dfp->dfp_work);
				dfp->dfp_count++;
				break;
			} else if (error) {
				/*
				 * Clean up after ourselves and jump out.
				 * xfs_defer_cancel will take care of freeing
				 * all these lists.
				 */
				if (cleanup_fn)
					cleanup_fn(*tp, state, error);
				xfs_defer_trans_abort(*tp, dop, error);
				goto out;
			}
		}
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction, so log a
			 * new log intent item to replace the old one
			 * and roll the transaction. See "Requesting
			 * a Fresh Transaction while Finishing
			 * Deferred Work" above.
			 */
			dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
					dfp->dfp_count);
			dfp->dfp_done = NULL;
			list_for_each(li, &dfp->dfp_work)
				dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
						li);
		} else {
			/* Done with the dfp, free it. */
			list_del(&dfp->dfp_list);
			kmem_free(dfp);
		}

		if (cleanup_fn)
			cleanup_fn(*tp, state, error);
	}

out:
	(*tp)->t_agfl_dfops = orig_dop;
	if (error)
		trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
	else
		trace_xfs_defer_finish_done((*tp)->t_mountp, dop, _RET_IP_);
	return error;
}
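
/*
 * Illustrative sketch, not used anywhere: what a ->finish_item
 * implementation might look like when it honors the -EAGAIN contract
 * described in "Requesting a Fresh Transaction while Finishing Deferred
 * Work" above. The xfs_example_work structure, the xfs_example_process()
 * helper, and the function itself are hypothetical; only the callback
 * arguments mirror the finish_item() call in xfs_defer_finish(). A real
 * implementation would also fix up the log done item's count, as the
 * comment above describes.
 */
struct xfs_example_work {
	struct list_head	xw_list;	/* attaches to dfp_work */
	xfs_fsblock_t		xw_startblock;	/* first unprocessed block */
	xfs_filblks_t		xw_blockcount;	/* blocks left to process */
};

static int
xfs_example_finish_item(
	struct xfs_trans	*tp,
	struct xfs_defer_ops	*dop,
	struct list_head	*item,
	void			*done_item,
	void			**state)
{
	struct xfs_example_work	*xw;
	xfs_filblks_t		done;
	int			error;

	xw = container_of(item, struct xfs_example_work, xw_list);

	/*
	 * Hypothetical worker: process as many blocks as the remaining
	 * transaction reservation allows and report how many were done.
	 */
	error = xfs_example_process(tp, done_item, xw->xw_startblock,
			xw->xw_blockcount, &done);
	if (error)
		return error;

	if (done < xw->xw_blockcount) {
		/*
		 * Ran out of reservation. Trim the work item down to the
		 * unfinished tail and ask xfs_defer_finish() to relog the
		 * intent and roll to a fresh transaction.
		 */
		xw->xw_startblock += done;
		xw->xw_blockcount -= done;
		return -EAGAIN;
	}
	return 0;
}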

/*
 * Free up any items left in the list.
 */
void
xfs_defer_cancel(
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel(NULL, dop, _RET_IP_);

	/*
	 * Free the pending items. Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
		trace_xfs_defer_intake_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
	list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_defer_ops		*dop,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it. Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&dop->dop_intake)) {
		dfp = list_last_entry(&dop->dop_intake,
				struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type->type != type ||
		    (dfp->dfp_type->max_items &&
		     dfp->dfp_count >= dfp->dfp_type->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_SLEEP | KM_NOFS);
		dfp->dfp_type = defer_op_types[type];
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &dop->dop_intake);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}

/* Register a deferred operation type. */
void
xfs_defer_init_op_type(
	const struct xfs_defer_op_type	*type)
{
	defer_op_types[type->type] = type;
}

/* Initialize a deferred operation. */
void
xfs_defer_init(
	struct xfs_defer_ops		*dop,
	xfs_fsblock_t			*fbp)
{
	memset(dop, 0, sizeof(struct xfs_defer_ops));
	*fbp = NULLFSBLOCK;
	INIT_LIST_HEAD(&dop->dop_intake);
	INIT_LIST_HEAD(&dop->dop_pending);
	trace_xfs_defer_init(NULL, dop, _RET_IP_);
}