/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trace.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item where it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_pending list.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */
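
/*
 * A minimal sketch of how a caller might drive this machinery, using only
 * the helpers defined in this file plus the usual transaction commit and
 * cancel calls.  The names "work", "xw_list", and "out_cancel" are
 * illustrative assumptions, not taken from any particular caller; tp is
 * assumed to be a transaction with a permanent reservation and ip a locked
 * inode already joined to it:
 *
 *	struct xfs_defer_ops	dfops;
 *	xfs_fsblock_t		firstfsb;
 *	int			error;
 *
 *	xfs_defer_init(&dfops, &firstfsb);
 *
 * Queue one or more work items; each work item embeds a list_head that is
 * handed to xfs_defer_add() along with its deferred op type:
 *
 *	xfs_defer_add(&dfops, XFS_DEFER_OPS_TYPE_FREE, &work->xw_list);
 *
 * Join any inode that must follow the transaction rolls performed by
 * xfs_defer_finish() (buffers use xfs_defer_bjoin()):
 *
 *	error = xfs_defer_ijoin(&dfops, ip);
 *	if (error)
 *		goto out_cancel;
 *
 * Then log the intents, roll, and finish the work, committing or cancelling
 * the transaction based on the result:
 *
 *	error = xfs_defer_finish(&tp, &dfops);
 *	if (error)
 *		goto out_cancel;
 *	return xfs_trans_commit(tp);
 *
 * out_cancel:
 *	xfs_defer_cancel(&dfops);
 *	xfs_trans_cancel(tp);
 *	return error;
 */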

static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_intake_work(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct list_head		*li;
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
				dfp->dfp_count);
		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
		list_sort(tp->t_mountp, &dfp->dfp_work,
				dfp->dfp_type->diff_items);
		list_for_each(li, &dfp->dfp_work)
			dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
	}

	list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	int				error)
{
	struct xfs_defer_pending	*dfp;

	trace_xfs_defer_trans_abort(tp->t_mountp, dop);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			dfp->dfp_type->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}

	/* Shut down FS. */
	xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
			SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	int				i;
	int				error;

	/* Log all the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);

	/* Hold the (previously bjoin'd) buffer locked across the roll. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);

	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);

	/* Roll the transaction. */
	error = xfs_trans_roll(tp);
	if (error) {
		trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
		xfs_defer_trans_abort(*tp, dop, error);
		return error;
	}
	dop->dop_committed = true;

	/* Rejoin the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
	}

	return error;
}

/* Do we have any work items to finish? */
bool
xfs_defer_has_unfinished_work(
	struct xfs_defer_ops		*dop)
{
	return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
}

/*
 * Add this inode to the deferred op.  Each joined inode is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_ijoin(
	struct xfs_defer_ops		*dop,
	struct xfs_inode		*ip)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
		if (dop->dop_inodes[i] == ip)
			return 0;
		else if (dop->dop_inodes[i] == NULL) {
			dop->dop_inodes[i] = ip;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Add this buffer to the deferred op.  Each joined buffer is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_bjoin(
	struct xfs_defer_ops		*dop,
	struct xfs_buf			*bp)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
		if (dop->dop_bufs[i] == bp)
			return 0;
		else if (dop->dop_bufs[i] == NULL) {
			dop->dop_bufs[i] = bp;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 *
 * Any inodes and buffers joined via xfs_defer_ijoin and xfs_defer_bjoin
 * are relogged and rejoined each time the transaction rolls.
 */
int
xfs_defer_finish(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct list_head		*li;
	struct list_head		*n;
	void				*state;
	int				error = 0;
	void				(*cleanup_fn)(struct xfs_trans *, void *, int);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish((*tp)->t_mountp, dop);

	/* Until we run out of pending work to finish... */
	while (xfs_defer_has_unfinished_work(dop)) {
		/* Log intents for work items sitting in the intake. */
		xfs_defer_intake_work(*tp, dop);

		/* Roll the transaction. */
		error = xfs_defer_trans_roll(tp, dop);
		if (error)
			goto out;

		/* Log an intent-done item for the first pending item. */
		dfp = list_first_entry(&dop->dop_pending,
				struct xfs_defer_pending, dfp_list);
		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
		dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
				dfp->dfp_count);
		cleanup_fn = dfp->dfp_type->finish_cleanup;

		/* Finish the work items. */
		state = NULL;
		list_for_each_safe(li, n, &dfp->dfp_work) {
			list_del(li);
			dfp->dfp_count--;
			error = dfp->dfp_type->finish_item(*tp, dop, li,
					dfp->dfp_done, &state);
			if (error == -EAGAIN) {
				/*
				 * Caller wants a fresh transaction;
				 * put the work item back on the list
				 * and jump out.
				 */
				list_add(li, &dfp->dfp_work);
				dfp->dfp_count++;
				break;
			} else if (error) {
				/*
				 * Clean up after ourselves and jump out.
				 * xfs_defer_cancel will take care of freeing
				 * all these lists.
				 */
				if (cleanup_fn)
					cleanup_fn(*tp, state, error);
				xfs_defer_trans_abort(*tp, dop, error);
				goto out;
			}
		}
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction, so log a
			 * new log intent item to replace the old one
			 * and roll the transaction.  See "Requesting
			 * a Fresh Transaction while Finishing
			 * Deferred Work" above.
			 */
			dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
					dfp->dfp_count);
			dfp->dfp_done = NULL;
			list_for_each(li, &dfp->dfp_work)
				dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
						li);
		} else {
			/* Done with the dfp, free it. */
			list_del(&dfp->dfp_list);
			kmem_free(dfp);
		}

		if (cleanup_fn)
			cleanup_fn(*tp, state, error);
	}

out:
	if (error)
		trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
	else
		trace_xfs_defer_finish_done((*tp)->t_mountp, dop);
	return error;
}
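
/*
 * For reference, a sketch of the ->finish_item side of the -EAGAIN protocol
 * handled above.  This is not copied from any real deferred op type; the
 * "example" names and the work item layout are assumptions.  The argument
 * list mirrors how finish_item is invoked from xfs_defer_finish():
 *
 *	STATIC int
 *	xfs_example_finish_item(
 *		struct xfs_trans	*tp,
 *		struct xfs_defer_ops	*dop,
 *		struct list_head	*item,
 *		void			*done_item,
 *		void			**state)
 *	{
 *		struct xfs_example_work	*xw;
 *
 *		xw = container_of(item, struct xfs_example_work, xw_list);
 *		return xfs_example_process(tp, dop, xw, done_item);
 *	}
 *
 * On -EAGAIN the implementation is expected to have trimmed *xw to describe
 * only the unfinished work and to have reduced the done item's count to the
 * number of items actually completed, leaving enough transaction reservation
 * for the replacement intent that xfs_defer_finish() logs before rolling.
 */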

/*
 * Free up any items left in the list.
 */
void
xfs_defer_cancel(
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel(NULL, dop);

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
		trace_xfs_defer_intake_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
	list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_defer_ops		*dop,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&dop->dop_intake)) {
		dfp = list_last_entry(&dop->dop_intake,
				struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type->type != type ||
		    (dfp->dfp_type->max_items &&
		     dfp->dfp_count >= dfp->dfp_type->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_SLEEP | KM_NOFS);
		dfp->dfp_type = defer_op_types[type];
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &dop->dop_intake);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}

/* Register a deferred operation type. */
void
xfs_defer_init_op_type(
	const struct xfs_defer_op_type	*type)
{
	defer_op_types[type->type] = type;
}

/* Initialize a deferred operation. */
void
xfs_defer_init(
	struct xfs_defer_ops		*dop,
	xfs_fsblock_t			*fbp)
{
	memset(dop, 0, sizeof(struct xfs_defer_ops));
	*fbp = NULLFSBLOCK;
	INIT_LIST_HEAD(&dop->dop_intake);
	INIT_LIST_HEAD(&dop->dop_pending);
	trace_xfs_defer_init(NULL, dop);
}
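
/*
 * A sketch of how a deferred operation type might be wired up, using only
 * the callbacks that this file invokes.  The callback functions and the
 * "example" names are hypothetical; a max_items of zero would mean "no
 * limit" as far as xfs_defer_add() is concerned:
 *
 *	static const struct xfs_defer_op_type xfs_example_defer_type = {
 *		.type		= XFS_DEFER_OPS_TYPE_FREE,
 *		.max_items	= 16,
 *		.diff_items	= xfs_example_diff_items,
 *		.create_intent	= xfs_example_create_intent,
 *		.abort_intent	= xfs_example_abort_intent,
 *		.log_item	= xfs_example_log_item,
 *		.create_done	= xfs_example_create_done,
 *		.finish_item	= xfs_example_finish_item,
 *		.finish_cleanup	= xfs_example_finish_cleanup,
 *		.cancel_item	= xfs_example_cancel_item,
 *	};
 *
 * which would be registered once during initialization:
 *
 *	xfs_defer_init_op_type(&xfs_example_defer_type);
 */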