// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil		*cil,
	struct xfs_log_item	*lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}

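/*
 * Illustrative timeline of the sequence numbering (an example, not extra
 * mainline logic): xlog_cil_init() installs the first context via
 * xlog_cil_ctx_switch(), and xlog_cil_init_post_recovery() below then pins
 * it at sequence 1. Each push then switches in a fresh context:
 *
 *	post recovery:	ctx A, sequence 1
 *	push of A:	ctx B installed, xc_current_sequence = 2
 *	push of B:	ctx C installed, xc_current_sequence = 3
 *
 * A log item's li_seq records the sequence it was first committed into,
 * which is how xlog_item_in_current_chkpt() above can tell whether an item
 * was first committed into the current checkpoint.
 */
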
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
				niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}

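/*
 * Worked example (illustrative only - the struct sizes below are assumptions
 * for the arithmetic, not taken from any particular kernel build): if
 * sizeof(struct xfs_log_vec) were 56 bytes and sizeof(struct xfs_log_iovec)
 * 16 bytes, then for an item with two iovecs:
 *
 *	xlog_cil_iovec_space(2) = round_up(56 + 2 * 16, 8) = 88 bytes
 *
 * i.e. the log vector header plus the iovec array, padded so that the data
 * buffer that follows starts 64-bit aligned.
 */
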
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start of
		 * the next one is naturally aligned. We'll need to account for
		 * that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of bytes
		 * for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

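/*
 * For illustration, the shadow buffer allocated above is a single allocation
 * laid out as follows (the sizes are whatever xlog_cil_iovec_space()
 * computes, not fixed values):
 *
 *	+------------------------+  <- lv (from xlog_kvmalloc())
 *	| struct xfs_log_vec     |
 *	+------------------------+  <- lv->lv_iovecp = &lv[1]
 *	| niovecs iovec structs  |
 *	+------------------------+  <- lv->lv_buf, 64-bit aligned
 *	| formatted region data  |
 *	+------------------------+  <- lv + buf_size
 *
 * Only the header and iovec array are zeroed; the data area is written in
 * full by ->iop_format() later, so zeroing it would be wasted work.
 */
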
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}

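/*
 * Worked example of the reuse decision above (sizes are made up for
 * illustration): suppose an inode item was last formatted into an lv with
 * lv_size = 256 bytes.
 *
 *	- A new modification needing a 200 byte shadow lv reuses the existing
 *	  lv: we subtract its old lv_bytes from the accounting and reformat
 *	  in place (the common overwrite case).
 *	- A new modification needing a 512 byte shadow lv cannot fit, so we
 *	  swap in the shadow; xfs_cil_prepare_item() then makes the old lv
 *	  the new shadow so that a later, smaller relog can reuse it.
 */
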
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	uint32_t		released_space)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * We need to take the CIL checkpoint unit reservation on the first
	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
	 * unnecessarily do an atomic op in the fast path here. We don't need to
	 * hold the xc_cil_lock here to clear the XLOG_CIL_EMPTY bit as we are
	 * under the xc_ctx_lock here and that needs to be held exclusively to
	 * reset the XLOG_CIL_EMPTY bit.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	spin_lock(&cil->xc_cil_lock);

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
	}
	tp->t_ticket->t_curr_res -= split_res + ctx_res + len;
	ctx->ticket->t_curr_res += split_res + ctx_res;
	ctx->space_used += len;

	tp->t_ticket->t_curr_res += released_space;
	ctx->space_used -= released_space;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);
	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
}

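/*
 * Worked example of the split accounting above, using made-up numbers: with
 * a 32KB iclog and (say) a 512 byte iclog header, iclog_space is 32256
 * bytes. If the CIL has already used 30000 bytes of that space and this
 * commit adds len = 5000 bytes, then
 *
 *	30000 / 32256 != 35000 / 32256
 *
 * so the checkpoint now crosses an iclog boundary. split_res becomes
 * (5000 + 32255) / 32256 = 1 extra record, and we reserve one more iclog
 * header plus one xlog_op_header for the region that gets split.
 */
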
568 */ 569 static void 570 xlog_discard_endio( 571 struct bio *bio) 572 { 573 struct xfs_cil_ctx *ctx = bio->bi_private; 574 575 INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); 576 queue_work(xfs_discard_wq, &ctx->discard_endio_work); 577 bio_put(bio); 578 } 579 580 static void 581 xlog_discard_busy_extents( 582 struct xfs_mount *mp, 583 struct xfs_cil_ctx *ctx) 584 { 585 struct list_head *list = &ctx->busy_extents; 586 struct xfs_extent_busy *busyp; 587 struct bio *bio = NULL; 588 struct blk_plug plug; 589 int error = 0; 590 591 ASSERT(xfs_has_discard(mp)); 592 593 blk_start_plug(&plug); 594 list_for_each_entry(busyp, list, list) { 595 trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, 596 busyp->length); 597 598 error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, 599 XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), 600 XFS_FSB_TO_BB(mp, busyp->length), 601 GFP_NOFS, &bio); 602 if (error && error != -EOPNOTSUPP) { 603 xfs_info(mp, 604 "discard failed for extent [0x%llx,%u], error %d", 605 (unsigned long long)busyp->bno, 606 busyp->length, 607 error); 608 break; 609 } 610 } 611 612 if (bio) { 613 bio->bi_private = ctx; 614 bio->bi_end_io = xlog_discard_endio; 615 submit_bio(bio); 616 } else { 617 xlog_discard_endio_work(&ctx->discard_endio_work); 618 } 619 blk_finish_plug(&plug); 620 } 621 622 /* 623 * Mark all items committed and clear busy extents. We free the log vector 624 * chains in a separate pass so that we unpin the log items as quickly as 625 * possible. 626 */ 627 static void 628 xlog_cil_committed( 629 struct xfs_cil_ctx *ctx) 630 { 631 struct xfs_mount *mp = ctx->cil->xc_log->l_mp; 632 bool abort = xlog_is_shutdown(ctx->cil->xc_log); 633 634 /* 635 * If the I/O failed, we're aborting the commit and already shutdown. 636 * Wake any commit waiters before aborting the log items so we don't 637 * block async log pushers on callbacks. Async log pushers explicitly do 638 * not wait on log force completion because they may be holding locks 639 * required to unpin items. 640 */ 641 if (abort) { 642 spin_lock(&ctx->cil->xc_push_lock); 643 wake_up_all(&ctx->cil->xc_start_wait); 644 wake_up_all(&ctx->cil->xc_commit_wait); 645 spin_unlock(&ctx->cil->xc_push_lock); 646 } 647 648 xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, 649 ctx->start_lsn, abort); 650 651 xfs_extent_busy_sort(&ctx->busy_extents); 652 xfs_extent_busy_clear(mp, &ctx->busy_extents, 653 xfs_has_discard(mp) && !abort); 654 655 spin_lock(&ctx->cil->xc_push_lock); 656 list_del(&ctx->committing); 657 spin_unlock(&ctx->cil->xc_push_lock); 658 659 xlog_cil_free_logvec(ctx->lv_chain); 660 661 if (!list_empty(&ctx->busy_extents)) 662 xlog_discard_busy_extents(mp, ctx); 663 else 664 kmem_free(ctx); 665 } 666 667 void 668 xlog_cil_process_committed( 669 struct list_head *list) 670 { 671 struct xfs_cil_ctx *ctx; 672 673 while ((ctx = list_first_entry_or_null(list, 674 struct xfs_cil_ctx, iclog_entry))) { 675 list_del(&ctx->iclog_entry); 676 xlog_cil_committed(ctx); 677 } 678 } 679 680 /* 681 * Record the LSN of the iclog we were just granted space to start writing into. 682 * If the context doesn't have a start_lsn recorded, then this iclog will 683 * contain the start record for the checkpoint. Otherwise this write contains 684 * the commit record for the checkpoint. 
685 */ 686 void 687 xlog_cil_set_ctx_write_state( 688 struct xfs_cil_ctx *ctx, 689 struct xlog_in_core *iclog) 690 { 691 struct xfs_cil *cil = ctx->cil; 692 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); 693 694 ASSERT(!ctx->commit_lsn); 695 if (!ctx->start_lsn) { 696 spin_lock(&cil->xc_push_lock); 697 /* 698 * The LSN we need to pass to the log items on transaction 699 * commit is the LSN reported by the first log vector write, not 700 * the commit lsn. If we use the commit record lsn then we can 701 * move the grant write head beyond the tail LSN and overwrite 702 * it. 703 */ 704 ctx->start_lsn = lsn; 705 wake_up_all(&cil->xc_start_wait); 706 spin_unlock(&cil->xc_push_lock); 707 708 /* 709 * Make sure the metadata we are about to overwrite in the log 710 * has been flushed to stable storage before this iclog is 711 * issued. 712 */ 713 spin_lock(&cil->xc_log->l_icloglock); 714 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; 715 spin_unlock(&cil->xc_log->l_icloglock); 716 return; 717 } 718 719 /* 720 * Take a reference to the iclog for the context so that we still hold 721 * it when xlog_write is done and has released it. This means the 722 * context controls when the iclog is released for IO. 723 */ 724 atomic_inc(&iclog->ic_refcnt); 725 726 /* 727 * xlog_state_get_iclog_space() guarantees there is enough space in the 728 * iclog for an entire commit record, so we can attach the context 729 * callbacks now. This needs to be done before we make the commit_lsn 730 * visible to waiters so that checkpoints with commit records in the 731 * same iclog order their IO completion callbacks in the same order that 732 * the commit records appear in the iclog. 733 */ 734 spin_lock(&cil->xc_log->l_icloglock); 735 list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks); 736 spin_unlock(&cil->xc_log->l_icloglock); 737 738 /* 739 * Now we can record the commit LSN and wake anyone waiting for this 740 * sequence to have the ordered commit record assigned to a physical 741 * location in the log. 742 */ 743 spin_lock(&cil->xc_push_lock); 744 ctx->commit_iclog = iclog; 745 ctx->commit_lsn = lsn; 746 wake_up_all(&cil->xc_commit_wait); 747 spin_unlock(&cil->xc_push_lock); 748 } 749 750 751 /* 752 * Ensure that the order of log writes follows checkpoint sequence order. This 753 * relies on the context LSN being zero until the log write has guaranteed the 754 * LSN that the log write will start at via xlog_state_get_iclog_space(). 755 */ 756 enum _record_type { 757 _START_RECORD, 758 _COMMIT_RECORD, 759 }; 760 761 static int 762 xlog_cil_order_write( 763 struct xfs_cil *cil, 764 xfs_csn_t sequence, 765 enum _record_type record) 766 { 767 struct xfs_cil_ctx *ctx; 768 769 restart: 770 spin_lock(&cil->xc_push_lock); 771 list_for_each_entry(ctx, &cil->xc_committing, committing) { 772 /* 773 * Avoid getting stuck in this loop because we were woken by the 774 * shutdown, but then went back to sleep once already in the 775 * shutdown state. 776 */ 777 if (xlog_is_shutdown(cil->xc_log)) { 778 spin_unlock(&cil->xc_push_lock); 779 return -EIO; 780 } 781 782 /* 783 * Higher sequences will wait for this one so skip them. 784 * Don't wait for our own sequence, either. 785 */ 786 if (ctx->sequence >= sequence) 787 continue; 788 789 /* Wait until the LSN for the record has been recorded. 
*/ 790 switch (record) { 791 case _START_RECORD: 792 if (!ctx->start_lsn) { 793 xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock); 794 goto restart; 795 } 796 break; 797 case _COMMIT_RECORD: 798 if (!ctx->commit_lsn) { 799 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); 800 goto restart; 801 } 802 break; 803 } 804 } 805 spin_unlock(&cil->xc_push_lock); 806 return 0; 807 } 808 809 /* 810 * Write out the log vector change now attached to the CIL context. This will 811 * write a start record that needs to be strictly ordered in ascending CIL 812 * sequence order so that log recovery will always use in-order start LSNs when 813 * replaying checkpoints. 814 */ 815 static int 816 xlog_cil_write_chain( 817 struct xfs_cil_ctx *ctx, 818 struct xfs_log_vec *chain, 819 uint32_t chain_len) 820 { 821 struct xlog *log = ctx->cil->xc_log; 822 int error; 823 824 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD); 825 if (error) 826 return error; 827 return xlog_write(log, ctx, chain, ctx->ticket, chain_len); 828 } 829 830 /* 831 * Write out the commit record of a checkpoint transaction to close off a 832 * running log write. These commit records are strictly ordered in ascending CIL 833 * sequence order so that log recovery will always replay the checkpoints in the 834 * correct order. 835 */ 836 static int 837 xlog_cil_write_commit_record( 838 struct xfs_cil_ctx *ctx) 839 { 840 struct xlog *log = ctx->cil->xc_log; 841 struct xlog_op_header ophdr = { 842 .oh_clientid = XFS_TRANSACTION, 843 .oh_tid = cpu_to_be32(ctx->ticket->t_tid), 844 .oh_flags = XLOG_COMMIT_TRANS, 845 }; 846 struct xfs_log_iovec reg = { 847 .i_addr = &ophdr, 848 .i_len = sizeof(struct xlog_op_header), 849 .i_type = XLOG_REG_TYPE_COMMIT, 850 }; 851 struct xfs_log_vec vec = { 852 .lv_niovecs = 1, 853 .lv_iovecp = ®, 854 }; 855 int error; 856 857 if (xlog_is_shutdown(log)) 858 return -EIO; 859 860 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD); 861 if (error) 862 return error; 863 864 /* account for space used by record data */ 865 ctx->ticket->t_curr_res -= reg.i_len; 866 error = xlog_write(log, ctx, &vec, ctx->ticket, reg.i_len); 867 if (error) 868 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); 869 return error; 870 } 871 872 struct xlog_cil_trans_hdr { 873 struct xlog_op_header oph[2]; 874 struct xfs_trans_header thdr; 875 struct xfs_log_iovec lhdr[2]; 876 }; 877 878 /* 879 * Build a checkpoint transaction header to begin the journal transaction. We 880 * need to account for the space used by the transaction header here as it is 881 * not accounted for in xlog_write(). 882 * 883 * This is the only place we write a transaction header, so we also build the 884 * log opheaders that indicate the start of a log transaction and wrap the 885 * transaction header. We keep the start record in it's own log vector rather 886 * than compacting them into a single region as this ends up making the logic 887 * in xlog_write() for handling empty opheaders for start, commit and unmount 888 * records much simpler. 
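/*
 * Ordering example (illustrative): suppose checkpoints with sequences 3 and
 * 4 are pushing concurrently. Before sequence 4 writes its start record,
 * xlog_cil_order_write(cil, 4, _START_RECORD) walks xc_committing and finds
 * sequence 3 with start_lsn still zero, so it sleeps on xc_start_wait. Once
 * sequence 3's first log vector write records its start_lsn (in
 * xlog_cil_set_ctx_write_state() above) and wakes the queue, sequence 4
 * rechecks the list and proceeds. The same handshake repeats for commit
 * records via xc_commit_wait.
 */
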
889 */ 890 static void 891 xlog_cil_build_trans_hdr( 892 struct xfs_cil_ctx *ctx, 893 struct xlog_cil_trans_hdr *hdr, 894 struct xfs_log_vec *lvhdr, 895 int num_iovecs) 896 { 897 struct xlog_ticket *tic = ctx->ticket; 898 __be32 tid = cpu_to_be32(tic->t_tid); 899 900 memset(hdr, 0, sizeof(*hdr)); 901 902 /* Log start record */ 903 hdr->oph[0].oh_tid = tid; 904 hdr->oph[0].oh_clientid = XFS_TRANSACTION; 905 hdr->oph[0].oh_flags = XLOG_START_TRANS; 906 907 /* log iovec region pointer */ 908 hdr->lhdr[0].i_addr = &hdr->oph[0]; 909 hdr->lhdr[0].i_len = sizeof(struct xlog_op_header); 910 hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER; 911 912 /* log opheader */ 913 hdr->oph[1].oh_tid = tid; 914 hdr->oph[1].oh_clientid = XFS_TRANSACTION; 915 hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header)); 916 917 /* transaction header in host byte order format */ 918 hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC; 919 hdr->thdr.th_type = XFS_TRANS_CHECKPOINT; 920 hdr->thdr.th_tid = tic->t_tid; 921 hdr->thdr.th_num_items = num_iovecs; 922 923 /* log iovec region pointer */ 924 hdr->lhdr[1].i_addr = &hdr->oph[1]; 925 hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) + 926 sizeof(struct xfs_trans_header); 927 hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR; 928 929 lvhdr->lv_niovecs = 2; 930 lvhdr->lv_iovecp = &hdr->lhdr[0]; 931 lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len; 932 lvhdr->lv_next = ctx->lv_chain; 933 934 tic->t_curr_res -= lvhdr->lv_bytes; 935 } 936 937 /* 938 * Pull all the log vectors off the items in the CIL, and remove the items from 939 * the CIL. We don't need the CIL lock here because it's only needed on the 940 * transaction commit side which is currently locked out by the flush lock. 941 * 942 * If a log item is marked with a whiteout, we do not need to write it to the 943 * journal and so we just move them to the whiteout list for the caller to 944 * dispose of appropriately. 945 */ 946 static void 947 xlog_cil_build_lv_chain( 948 struct xfs_cil *cil, 949 struct xfs_cil_ctx *ctx, 950 struct list_head *whiteouts, 951 uint32_t *num_iovecs, 952 uint32_t *num_bytes) 953 { 954 struct xfs_log_vec *lv = NULL; 955 956 while (!list_empty(&cil->xc_cil)) { 957 struct xfs_log_item *item; 958 959 item = list_first_entry(&cil->xc_cil, 960 struct xfs_log_item, li_cil); 961 962 if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) { 963 list_move(&item->li_cil, whiteouts); 964 trace_xfs_cil_whiteout_skip(item); 965 continue; 966 } 967 968 list_del_init(&item->li_cil); 969 if (!ctx->lv_chain) 970 ctx->lv_chain = item->li_lv; 971 else 972 lv->lv_next = item->li_lv; 973 lv = item->li_lv; 974 item->li_lv = NULL; 975 *num_iovecs += lv->lv_niovecs; 976 977 /* we don't write ordered log vectors */ 978 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) 979 *num_bytes += lv->lv_bytes; 980 } 981 } 982 983 static void 984 xlog_cil_cleanup_whiteouts( 985 struct list_head *whiteouts) 986 { 987 while (!list_empty(whiteouts)) { 988 struct xfs_log_item *item = list_first_entry(whiteouts, 989 struct xfs_log_item, li_cil); 990 list_del_init(&item->li_cil); 991 trace_xfs_cil_whiteout_unpin(item); 992 item->li_ops->iop_unpin(item, 1); 993 } 994 } 995 996 /* 997 * Push the Committed Item List to the log. 998 * 999 * If the current sequence is the same as xc_push_seq we need to do a flush. If 1000 * xc_push_seq is less than the current sequence, then it has already been 1001 * flushed and we don't need to do anything - the caller will wait for it to 1002 * complete if necessary. 
1003 * 1004 * xc_push_seq is checked unlocked against the sequence number for a match. 1005 * Hence we can allow log forces to run racily and not issue pushes for the 1006 * same sequence twice. If we get a race between multiple pushes for the same 1007 * sequence they will block on the first one and then abort, hence avoiding 1008 * needless pushes. 1009 */ 1010 static void 1011 xlog_cil_push_work( 1012 struct work_struct *work) 1013 { 1014 struct xfs_cil_ctx *ctx = 1015 container_of(work, struct xfs_cil_ctx, push_work); 1016 struct xfs_cil *cil = ctx->cil; 1017 struct xlog *log = cil->xc_log; 1018 struct xfs_cil_ctx *new_ctx; 1019 int num_iovecs = 0; 1020 int num_bytes = 0; 1021 int error = 0; 1022 struct xlog_cil_trans_hdr thdr; 1023 struct xfs_log_vec lvhdr = { NULL }; 1024 xfs_csn_t push_seq; 1025 bool push_commit_stable; 1026 LIST_HEAD (whiteouts); 1027 1028 new_ctx = xlog_cil_ctx_alloc(); 1029 new_ctx->ticket = xlog_cil_ticket_alloc(log); 1030 1031 down_write(&cil->xc_ctx_lock); 1032 1033 spin_lock(&cil->xc_push_lock); 1034 push_seq = cil->xc_push_seq; 1035 ASSERT(push_seq <= ctx->sequence); 1036 push_commit_stable = cil->xc_push_commit_stable; 1037 cil->xc_push_commit_stable = false; 1038 1039 /* 1040 * As we are about to switch to a new, empty CIL context, we no longer 1041 * need to throttle tasks on CIL space overruns. Wake any waiters that 1042 * the hard push throttle may have caught so they can start committing 1043 * to the new context. The ctx->xc_push_lock provides the serialisation 1044 * necessary for safely using the lockless waitqueue_active() check in 1045 * this context. 1046 */ 1047 if (waitqueue_active(&cil->xc_push_wait)) 1048 wake_up_all(&cil->xc_push_wait); 1049 1050 /* 1051 * Check if we've anything to push. If there is nothing, then we don't 1052 * move on to a new sequence number and so we have to be able to push 1053 * this sequence again later. 1054 */ 1055 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) { 1056 cil->xc_push_seq = 0; 1057 spin_unlock(&cil->xc_push_lock); 1058 goto out_skip; 1059 } 1060 1061 1062 /* check for a previously pushed sequence */ 1063 if (push_seq < ctx->sequence) { 1064 spin_unlock(&cil->xc_push_lock); 1065 goto out_skip; 1066 } 1067 1068 /* 1069 * We are now going to push this context, so add it to the committing 1070 * list before we do anything else. This ensures that anyone waiting on 1071 * this push can easily detect the difference between a "push in 1072 * progress" and "CIL is empty, nothing to do". 1073 * 1074 * IOWs, a wait loop can now check for: 1075 * the current sequence not being found on the committing list; 1076 * an empty CIL; and 1077 * an unchanged sequence number 1078 * to detect a push that had nothing to do and therefore does not need 1079 * waiting on. If the CIL is not empty, we get put on the committing 1080 * list before emptying the CIL and bumping the sequence number. Hence 1081 * an empty CIL and an unchanged sequence number means we jumped out 1082 * above after doing nothing. 1083 * 1084 * Hence the waiter will either find the commit sequence on the 1085 * committing list or the sequence number will be unchanged and the CIL 1086 * still dirty. In that latter case, the push has not yet started, and 1087 * so the waiter will have to continue trying to check the CIL 1088 * committing list until it is found. In extreme cases of delay, the 1089 * sequence may fully commit between the attempts the wait makes to wait 1090 * on the commit sequence. 
1091 */ 1092 list_add(&ctx->committing, &cil->xc_committing); 1093 spin_unlock(&cil->xc_push_lock); 1094 1095 xlog_cil_build_lv_chain(cil, ctx, &whiteouts, &num_iovecs, &num_bytes); 1096 1097 /* 1098 * Switch the contexts so we can drop the context lock and move out 1099 * of a shared context. We can't just go straight to the commit record, 1100 * though - we need to synchronise with previous and future commits so 1101 * that the commit records are correctly ordered in the log to ensure 1102 * that we process items during log IO completion in the correct order. 1103 * 1104 * For example, if we get an EFI in one checkpoint and the EFD in the 1105 * next (e.g. due to log forces), we do not want the checkpoint with 1106 * the EFD to be committed before the checkpoint with the EFI. Hence 1107 * we must strictly order the commit records of the checkpoints so 1108 * that: a) the checkpoint callbacks are attached to the iclogs in the 1109 * correct order; and b) the checkpoints are replayed in correct order 1110 * in log recovery. 1111 * 1112 * Hence we need to add this context to the committing context list so 1113 * that higher sequences will wait for us to write out a commit record 1114 * before they do. 1115 * 1116 * xfs_log_force_seq requires us to mirror the new sequence into the cil 1117 * structure atomically with the addition of this sequence to the 1118 * committing list. This also ensures that we can do unlocked checks 1119 * against the current sequence in log forces without risking 1120 * deferencing a freed context pointer. 1121 */ 1122 spin_lock(&cil->xc_push_lock); 1123 xlog_cil_ctx_switch(cil, new_ctx); 1124 spin_unlock(&cil->xc_push_lock); 1125 up_write(&cil->xc_ctx_lock); 1126 1127 /* 1128 * Build a checkpoint transaction header and write it to the log to 1129 * begin the transaction. We need to account for the space used by the 1130 * transaction header here as it is not accounted for in xlog_write(). 1131 */ 1132 xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs); 1133 num_bytes += lvhdr.lv_bytes; 1134 1135 error = xlog_cil_write_chain(ctx, &lvhdr, num_bytes); 1136 if (error) 1137 goto out_abort_free_ticket; 1138 1139 error = xlog_cil_write_commit_record(ctx); 1140 if (error) 1141 goto out_abort_free_ticket; 1142 1143 xfs_log_ticket_ungrant(log, ctx->ticket); 1144 1145 /* 1146 * If the checkpoint spans multiple iclogs, wait for all previous iclogs 1147 * to complete before we submit the commit_iclog. We can't use state 1148 * checks for this - ACTIVE can be either a past completed iclog or a 1149 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a 1150 * past or future iclog awaiting IO or ordered IO completion to be run. 1151 * In the latter case, if it's a future iclog and we wait on it, the we 1152 * will hang because it won't get processed through to ic_force_wait 1153 * wakeup until this commit_iclog is written to disk. Hence we use the 1154 * iclog header lsn and compare it to the commit lsn to determine if we 1155 * need to wait on iclogs or not. 1156 */ 1157 spin_lock(&log->l_icloglock); 1158 if (ctx->start_lsn != ctx->commit_lsn) { 1159 xfs_lsn_t plsn; 1160 1161 plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn); 1162 if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) { 1163 /* 1164 * Waiting on ic_force_wait orders the completion of 1165 * iclogs older than ic_prev. Hence we only need to wait 1166 * on the most recent older iclog here. 
1167 */ 1168 xlog_wait_on_iclog(ctx->commit_iclog->ic_prev); 1169 spin_lock(&log->l_icloglock); 1170 } 1171 1172 /* 1173 * We need to issue a pre-flush so that the ordering for this 1174 * checkpoint is correctly preserved down to stable storage. 1175 */ 1176 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; 1177 } 1178 1179 /* 1180 * The commit iclog must be written to stable storage to guarantee 1181 * journal IO vs metadata writeback IO is correctly ordered on stable 1182 * storage. 1183 * 1184 * If the push caller needs the commit to be immediately stable and the 1185 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it 1186 * will be written when released, switch it's state to WANT_SYNC right 1187 * now. 1188 */ 1189 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; 1190 if (push_commit_stable && 1191 ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE) 1192 xlog_state_switch_iclogs(log, ctx->commit_iclog, 0); 1193 xlog_state_release_iclog(log, ctx->commit_iclog); 1194 1195 /* Not safe to reference ctx now! */ 1196 1197 spin_unlock(&log->l_icloglock); 1198 xlog_cil_cleanup_whiteouts(&whiteouts); 1199 return; 1200 1201 out_skip: 1202 up_write(&cil->xc_ctx_lock); 1203 xfs_log_ticket_put(new_ctx->ticket); 1204 kmem_free(new_ctx); 1205 return; 1206 1207 out_abort_free_ticket: 1208 xfs_log_ticket_ungrant(log, ctx->ticket); 1209 ASSERT(xlog_is_shutdown(log)); 1210 xlog_cil_cleanup_whiteouts(&whiteouts); 1211 if (!ctx->commit_iclog) { 1212 xlog_cil_committed(ctx); 1213 return; 1214 } 1215 spin_lock(&log->l_icloglock); 1216 xlog_state_release_iclog(log, ctx->commit_iclog); 1217 /* Not safe to reference ctx now! */ 1218 spin_unlock(&log->l_icloglock); 1219 } 1220 1221 /* 1222 * We need to push CIL every so often so we don't cache more than we can fit in 1223 * the log. The limit really is that a checkpoint can't be more than half the 1224 * log (the current checkpoint is not allowed to overwrite the previous 1225 * checkpoint), but commit latency and memory usage limit this to a smaller 1226 * size. 1227 */ 1228 static void 1229 xlog_cil_push_background( 1230 struct xlog *log) __releases(cil->xc_ctx_lock) 1231 { 1232 struct xfs_cil *cil = log->l_cilp; 1233 1234 /* 1235 * The cil won't be empty because we are called while holding the 1236 * context lock so whatever we added to the CIL will still be there. 1237 */ 1238 ASSERT(!list_empty(&cil->xc_cil)); 1239 ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)); 1240 1241 /* 1242 * Don't do a background push if we haven't used up all the 1243 * space available yet. 1244 */ 1245 if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) { 1246 up_read(&cil->xc_ctx_lock); 1247 return; 1248 } 1249 1250 spin_lock(&cil->xc_push_lock); 1251 if (cil->xc_push_seq < cil->xc_current_sequence) { 1252 cil->xc_push_seq = cil->xc_current_sequence; 1253 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work); 1254 } 1255 1256 /* 1257 * Drop the context lock now, we can't hold that if we need to sleep 1258 * because we are over the blocking threshold. The push_lock is still 1259 * held, so blocking threshold sleep/wakeup is still correctly 1260 * serialised here. 1261 */ 1262 up_read(&cil->xc_ctx_lock); 1263 1264 /* 1265 * If we are well over the space limit, throttle the work that is being 1266 * done until the push work on this context has begun. 
Enforce the hard 1267 * throttle on all transaction commits once it has been activated, even 1268 * if the committing transactions have resulted in the space usage 1269 * dipping back down under the hard limit. 1270 * 1271 * The ctx->xc_push_lock provides the serialisation necessary for safely 1272 * using the lockless waitqueue_active() check in this context. 1273 */ 1274 if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) || 1275 waitqueue_active(&cil->xc_push_wait)) { 1276 trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket); 1277 ASSERT(cil->xc_ctx->space_used < log->l_logsize); 1278 xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock); 1279 return; 1280 } 1281 1282 spin_unlock(&cil->xc_push_lock); 1283 1284 } 1285 1286 /* 1287 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence 1288 * number that is passed. When it returns, the work will be queued for 1289 * @push_seq, but it won't be completed. 1290 * 1291 * If the caller is performing a synchronous force, we will flush the workqueue 1292 * to get previously queued work moving to minimise the wait time they will 1293 * undergo waiting for all outstanding pushes to complete. The caller is 1294 * expected to do the required waiting for push_seq to complete. 1295 * 1296 * If the caller is performing an async push, we need to ensure that the 1297 * checkpoint is fully flushed out of the iclogs when we finish the push. If we 1298 * don't do this, then the commit record may remain sitting in memory in an 1299 * ACTIVE iclog. This then requires another full log force to push to disk, 1300 * which defeats the purpose of having an async, non-blocking CIL force 1301 * mechanism. Hence in this case we need to pass a flag to the push work to 1302 * indicate it needs to flush the commit record itself. 1303 */ 1304 static void 1305 xlog_cil_push_now( 1306 struct xlog *log, 1307 xfs_lsn_t push_seq, 1308 bool async) 1309 { 1310 struct xfs_cil *cil = log->l_cilp; 1311 1312 if (!cil) 1313 return; 1314 1315 ASSERT(push_seq && push_seq <= cil->xc_current_sequence); 1316 1317 /* start on any pending background push to minimise wait time on it */ 1318 if (!async) 1319 flush_workqueue(cil->xc_push_wq); 1320 1321 spin_lock(&cil->xc_push_lock); 1322 1323 /* 1324 * If this is an async flush request, we always need to set the 1325 * xc_push_commit_stable flag even if something else has already queued 1326 * a push. The flush caller is asking for the CIL to be on stable 1327 * storage when the next push completes, so regardless of who has queued 1328 * the push, the flush requires stable semantics from it. 1329 */ 1330 cil->xc_push_commit_stable = async; 1331 1332 /* 1333 * If the CIL is empty or we've already pushed the sequence then 1334 * there's no more work that we need to do. 
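/*
 * Illustrative numbers only (the real thresholds are the
 * XLOG_CIL_SPACE_LIMIT() and XLOG_CIL_BLOCKING_SPACE_LIMIT() macros in
 * xfs_log_priv.h, not the figures below): if the background limit worked
 * out to 8MB on a given log and the blocking limit to 16MB, then a context
 * with 4MB of space used commits without queueing anything, one with 10MB
 * queues background push work and returns, and one with 20MB queues the
 * work and then sleeps on xc_push_wait until the push starts.
 */
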
1335 */ 1336 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) || 1337 push_seq <= cil->xc_push_seq) { 1338 spin_unlock(&cil->xc_push_lock); 1339 return; 1340 } 1341 1342 cil->xc_push_seq = push_seq; 1343 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work); 1344 spin_unlock(&cil->xc_push_lock); 1345 } 1346 1347 bool 1348 xlog_cil_empty( 1349 struct xlog *log) 1350 { 1351 struct xfs_cil *cil = log->l_cilp; 1352 bool empty = false; 1353 1354 spin_lock(&cil->xc_push_lock); 1355 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) 1356 empty = true; 1357 spin_unlock(&cil->xc_push_lock); 1358 return empty; 1359 } 1360 1361 /* 1362 * If there are intent done items in this transaction and the related intent was 1363 * committed in the current (same) CIL checkpoint, we don't need to write either 1364 * the intent or intent done item to the journal as the change will be 1365 * journalled atomically within this checkpoint. As we cannot remove items from 1366 * the CIL here, mark the related intent with a whiteout so that the CIL push 1367 * can remove it rather than writing it to the journal. Then remove the intent 1368 * done item from the current transaction and release it so it doesn't get put 1369 * into the CIL at all. 1370 */ 1371 static uint32_t 1372 xlog_cil_process_intents( 1373 struct xfs_cil *cil, 1374 struct xfs_trans *tp) 1375 { 1376 struct xfs_log_item *lip, *ilip, *next; 1377 uint32_t len = 0; 1378 1379 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { 1380 if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE)) 1381 continue; 1382 1383 ilip = lip->li_ops->iop_intent(lip); 1384 if (!ilip || !xlog_item_in_current_chkpt(cil, ilip)) 1385 continue; 1386 set_bit(XFS_LI_WHITEOUT, &ilip->li_flags); 1387 trace_xfs_cil_whiteout_mark(ilip); 1388 len += ilip->li_lv->lv_bytes; 1389 kmem_free(ilip->li_lv); 1390 ilip->li_lv = NULL; 1391 1392 xfs_trans_del_item(lip); 1393 lip->li_ops->iop_release(lip); 1394 } 1395 return len; 1396 } 1397 1398 /* 1399 * Commit a transaction with the given vector to the Committed Item List. 1400 * 1401 * To do this, we need to format the item, pin it in memory if required and 1402 * account for the space used by the transaction. Once we have done that we 1403 * need to release the unused reservation for the transaction, attach the 1404 * transaction to the checkpoint context so we carry the busy extents through 1405 * to checkpoint completion, and then unlock all the items in the transaction. 1406 * 1407 * Called with the context lock already held in read mode to lock out 1408 * background commit, returns without it held once background commits are 1409 * allowed again. 1410 */ 1411 void 1412 xlog_cil_commit( 1413 struct xlog *log, 1414 struct xfs_trans *tp, 1415 xfs_csn_t *commit_seq, 1416 bool regrant) 1417 { 1418 struct xfs_cil *cil = log->l_cilp; 1419 struct xfs_log_item *lip, *next; 1420 uint32_t released_space = 0; 1421 1422 /* 1423 * Do all necessary memory allocation before we lock the CIL. 1424 * This ensures the allocation does not deadlock with a CIL 1425 * push in memory reclaim (e.g. from kswapd). 
1426 */ 1427 xlog_cil_alloc_shadow_bufs(log, tp); 1428 1429 /* lock out background commit */ 1430 down_read(&cil->xc_ctx_lock); 1431 1432 if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE) 1433 released_space = xlog_cil_process_intents(cil, tp); 1434 1435 xlog_cil_insert_items(log, tp, released_space); 1436 1437 if (regrant && !xlog_is_shutdown(log)) 1438 xfs_log_ticket_regrant(log, tp->t_ticket); 1439 else 1440 xfs_log_ticket_ungrant(log, tp->t_ticket); 1441 tp->t_ticket = NULL; 1442 xfs_trans_unreserve_and_mod_sb(tp); 1443 1444 /* 1445 * Once all the items of the transaction have been copied to the CIL, 1446 * the items can be unlocked and possibly freed. 1447 * 1448 * This needs to be done before we drop the CIL context lock because we 1449 * have to update state in the log items and unlock them before they go 1450 * to disk. If we don't, then the CIL checkpoint can race with us and 1451 * we can run checkpoint completion before we've updated and unlocked 1452 * the log items. This affects (at least) processing of stale buffers, 1453 * inodes and EFIs. 1454 */ 1455 trace_xfs_trans_commit_items(tp, _RET_IP_); 1456 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { 1457 xfs_trans_del_item(lip); 1458 if (lip->li_ops->iop_committing) 1459 lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence); 1460 } 1461 if (commit_seq) 1462 *commit_seq = cil->xc_ctx->sequence; 1463 1464 /* xlog_cil_push_background() releases cil->xc_ctx_lock */ 1465 xlog_cil_push_background(log); 1466 } 1467 1468 /* 1469 * Flush the CIL to stable storage but don't wait for it to complete. This 1470 * requires the CIL push to ensure the commit record for the push hits the disk, 1471 * but otherwise is no different to a push done from a log force. 1472 */ 1473 void 1474 xlog_cil_flush( 1475 struct xlog *log) 1476 { 1477 xfs_csn_t seq = log->l_cilp->xc_current_sequence; 1478 1479 trace_xfs_log_force(log->l_mp, seq, _RET_IP_); 1480 xlog_cil_push_now(log, seq, true); 1481 1482 /* 1483 * If the CIL is empty, make sure that any previous checkpoint that may 1484 * still be in an active iclog is pushed to stable storage. 1485 */ 1486 if (list_empty(&log->l_cilp->xc_cil)) 1487 xfs_log_force(log->l_mp, 0); 1488 } 1489 1490 /* 1491 * Conditionally push the CIL based on the sequence passed in. 1492 * 1493 * We only need to push if we haven't already pushed the sequence number given. 1494 * Hence the only time we will trigger a push here is if the push sequence is 1495 * the same as the current context. 1496 * 1497 * We return the current commit lsn to allow the callers to determine if a 1498 * iclog flush is necessary following this call. 1499 */ 1500 xfs_lsn_t 1501 xlog_cil_force_seq( 1502 struct xlog *log, 1503 xfs_csn_t sequence) 1504 { 1505 struct xfs_cil *cil = log->l_cilp; 1506 struct xfs_cil_ctx *ctx; 1507 xfs_lsn_t commit_lsn = NULLCOMMITLSN; 1508 1509 ASSERT(sequence <= cil->xc_current_sequence); 1510 1511 if (!sequence) 1512 sequence = cil->xc_current_sequence; 1513 trace_xfs_log_force(log->l_mp, sequence, _RET_IP_); 1514 1515 /* 1516 * check to see if we need to force out the current context. 1517 * xlog_cil_push() handles racing pushes for the same sequence, 1518 * so no need to deal with it here. 1519 */ 1520 restart: 1521 xlog_cil_push_now(log, sequence, false); 1522 1523 /* 1524 * See if we can find a previous sequence still committing. 1525 * We need to wait for all previous sequence commits to complete 1526 * before allowing the force of push_seq to go ahead. Hence block 1527 * on commits for those as well. 
1528 */ 1529 spin_lock(&cil->xc_push_lock); 1530 list_for_each_entry(ctx, &cil->xc_committing, committing) { 1531 /* 1532 * Avoid getting stuck in this loop because we were woken by the 1533 * shutdown, but then went back to sleep once already in the 1534 * shutdown state. 1535 */ 1536 if (xlog_is_shutdown(log)) 1537 goto out_shutdown; 1538 if (ctx->sequence > sequence) 1539 continue; 1540 if (!ctx->commit_lsn) { 1541 /* 1542 * It is still being pushed! Wait for the push to 1543 * complete, then start again from the beginning. 1544 */ 1545 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); 1546 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); 1547 goto restart; 1548 } 1549 if (ctx->sequence != sequence) 1550 continue; 1551 /* found it! */ 1552 commit_lsn = ctx->commit_lsn; 1553 } 1554 1555 /* 1556 * The call to xlog_cil_push_now() executes the push in the background. 1557 * Hence by the time we have got here it our sequence may not have been 1558 * pushed yet. This is true if the current sequence still matches the 1559 * push sequence after the above wait loop and the CIL still contains 1560 * dirty objects. This is guaranteed by the push code first adding the 1561 * context to the committing list before emptying the CIL. 1562 * 1563 * Hence if we don't find the context in the committing list and the 1564 * current sequence number is unchanged then the CIL contents are 1565 * significant. If the CIL is empty, if means there was nothing to push 1566 * and that means there is nothing to wait for. If the CIL is not empty, 1567 * it means we haven't yet started the push, because if it had started 1568 * we would have found the context on the committing list. 1569 */ 1570 if (sequence == cil->xc_current_sequence && 1571 !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) { 1572 spin_unlock(&cil->xc_push_lock); 1573 goto restart; 1574 } 1575 1576 spin_unlock(&cil->xc_push_lock); 1577 return commit_lsn; 1578 1579 /* 1580 * We detected a shutdown in progress. We need to trigger the log force 1581 * to pass through it's iclog state machine error handling, even though 1582 * we are already in a shutdown state. Hence we can't return 1583 * NULLCOMMITLSN here as that has special meaning to log forces (i.e. 1584 * LSN is already stable), so we return a zero LSN instead. 1585 */ 1586 out_shutdown: 1587 spin_unlock(&cil->xc_push_lock); 1588 return 0; 1589 } 1590 1591 /* 1592 * Perform initial CIL structure initialisation. 1593 */ 1594 int 1595 xlog_cil_init( 1596 struct xlog *log) 1597 { 1598 struct xfs_cil *cil; 1599 struct xfs_cil_ctx *ctx; 1600 1601 cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL); 1602 if (!cil) 1603 return -ENOMEM; 1604 /* 1605 * Limit the CIL pipeline depth to 4 concurrent works to bound the 1606 * concurrency the log spinlocks will be exposed to. 
1607 */ 1608 cil->xc_push_wq = alloc_workqueue("xfs-cil/%s", 1609 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND), 1610 4, log->l_mp->m_super->s_id); 1611 if (!cil->xc_push_wq) 1612 goto out_destroy_cil; 1613 1614 INIT_LIST_HEAD(&cil->xc_cil); 1615 INIT_LIST_HEAD(&cil->xc_committing); 1616 spin_lock_init(&cil->xc_cil_lock); 1617 spin_lock_init(&cil->xc_push_lock); 1618 init_waitqueue_head(&cil->xc_push_wait); 1619 init_rwsem(&cil->xc_ctx_lock); 1620 init_waitqueue_head(&cil->xc_start_wait); 1621 init_waitqueue_head(&cil->xc_commit_wait); 1622 cil->xc_log = log; 1623 log->l_cilp = cil; 1624 1625 ctx = xlog_cil_ctx_alloc(); 1626 xlog_cil_ctx_switch(cil, ctx); 1627 1628 return 0; 1629 1630 out_destroy_cil: 1631 kmem_free(cil); 1632 return -ENOMEM; 1633 } 1634 1635 void 1636 xlog_cil_destroy( 1637 struct xlog *log) 1638 { 1639 struct xfs_cil *cil = log->l_cilp; 1640 1641 if (cil->xc_ctx) { 1642 if (cil->xc_ctx->ticket) 1643 xfs_log_ticket_put(cil->xc_ctx->ticket); 1644 kmem_free(cil->xc_ctx); 1645 } 1646 1647 ASSERT(list_empty(&cil->xc_cil)); 1648 ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)); 1649 destroy_workqueue(cil->xc_push_wq); 1650 kmem_free(cil); 1651 } 1652 1653