// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
			niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
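
/*
 * Worked example (made-up numbers, not used by the code): the shadow buffer
 * for a log item is a single allocation laid out as
 *
 *	[struct xfs_log_vec][niovecs * struct xfs_log_iovec][pad][data]
 *
 * so if the log vector header plus two iovec headers came to, say, 90 bytes,
 * xlog_cil_iovec_space(2) would return round_up(90, 8) == 96 and the item's
 * data region would start 96 bytes into the buffer, 64-bit aligned. Only the
 * rounding behaviour matters here; the real struct sizes differ.
 */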

/*
 * Shadow buffers can be large, so we need to use kvmalloc() here to ensure
 * success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts to fall
 * back to vmalloc, so we can't actually do anything useful with gfp flags to
 * control the kmalloc() behaviour within kvmalloc(). Hence kmalloc() will do
 * direct reclaim and compaction in the slow path, both of which are
 * horrendously expensive. We just want kmalloc to fail fast and fall back to
 * vmalloc if it can't get something straight away from the free lists or
 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
 *
 * Also, we are in memalloc_nofs_save task context here, so despite the use of
 * GFP_KERNEL here, we are actually going to be doing GFP_NOFS allocations.
 * This is actually the only way to make vmalloc() do GFP_NOFS allocations, so
 * let's all just pretend this is a GFP_KERNEL context operation....
 */
static inline void *
xlog_cil_kvmalloc(
	size_t		buf_size)
{
	gfp_t		flags = GFP_KERNEL;
	void		*p;

	flags &= ~__GFP_DIRECT_RECLAIM;
	flags |= __GFP_NOWARN | __GFP_NORETRY;
	do {
		p = kmalloc(buf_size, flags);
		if (!p)
			p = vmalloc(buf_size);
	} while (!p);

	return p;
}
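
/*
 * Usage sketch (illustrative, not part of this file): the GFP_KERNEL flags in
 * xlog_cil_kvmalloc() only behave as GFP_NOFS because callers already run
 * inside a memalloc_nofs_save() region (from <linux/sched/mm.h>), which XFS
 * enters for transaction contexts. A hypothetical standalone caller would
 * look like this:
 */
static inline void *
example_nofs_kvmalloc(
	size_t		size)
{
	unsigned int	nofs_flags = memalloc_nofs_save();
	void		*p;

	/* all GFP_KERNEL allocations in this region are scoped to NOFS */
	p = xlog_cil_kvmalloc(size);
	memalloc_nofs_restore(nofs_flags);
	return p;
}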

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start of
		 * the next one is naturally aligned. We'll need to account for
		 * that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of bytes
		 * for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_cil_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format a log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}
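
/*
 * Illustrative sketch of the other side of this contract (hypothetical item
 * type, not part of XFS): an ->iop_format implementation copies its regions
 * into the log vector prepared above, typically via the xlog_copy_iovec()
 * helper from xfs_log.h. The region type below is chosen arbitrarily for the
 * example:
 */
struct example_item_format {		/* hypothetical on-disk format */
	uint32_t	ef_magic;
	uint32_t	ef_flags;
};

struct example_item {			/* hypothetical in-core item */
	struct xfs_log_item		ei_item;
	struct example_item_format	ei_format;
};

static inline void
example_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct example_item	*eip =
		container_of(lip, struct example_item, ei_item);
	struct xfs_log_iovec	*vecp = NULL;

	/* copy the format structure into the first (and only) region */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BFORMAT,
			&eip->ei_format, sizeof(eip->ei_format));
}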

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;
	ctx->nvecs += diff_iovecs;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			" log items: %d bytes (iov hdrs: %d bytes)",
			len, iovhdr_res);
		xfs_warn(log->l_mp, " split region headers: %d bytes",
			split_res);
		xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
}
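
/*
 * Worked example for the header space calculation above (all numbers made
 * up): assume 32k iclogs with a 512 byte header, so iclog_space = 32256
 * bytes. If the checkpoint had used 10000 bytes and this commit adds
 * len = 102400 bytes, the checkpoint now crosses iclog boundaries, and the
 * new data can span at most (102400 + 32255) / 32256 = 4 iclogs. Each
 * potential split needs another record header and ophdr, so with a 12 byte
 * xlog_op_header the context ticket grows by 4 * (512 + 12) = 2096 bytes,
 * all stolen from the committing transaction's ticket.
 */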

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(xfs_has_discard(mp));

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
		"discard failed for extent [0x%llx,%u], error %d",
				(unsigned long long)busyp->bno,
				busyp->length,
				error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the grant write head beyond the tail LSN and overwrite
		 * it.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);

		/*
		 * Make sure the metadata we are about to overwrite in the log
		 * has been flushed to stable storage before this iclog is
		 * issued.
		 */
		spin_lock(&cil->xc_log->l_icloglock);
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
		spin_unlock(&cil->xc_log->l_icloglock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now. This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx	*ctx,
	struct xfs_log_vec	*chain,
	uint32_t		chain_len)
{
	struct xlog		*log = ctx->cil->xc_log;
	int			error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, chain, ctx->ticket, chain_len);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx	*ctx)
{
	struct xlog		*log = ctx->cil->xc_log;
	struct xlog_op_header	ophdr = {
		.oh_clientid = XFS_TRANSACTION,
		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
		.oh_flags = XLOG_COMMIT_TRANS,
	};
	struct xfs_log_iovec	reg = {
		.i_addr = &ophdr,
		.i_len = sizeof(struct xlog_op_header),
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec	vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int			error;

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	/* account for space used by record data */
	ctx->ticket->t_curr_res -= reg.i_len;
	error = xlog_write(log, ctx, &vec, ctx->ticket, reg.i_len);
	if (error)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

struct xlog_cil_trans_hdr {
	struct xlog_op_header	oph[2];
	struct xfs_trans_header	thdr;
	struct xfs_log_iovec	lhdr[2];
};
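
/*
 * For reference, the overall layout of a checkpoint in the log is roughly
 * (sketch only, ignoring iclog record headers and region splitting):
 *
 *	| start ophdr | trans hdr ophdr + xfs_trans_header |
 *	| item ophdrs + item regions ...                    | commit ophdr |
 *
 * The first two regions come from xlog_cil_build_trans_hdr() below, the item
 * regions from the ctx->lv_chain built by the push, and the trailing commit
 * record from xlog_cil_write_commit_record() above.
 */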

/*
 * Build a checkpoint transaction header to begin the journal transaction. We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
	struct xfs_cil_ctx	*ctx,
	struct xlog_cil_trans_hdr *hdr,
	struct xfs_log_vec	*lvhdr,
	int			num_iovecs)
{
	struct xlog_ticket	*tic = ctx->ticket;
	__be32			tid = cpu_to_be32(tic->t_tid);

	memset(hdr, 0, sizeof(*hdr));

	/* Log start record */
	hdr->oph[0].oh_tid = tid;
	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
	hdr->oph[0].oh_flags = XLOG_START_TRANS;

	/* log iovec region pointer */
	hdr->lhdr[0].i_addr = &hdr->oph[0];
	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

	/* log opheader */
	hdr->oph[1].oh_tid = tid;
	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

	/* transaction header in host byte order format */
	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
	hdr->thdr.th_tid = tic->t_tid;
	hdr->thdr.th_num_items = num_iovecs;

	/* log iovec region pointer */
	hdr->lhdr[1].i_addr = &hdr->oph[1];
	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
			sizeof(struct xfs_trans_header);
	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

	lvhdr->lv_niovecs = 2;
	lvhdr->lv_iovecp = &hdr->lhdr[0];
	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;
	lvhdr->lv_next = ctx->lv_chain;

	tic->t_curr_res -= lvhdr->lv_bytes;
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*new_ctx;
	int			num_iovecs = 0;
	int			num_bytes = 0;
	int			error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_csn_t		push_seq;
	bool			push_commit_stable;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The ctx->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * Pull all the log vectors off the items in the CIL, and remove the
	 * items from the CIL. We don't need the CIL lock here because it's only
	 * needed on the transaction commit side which is currently locked out
	 * by the flush lock.
	 */
	lv = NULL;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			num_bytes += lv->lv_bytes;
	}

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;

	error = xlog_cil_write_chain(ctx, &lvhdr, num_bytes);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	xfs_log_ticket_ungrant(log, ctx->ticket);

	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	xlog_state_release_iclog(log, ctx->commit_iclog);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;

out_abort_free_ticket:
	xfs_log_ticket_ungrant(log, ctx->ticket);
	ASSERT(xlog_is_shutdown(log));
	if (!ctx->commit_iclog) {
		xlog_cil_committed(ctx);
		return;
	}
	spin_lock(&log->l_icloglock);
	xlog_state_release_iclog(log, ctx->commit_iclog);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * Don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The ctx->xc_push_lock provides the serialisation necessary for safely
	 * using the lockless waitqueue_active() check in this context.
	 */
	if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
	    waitqueue_active(&cil->xc_push_wait)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(cil->xc_ctx->space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * If this is an async flush request, we always need to set the
	 * xc_push_commit_stable flag even if something else has already queued
	 * a push. The flush caller is asking for the CIL to be on stable
	 * storage when the next push completes, so regardless of who has queued
	 * the push, the flush requires stable semantics from it.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no more work that we need to do.
	 */
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
	struct xlog		*log,
	struct xfs_trans	*tp,
	xfs_csn_t		*commit_seq,
	bool			regrant)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the disk,
 * but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog	*log)
{
	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * If the CIL is empty, make sure that any previous checkpoint that may
	 * still be in an active iclog is pushed to stable storage.
	 */
	if (list_empty(&log->l_cilp->xc_cil))
		xfs_log_force(log->l_mp, 0);
}
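
/*
 * Usage sketch (illustrative): a caller that needs its changes on stable
 * storage records the commit sequence returned by xlog_cil_commit() and later
 * forces the log to it. In the real code this plumbing lives in
 * xfs_trans_commit() and xfs_log_force_seq(); the hypothetical helper below
 * just shows the shape of the interaction:
 */
static inline int
example_commit_and_force(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp)
{
	xfs_csn_t		seq = 0;

	xlog_cil_commit(mp->m_log, tp, &seq, false);

	/* wait for the checkpoint containing this commit to reach the disk */
	return xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, NULL);
}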

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if a
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here it is possible that our sequence
	 * has not been pushed yet. This is true if the current sequence still
	 * matches the push sequence after the above wait loop and the CIL
	 * still contains dirty objects. This is guaranteed by the push code
	 * first adding the context to the committing list before emptying the
	 * CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not
	 * empty, it means we haven't yet started the push, because if it had
	 * started we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	struct xfs_cil	*cil = lip->li_log->l_cilp;

	if (list_empty(&lip->li_cil))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}
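
/*
 * Usage sketch (illustrative, simplified): one consumer of this check is
 * intent relogging in the deferred ops code, which skips intents that are
 * already in the checkpoint currently being built, since relogging them
 * would not help move the log tail forward:
 */
static inline bool
example_intent_needs_relog(
	struct xfs_log_item	*intent)
{
	/* already in the current checkpoint: relogging buys us nothing */
	return !xfs_log_item_in_current_chkpt(intent);
}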

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	cil->xc_log = log;
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);

	return 0;

out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	destroy_workqueue(log->l_cilp->xc_push_wq);
	kmem_free(log->l_cilp);
}