1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved. 4 */ 5 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_format.h" 9 #include "xfs_log_format.h" 10 #include "xfs_shared.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_mount.h" 13 #include "xfs_extent_busy.h" 14 #include "xfs_trans.h" 15 #include "xfs_trans_priv.h" 16 #include "xfs_log.h" 17 #include "xfs_log_priv.h" 18 #include "xfs_trace.h" 19 20 struct workqueue_struct *xfs_discard_wq; 21 22 /* 23 * Allocate a new ticket. Failing to get a new ticket makes it really hard to 24 * recover, so we don't allow failure here. Also, we allocate in a context that 25 * we don't want to be issuing transactions from, so we need to tell the 26 * allocation code this as well. 27 * 28 * We don't reserve any space for the ticket - we are going to steal whatever 29 * space we require from transactions as they commit. To ensure we reserve all 30 * the space required, we need to set the current reservation of the ticket to 31 * zero so that we know to steal the initial transaction overhead from the 32 * first transaction commit. 33 */ 34 static struct xlog_ticket * 35 xlog_cil_ticket_alloc( 36 struct xlog *log) 37 { 38 struct xlog_ticket *tic; 39 40 tic = xlog_ticket_alloc(log, 0, 1, 0); 41 42 /* 43 * set the current reservation to zero so we know to steal the basic 44 * transaction overhead reservation from the first transaction commit. 45 */ 46 tic->t_curr_res = 0; 47 tic->t_iclog_hdrs = 0; 48 return tic; 49 } 50 51 static inline void 52 xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil) 53 { 54 struct xlog *log = cil->xc_log; 55 56 atomic_set(&cil->xc_iclog_hdrs, 57 (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) / 58 (log->l_iclog_size - log->l_iclog_hsize))); 59 } 60 61 /* 62 * Check if the current log item was first committed in this sequence. 63 * We can't rely on just the log item being in the CIL, we have to check 64 * the recorded commit sequence number. 65 * 66 * Note: for this to be used in a non-racy manner, it has to be called with 67 * CIL flushing locked out. As a result, it should only be used during the 68 * transaction commit process when deciding what to format into the item. 69 */ 70 static bool 71 xlog_item_in_current_chkpt( 72 struct xfs_cil *cil, 73 struct xfs_log_item *lip) 74 { 75 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) 76 return false; 77 78 /* 79 * li_seq is written on the first commit of a log item to record the 80 * first checkpoint it is written to. Hence if it is different to the 81 * current sequence, we're in a new checkpoint. 82 */ 83 return lip->li_seq == READ_ONCE(cil->xc_current_sequence); 84 } 85 86 bool 87 xfs_log_item_in_current_chkpt( 88 struct xfs_log_item *lip) 89 { 90 return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip); 91 } 92 93 /* 94 * Unavoidable forward declaration - xlog_cil_push_work() calls 95 * xlog_cil_ctx_alloc() itself. 96 */ 97 static void xlog_cil_push_work(struct work_struct *work); 98 99 static struct xfs_cil_ctx * 100 xlog_cil_ctx_alloc(void) 101 { 102 struct xfs_cil_ctx *ctx; 103 104 ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS); 105 INIT_LIST_HEAD(&ctx->committing); 106 INIT_LIST_HEAD(&ctx->busy_extents); 107 INIT_LIST_HEAD(&ctx->log_items); 108 INIT_WORK(&ctx->push_work, xlog_cil_push_work); 109 return ctx; 110 } 111 112 /* 113 * Aggregate the CIL per cpu structures into global counts, lists, etc and 114 * clear the percpu state ready for the next context to use. 
This is called 115 * from the push code with the context lock held exclusively, hence nothing else 116 * will be accessing or modifying the per-cpu counters. 117 */ 118 static void 119 xlog_cil_push_pcp_aggregate( 120 struct xfs_cil *cil, 121 struct xfs_cil_ctx *ctx) 122 { 123 struct xlog_cil_pcp *cilpcp; 124 int cpu; 125 126 for_each_online_cpu(cpu) { 127 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu); 128 129 ctx->ticket->t_curr_res += cilpcp->space_reserved; 130 cilpcp->space_reserved = 0; 131 132 if (!list_empty(&cilpcp->busy_extents)) { 133 list_splice_init(&cilpcp->busy_extents, 134 &ctx->busy_extents); 135 } 136 if (!list_empty(&cilpcp->log_items)) 137 list_splice_init(&cilpcp->log_items, &ctx->log_items); 138 139 /* 140 * We're in the middle of switching cil contexts. Reset the 141 * counter we use to detect when the current context is nearing 142 * full. 143 */ 144 cilpcp->space_used = 0; 145 } 146 } 147 148 /* 149 * Aggregate the CIL per-cpu space used counters into the global atomic value. 150 * This is called when the per-cpu counter aggregation will first pass the soft 151 * limit threshold so we can switch to atomic counter aggregation for accurate 152 * detection of hard limit traversal. 153 */ 154 static void 155 xlog_cil_insert_pcp_aggregate( 156 struct xfs_cil *cil, 157 struct xfs_cil_ctx *ctx) 158 { 159 struct xlog_cil_pcp *cilpcp; 160 int cpu; 161 int count = 0; 162 163 /* Trigger atomic updates then aggregate only for the first caller */ 164 if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) 165 return; 166 167 for_each_online_cpu(cpu) { 168 int old, prev; 169 170 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu); 171 do { 172 old = cilpcp->space_used; 173 prev = cmpxchg(&cilpcp->space_used, old, 0); 174 } while (old != prev); 175 count += old; 176 } 177 atomic_add(count, &ctx->space_used); 178 } 179 180 static void 181 xlog_cil_ctx_switch( 182 struct xfs_cil *cil, 183 struct xfs_cil_ctx *ctx) 184 { 185 xlog_cil_set_iclog_hdr_count(cil); 186 set_bit(XLOG_CIL_EMPTY, &cil->xc_flags); 187 set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags); 188 ctx->sequence = ++cil->xc_current_sequence; 189 ctx->cil = cil; 190 cil->xc_ctx = ctx; 191 } 192 193 /* 194 * After the first stage of log recovery is done, we know where the head and 195 * tail of the log are. We need this log initialisation done before we can 196 * initialise the first CIL checkpoint context. 197 * 198 * Here we allocate a log ticket to track space usage during a CIL push. This 199 * ticket is passed to xlog_write() directly so that we don't slowly leak log 200 * space by failing to account for space used by log headers and additional 201 * region headers for split regions. 202 */ 203 void 204 xlog_cil_init_post_recovery( 205 struct xlog *log) 206 { 207 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); 208 log->l_cilp->xc_ctx->sequence = 1; 209 xlog_cil_set_iclog_hdr_count(log->l_cilp); 210 } 211 212 static inline int 213 xlog_cil_iovec_space( 214 uint niovecs) 215 { 216 return round_up((sizeof(struct xfs_log_vec) + 217 niovecs * sizeof(struct xfs_log_iovec)), 218 sizeof(uint64_t)); 219 } 220 221 /* 222 * Allocate or pin log vector buffers for CIL insertion. 223 * 224 * The CIL currently uses disposable buffers for copying a snapshot of the 225 * modified items into the log during a push. 
The biggest problem with this is 226 * the requirement to allocate the disposable buffer during the commit if: 227 * a) it does not exist; or 228 * b) it is too small 229 * 230 * If we do this allocation within xlog_cil_insert_format_items(), it is done 231 * under the xc_ctx_lock, which means that a CIL push cannot occur during 232 * the memory allocation. This means that we have a potential deadlock situation 233 * under low memory conditions when we have lots of dirty metadata pinned in 234 * the CIL and we need a CIL commit to occur to free memory. 235 * 236 * To avoid this, we need to move the memory allocation outside the 237 * xc_ctx_lock, but because the log vector buffers are disposable, that opens 238 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log 239 * vector buffers between the check and the formatting of the item into the 240 * log vector buffer within the xc_ctx_lock. 241 * 242 * Because the log vector buffer needs to be unchanged during the CIL push 243 * process, we cannot share the buffer between the transaction commit (which 244 * modifies the buffer) and the CIL push context that is writing the changes 245 * into the log. This means skipping preallocation of buffer space is 246 * unreliable, but we most definitely do not want to be allocating and freeing 247 * buffers unnecessarily during commits when overwrites can be done safely. 248 * 249 * The simplest solution to this problem is to allocate a shadow buffer when a 250 * log item is committed for the second time, and then to only use this buffer 251 * if necessary. The buffer can remain attached to the log item until such time as 252 * it is needed, and this is the buffer that is reallocated to match the size of 253 * the incoming modification. Then during the formatting of the item we can swap 254 * the active buffer with the new one if we can't reuse the existing buffer. We 255 * don't free the old buffer as it may be reused on the next modification if 256 * its size is right, otherwise we'll free and reallocate it at that point. 257 * 258 * This function builds a vector for the changes in each log item in the 259 * transaction. It then works out the length of the buffer needed for each log 260 * item, allocates them and attaches the vector to the log item in preparation 261 * for the formatting step which occurs under the xc_ctx_lock. 262 * 263 * While this means the memory footprint goes up, it avoids the repeated 264 * alloc/free pattern that repeated modifications of an item would otherwise 265 * cause, and hence minimises the CPU overhead of such behaviour. 266 */ 267 static void 268 xlog_cil_alloc_shadow_bufs( 269 struct xlog *log, 270 struct xfs_trans *tp) 271 { 272 struct xfs_log_item *lip; 273 274 list_for_each_entry(lip, &tp->t_items, li_trans) { 275 struct xfs_log_vec *lv; 276 int niovecs = 0; 277 int nbytes = 0; 278 int buf_size; 279 bool ordered = false; 280 281 /* Skip items which aren't dirty in this transaction. */ 282 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags)) 283 continue; 284 285 /* get number of vecs and size of data to be stored */ 286 lip->li_ops->iop_size(lip, &niovecs, &nbytes); 287 288 /* 289 * Ordered items need to be tracked but we do not wish to write 290 * them. We need a logvec to track the object, but we do not 291 * need an iovec or buffer to be allocated for copying data.
292 */ 293 if (niovecs == XFS_LOG_VEC_ORDERED) { 294 ordered = true; 295 niovecs = 0; 296 nbytes = 0; 297 } 298 299 /* 300 * We 64-bit align the length of each iovec so that the start of 301 * the next one is naturally aligned. We'll need to account for 302 * that slack space here. 303 * 304 * We also add the xlog_op_header to each region when 305 * formatting, but that's not accounted to the size of the item 306 * at this point. Hence we'll need an additional number of bytes 307 * for each vector to hold an opheader. 308 * 309 * Then round nbytes up to 64-bit alignment so that the initial 310 * buffer alignment is easy to calculate and verify. 311 */ 312 nbytes += niovecs * 313 (sizeof(uint64_t) + sizeof(struct xlog_op_header)); 314 nbytes = round_up(nbytes, sizeof(uint64_t)); 315 316 /* 317 * The data buffer needs to start 64-bit aligned, so round up 318 * that space to ensure we can align it appropriately and not 319 * overrun the buffer. 320 */ 321 buf_size = nbytes + xlog_cil_iovec_space(niovecs); 322 323 /* 324 * if we have no shadow buffer, or it is too small, we need to 325 * reallocate it. 326 */ 327 if (!lip->li_lv_shadow || 328 buf_size > lip->li_lv_shadow->lv_size) { 329 /* 330 * We free and allocate here as a realloc would copy 331 * unnecessary data. We don't use kvzalloc() for the 332 * same reason - we don't need to zero the data area in 333 * the buffer, only the log vector header and the iovec 334 * storage. 335 */ 336 kmem_free(lip->li_lv_shadow); 337 lv = xlog_kvmalloc(buf_size); 338 339 memset(lv, 0, xlog_cil_iovec_space(niovecs)); 340 341 lv->lv_item = lip; 342 lv->lv_size = buf_size; 343 if (ordered) 344 lv->lv_buf_len = XFS_LOG_VEC_ORDERED; 345 else 346 lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; 347 lip->li_lv_shadow = lv; 348 } else { 349 /* same or smaller, optimise common overwrite case */ 350 lv = lip->li_lv_shadow; 351 if (ordered) 352 lv->lv_buf_len = XFS_LOG_VEC_ORDERED; 353 else 354 lv->lv_buf_len = 0; 355 lv->lv_bytes = 0; 356 lv->lv_next = NULL; 357 } 358 359 /* Ensure the lv is set up according to ->iop_size */ 360 lv->lv_niovecs = niovecs; 361 362 /* The allocated data region lies beyond the iovec region */ 363 lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs); 364 } 365 366 } 367 368 /* 369 * Prepare the log item for insertion into the CIL. Calculate the difference in 370 * log space it will consume, and if it is a new item pin it as well. 371 */ 372 STATIC void 373 xfs_cil_prepare_item( 374 struct xlog *log, 375 struct xfs_log_vec *lv, 376 struct xfs_log_vec *old_lv, 377 int *diff_len) 378 { 379 /* Account for the new LV being passed in */ 380 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) 381 *diff_len += lv->lv_bytes; 382 383 /* 384 * If there is no old LV, this is the first time we've seen the item in 385 * this CIL context and so we need to pin it. If we are replacing the 386 * old_lv, then remove the space it accounts for and make it the shadow 387 * buffer for later freeing. In both cases we are now switching to the 388 * shadow buffer, so update the pointer to it appropriately.
389 */ 390 if (!old_lv) { 391 if (lv->lv_item->li_ops->iop_pin) 392 lv->lv_item->li_ops->iop_pin(lv->lv_item); 393 lv->lv_item->li_lv_shadow = NULL; 394 } else if (old_lv != lv) { 395 ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED); 396 397 *diff_len -= old_lv->lv_bytes; 398 lv->lv_item->li_lv_shadow = old_lv; 399 } 400 401 /* attach new log vector to log item */ 402 lv->lv_item->li_lv = lv; 403 404 /* 405 * If this is the first time the item is being committed to the 406 * CIL, store the sequence number on the log item so we can 407 * tell in future commits whether this is the first checkpoint 408 * the item is being committed into. 409 */ 410 if (!lv->lv_item->li_seq) 411 lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence; 412 } 413 414 /* 415 * Format log item into a flat buffer 416 * 417 * For delayed logging, we need to hold a formatted buffer containing all the 418 * changes on the log item. This enables us to relog the item in memory and 419 * write it out asynchronously without needing to relock the object that was 420 * modified at the time it gets written into the iclog. 421 * 422 * This function takes the prepared log vectors attached to each log item, and 423 * formats the changes into the log vector buffer. The buffer it uses is 424 * dependent on the current state of the vector in the CIL - the shadow lv is 425 * guaranteed to be large enough for the current modification, but we will only 426 * use that if we can't reuse the existing lv. If we can't reuse the existing 427 * lv, then simply swap it out for the shadow lv. We don't free it - that is 428 * done lazily either by the next modification or the freeing of the log item. 429 * 430 * We don't set up region headers during this process; we simply copy the 431 * regions into the flat buffer. We can do this because we still have to do a 432 * formatting step to write the regions into the iclog buffer. Writing the 433 * ophdrs during the iclog write means that we can support splitting large 434 * regions across iclog boundaries without needing a change in the format of the 435 * item/region encapsulation. 436 * 437 * Hence what we need to do now is rewrite the vector array to point 438 * to the copied region inside the buffer we just allocated. This allows us to 439 * format the regions into the iclog as though they are being formatted 440 * directly out of the objects themselves. 441 */ 442 static void 443 xlog_cil_insert_format_items( 444 struct xlog *log, 445 struct xfs_trans *tp, 446 int *diff_len) 447 { 448 struct xfs_log_item *lip; 449 450 /* Bail out if we didn't find a log item. */ 451 if (list_empty(&tp->t_items)) { 452 ASSERT(0); 453 return; 454 } 455 456 list_for_each_entry(lip, &tp->t_items, li_trans) { 457 struct xfs_log_vec *lv; 458 struct xfs_log_vec *old_lv = NULL; 459 struct xfs_log_vec *shadow; 460 bool ordered = false; 461 462 /* Skip items which aren't dirty in this transaction. */ 463 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags)) 464 continue; 465 466 /* 467 * The formatting size information is already attached to 468 * the shadow lv on the log item.
469 */ 470 shadow = lip->li_lv_shadow; 471 if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED) 472 ordered = true; 473 474 /* Skip items that do not have any vectors for writing */ 475 if (!shadow->lv_niovecs && !ordered) 476 continue; 477 478 /* compare to existing item size */ 479 old_lv = lip->li_lv; 480 if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) { 481 /* same or smaller, optimise common overwrite case */ 482 lv = lip->li_lv; 483 lv->lv_next = NULL; 484 485 if (ordered) 486 goto insert; 487 488 /* 489 * set the item up as though it is a new insertion so 490 * that the space reservation accounting is correct. 491 */ 492 *diff_len -= lv->lv_bytes; 493 494 /* Ensure the lv is set up according to ->iop_size */ 495 lv->lv_niovecs = shadow->lv_niovecs; 496 497 /* reset the lv buffer information for new formatting */ 498 lv->lv_buf_len = 0; 499 lv->lv_bytes = 0; 500 lv->lv_buf = (char *)lv + 501 xlog_cil_iovec_space(lv->lv_niovecs); 502 } else { 503 /* switch to shadow buffer! */ 504 lv = shadow; 505 lv->lv_item = lip; 506 if (ordered) { 507 /* track as an ordered logvec */ 508 ASSERT(lip->li_lv == NULL); 509 goto insert; 510 } 511 } 512 513 ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); 514 lip->li_ops->iop_format(lip, lv); 515 insert: 516 xfs_cil_prepare_item(log, lv, old_lv, diff_len); 517 } 518 } 519 520 /* 521 * The use of lockless waitqueue_active() requires that the caller has 522 * serialised itself against the wakeup call in xlog_cil_push_work(). That 523 * can be done by either holding the push lock or the context lock. 524 */ 525 static inline bool 526 xlog_cil_over_hard_limit( 527 struct xlog *log, 528 int32_t space_used) 529 { 530 if (waitqueue_active(&log->l_cilp->xc_push_wait)) 531 return true; 532 if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) 533 return true; 534 return false; 535 } 536 537 /* 538 * Insert the log items into the CIL and calculate the difference in space 539 * consumed by the item. Add the space to the checkpoint ticket and calculate 540 * if the change requires additional log metadata. If it does, take that space 541 * as well. Remove the amount of space we added to the checkpoint ticket from 542 * the current transaction ticket so that the accounting works out correctly. 543 */ 544 static void 545 xlog_cil_insert_items( 546 struct xlog *log, 547 struct xfs_trans *tp, 548 uint32_t released_space) 549 { 550 struct xfs_cil *cil = log->l_cilp; 551 struct xfs_cil_ctx *ctx = cil->xc_ctx; 552 struct xfs_log_item *lip; 553 int len = 0; 554 int iovhdr_res = 0, split_res = 0, ctx_res = 0; 555 int space_used; 556 int order; 557 struct xlog_cil_pcp *cilpcp; 558 559 ASSERT(tp); 560 561 /* 562 * We can do this safely because the context can't checkpoint until we 563 * are done so it doesn't matter exactly how we update the CIL. 564 */ 565 xlog_cil_insert_format_items(log, tp, &len); 566 567 /* 568 * Subtract the space released by intent cancelation from the space we 569 * consumed so that we remove it from the CIL space and add it back to 570 * the current transaction reservation context. 571 */ 572 len -= released_space; 573 574 /* 575 * Grab the per-cpu pointer for the CIL before we start any accounting. 576 * That ensures that we are running with pre-emption disabled and so we 577 * can't be scheduled away between split sample/update operations that 578 * are done without outside locking to serialise them. 
579 */ 580 cilpcp = get_cpu_ptr(cil->xc_pcp); 581 582 /* 583 * We need to take the CIL checkpoint unit reservation on the first 584 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't 585 * unnecessarily do an atomic op in the fast path here. We can clear the 586 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that 587 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit. 588 */ 589 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) && 590 test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) 591 ctx_res = ctx->ticket->t_unit_res; 592 593 /* 594 * Check if we need to steal iclog headers. atomic_read() is not a 595 * locked atomic operation, so we can check the value before we do any 596 * real atomic ops in the fast path. If we've already taken the CIL unit 597 * reservation from this commit, we've already got one iclog header 598 * space reserved so we have to account for that otherwise we risk 599 * overrunning the reservation on this ticket. 600 * 601 * If the CIL is already at the hard limit, we might need more header 602 * space that originally reserved. So steal more header space from every 603 * commit that occurs once we are over the hard limit to ensure the CIL 604 * push won't run out of reservation space. 605 * 606 * This can steal more than we need, but that's OK. 607 * 608 * The cil->xc_ctx_lock provides the serialisation necessary for safely 609 * calling xlog_cil_over_hard_limit() in this context. 610 */ 611 space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len; 612 if (atomic_read(&cil->xc_iclog_hdrs) > 0 || 613 xlog_cil_over_hard_limit(log, space_used)) { 614 split_res = log->l_iclog_hsize + 615 sizeof(struct xlog_op_header); 616 if (ctx_res) 617 ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1); 618 else 619 ctx_res = split_res * tp->t_ticket->t_iclog_hdrs; 620 atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs); 621 } 622 cilpcp->space_reserved += ctx_res; 623 624 /* 625 * Accurately account when over the soft limit, otherwise fold the 626 * percpu count into the global count if over the per-cpu threshold. 627 */ 628 if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) { 629 atomic_add(len, &ctx->space_used); 630 } else if (cilpcp->space_used + len > 631 (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) { 632 space_used = atomic_add_return(cilpcp->space_used + len, 633 &ctx->space_used); 634 cilpcp->space_used = 0; 635 636 /* 637 * If we just transitioned over the soft limit, we need to 638 * transition to the global atomic counter. 639 */ 640 if (space_used >= XLOG_CIL_SPACE_LIMIT(log)) 641 xlog_cil_insert_pcp_aggregate(cil, ctx); 642 } else { 643 cilpcp->space_used += len; 644 } 645 /* attach the transaction to the CIL if it has any busy extents */ 646 if (!list_empty(&tp->t_busy)) 647 list_splice_init(&tp->t_busy, &cilpcp->busy_extents); 648 649 /* 650 * Now update the order of everything modified in the transaction 651 * and insert items into the CIL if they aren't already there. 652 * We do this here so we only need to take the CIL lock once during 653 * the transaction commit. 654 */ 655 order = atomic_inc_return(&ctx->order_id); 656 list_for_each_entry(lip, &tp->t_items, li_trans) { 657 /* Skip items which aren't dirty in this transaction. 
*/ 658 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags)) 659 continue; 660 661 lip->li_order_id = order; 662 if (!list_empty(&lip->li_cil)) 663 continue; 664 list_add_tail(&lip->li_cil, &cilpcp->log_items); 665 } 666 put_cpu_ptr(cilpcp); 667 668 /* 669 * If we've overrun the reservation, dump the tx details before we move 670 * the log items. Shutdown is imminent... 671 */ 672 tp->t_ticket->t_curr_res -= ctx_res + len; 673 if (WARN_ON(tp->t_ticket->t_curr_res < 0)) { 674 xfs_warn(log->l_mp, "Transaction log reservation overrun:"); 675 xfs_warn(log->l_mp, 676 " log items: %d bytes (iov hdrs: %d bytes)", 677 len, iovhdr_res); 678 xfs_warn(log->l_mp, " split region headers: %d bytes", 679 split_res); 680 xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res); 681 xlog_print_trans(tp); 682 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); 683 } 684 } 685 686 static void 687 xlog_cil_free_logvec( 688 struct xfs_log_vec *log_vector) 689 { 690 struct xfs_log_vec *lv; 691 692 for (lv = log_vector; lv; ) { 693 struct xfs_log_vec *next = lv->lv_next; 694 kmem_free(lv); 695 lv = next; 696 } 697 } 698 699 static void 700 xlog_discard_endio_work( 701 struct work_struct *work) 702 { 703 struct xfs_cil_ctx *ctx = 704 container_of(work, struct xfs_cil_ctx, discard_endio_work); 705 struct xfs_mount *mp = ctx->cil->xc_log->l_mp; 706 707 xfs_extent_busy_clear(mp, &ctx->busy_extents, false); 708 kmem_free(ctx); 709 } 710 711 /* 712 * Queue up the actual completion to a thread to avoid IRQ-safe locking for 713 * pagb_lock. Note that we need a unbounded workqueue, otherwise we might 714 * get the execution delayed up to 30 seconds for weird reasons. 715 */ 716 static void 717 xlog_discard_endio( 718 struct bio *bio) 719 { 720 struct xfs_cil_ctx *ctx = bio->bi_private; 721 722 INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); 723 queue_work(xfs_discard_wq, &ctx->discard_endio_work); 724 bio_put(bio); 725 } 726 727 static void 728 xlog_discard_busy_extents( 729 struct xfs_mount *mp, 730 struct xfs_cil_ctx *ctx) 731 { 732 struct list_head *list = &ctx->busy_extents; 733 struct xfs_extent_busy *busyp; 734 struct bio *bio = NULL; 735 struct blk_plug plug; 736 int error = 0; 737 738 ASSERT(xfs_has_discard(mp)); 739 740 blk_start_plug(&plug); 741 list_for_each_entry(busyp, list, list) { 742 trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, 743 busyp->length); 744 745 error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, 746 XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), 747 XFS_FSB_TO_BB(mp, busyp->length), 748 GFP_NOFS, &bio); 749 if (error && error != -EOPNOTSUPP) { 750 xfs_info(mp, 751 "discard failed for extent [0x%llx,%u], error %d", 752 (unsigned long long)busyp->bno, 753 busyp->length, 754 error); 755 break; 756 } 757 } 758 759 if (bio) { 760 bio->bi_private = ctx; 761 bio->bi_end_io = xlog_discard_endio; 762 submit_bio(bio); 763 } else { 764 xlog_discard_endio_work(&ctx->discard_endio_work); 765 } 766 blk_finish_plug(&plug); 767 } 768 769 /* 770 * Mark all items committed and clear busy extents. We free the log vector 771 * chains in a separate pass so that we unpin the log items as quickly as 772 * possible. 773 */ 774 static void 775 xlog_cil_committed( 776 struct xfs_cil_ctx *ctx) 777 { 778 struct xfs_mount *mp = ctx->cil->xc_log->l_mp; 779 bool abort = xlog_is_shutdown(ctx->cil->xc_log); 780 781 /* 782 * If the I/O failed, we're aborting the commit and already shutdown. 783 * Wake any commit waiters before aborting the log items so we don't 784 * block async log pushers on callbacks. 
Async log pushers explicitly do 785 * not wait on log force completion because they may be holding locks 786 * required to unpin items. 787 */ 788 if (abort) { 789 spin_lock(&ctx->cil->xc_push_lock); 790 wake_up_all(&ctx->cil->xc_start_wait); 791 wake_up_all(&ctx->cil->xc_commit_wait); 792 spin_unlock(&ctx->cil->xc_push_lock); 793 } 794 795 xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, 796 ctx->start_lsn, abort); 797 798 xfs_extent_busy_sort(&ctx->busy_extents); 799 xfs_extent_busy_clear(mp, &ctx->busy_extents, 800 xfs_has_discard(mp) && !abort); 801 802 spin_lock(&ctx->cil->xc_push_lock); 803 list_del(&ctx->committing); 804 spin_unlock(&ctx->cil->xc_push_lock); 805 806 xlog_cil_free_logvec(ctx->lv_chain); 807 808 if (!list_empty(&ctx->busy_extents)) 809 xlog_discard_busy_extents(mp, ctx); 810 else 811 kmem_free(ctx); 812 } 813 814 void 815 xlog_cil_process_committed( 816 struct list_head *list) 817 { 818 struct xfs_cil_ctx *ctx; 819 820 while ((ctx = list_first_entry_or_null(list, 821 struct xfs_cil_ctx, iclog_entry))) { 822 list_del(&ctx->iclog_entry); 823 xlog_cil_committed(ctx); 824 } 825 } 826 827 /* 828 * Record the LSN of the iclog we were just granted space to start writing into. 829 * If the context doesn't have a start_lsn recorded, then this iclog will 830 * contain the start record for the checkpoint. Otherwise this write contains 831 * the commit record for the checkpoint. 832 */ 833 void 834 xlog_cil_set_ctx_write_state( 835 struct xfs_cil_ctx *ctx, 836 struct xlog_in_core *iclog) 837 { 838 struct xfs_cil *cil = ctx->cil; 839 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); 840 841 ASSERT(!ctx->commit_lsn); 842 if (!ctx->start_lsn) { 843 spin_lock(&cil->xc_push_lock); 844 /* 845 * The LSN we need to pass to the log items on transaction 846 * commit is the LSN reported by the first log vector write, not 847 * the commit lsn. If we use the commit record lsn then we can 848 * move the grant write head beyond the tail LSN and overwrite 849 * it. 850 */ 851 ctx->start_lsn = lsn; 852 wake_up_all(&cil->xc_start_wait); 853 spin_unlock(&cil->xc_push_lock); 854 855 /* 856 * Make sure the metadata we are about to overwrite in the log 857 * has been flushed to stable storage before this iclog is 858 * issued. 859 */ 860 spin_lock(&cil->xc_log->l_icloglock); 861 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; 862 spin_unlock(&cil->xc_log->l_icloglock); 863 return; 864 } 865 866 /* 867 * Take a reference to the iclog for the context so that we still hold 868 * it when xlog_write is done and has released it. This means the 869 * context controls when the iclog is released for IO. 870 */ 871 atomic_inc(&iclog->ic_refcnt); 872 873 /* 874 * xlog_state_get_iclog_space() guarantees there is enough space in the 875 * iclog for an entire commit record, so we can attach the context 876 * callbacks now. This needs to be done before we make the commit_lsn 877 * visible to waiters so that checkpoints with commit records in the 878 * same iclog order their IO completion callbacks in the same order that 879 * the commit records appear in the iclog. 880 */ 881 spin_lock(&cil->xc_log->l_icloglock); 882 list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks); 883 spin_unlock(&cil->xc_log->l_icloglock); 884 885 /* 886 * Now we can record the commit LSN and wake anyone waiting for this 887 * sequence to have the ordered commit record assigned to a physical 888 * location in the log. 
889 */ 890 spin_lock(&cil->xc_push_lock); 891 ctx->commit_iclog = iclog; 892 ctx->commit_lsn = lsn; 893 wake_up_all(&cil->xc_commit_wait); 894 spin_unlock(&cil->xc_push_lock); 895 } 896 897 898 /* 899 * Ensure that the order of log writes follows checkpoint sequence order. This 900 * relies on the context LSN being zero until the log write has guaranteed the 901 * LSN that the log write will start at via xlog_state_get_iclog_space(). 902 */ 903 enum _record_type { 904 _START_RECORD, 905 _COMMIT_RECORD, 906 }; 907 908 static int 909 xlog_cil_order_write( 910 struct xfs_cil *cil, 911 xfs_csn_t sequence, 912 enum _record_type record) 913 { 914 struct xfs_cil_ctx *ctx; 915 916 restart: 917 spin_lock(&cil->xc_push_lock); 918 list_for_each_entry(ctx, &cil->xc_committing, committing) { 919 /* 920 * Avoid getting stuck in this loop because we were woken by the 921 * shutdown, but then went back to sleep once already in the 922 * shutdown state. 923 */ 924 if (xlog_is_shutdown(cil->xc_log)) { 925 spin_unlock(&cil->xc_push_lock); 926 return -EIO; 927 } 928 929 /* 930 * Higher sequences will wait for this one so skip them. 931 * Don't wait for our own sequence, either. 932 */ 933 if (ctx->sequence >= sequence) 934 continue; 935 936 /* Wait until the LSN for the record has been recorded. */ 937 switch (record) { 938 case _START_RECORD: 939 if (!ctx->start_lsn) { 940 xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock); 941 goto restart; 942 } 943 break; 944 case _COMMIT_RECORD: 945 if (!ctx->commit_lsn) { 946 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); 947 goto restart; 948 } 949 break; 950 } 951 } 952 spin_unlock(&cil->xc_push_lock); 953 return 0; 954 } 955 956 /* 957 * Write out the log vector change now attached to the CIL context. This will 958 * write a start record that needs to be strictly ordered in ascending CIL 959 * sequence order so that log recovery will always use in-order start LSNs when 960 * replaying checkpoints. 961 */ 962 static int 963 xlog_cil_write_chain( 964 struct xfs_cil_ctx *ctx, 965 struct xfs_log_vec *chain, 966 uint32_t chain_len) 967 { 968 struct xlog *log = ctx->cil->xc_log; 969 int error; 970 971 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD); 972 if (error) 973 return error; 974 return xlog_write(log, ctx, chain, ctx->ticket, chain_len); 975 } 976 977 /* 978 * Write out the commit record of a checkpoint transaction to close off a 979 * running log write. These commit records are strictly ordered in ascending CIL 980 * sequence order so that log recovery will always replay the checkpoints in the 981 * correct order. 
982 */ 983 static int 984 xlog_cil_write_commit_record( 985 struct xfs_cil_ctx *ctx) 986 { 987 struct xlog *log = ctx->cil->xc_log; 988 struct xlog_op_header ophdr = { 989 .oh_clientid = XFS_TRANSACTION, 990 .oh_tid = cpu_to_be32(ctx->ticket->t_tid), 991 .oh_flags = XLOG_COMMIT_TRANS, 992 }; 993 struct xfs_log_iovec reg = { 994 .i_addr = &ophdr, 995 .i_len = sizeof(struct xlog_op_header), 996 .i_type = XLOG_REG_TYPE_COMMIT, 997 }; 998 struct xfs_log_vec vec = { 999 .lv_niovecs = 1, 1000 .lv_iovecp = ®, 1001 }; 1002 int error; 1003 1004 if (xlog_is_shutdown(log)) 1005 return -EIO; 1006 1007 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD); 1008 if (error) 1009 return error; 1010 1011 /* account for space used by record data */ 1012 ctx->ticket->t_curr_res -= reg.i_len; 1013 error = xlog_write(log, ctx, &vec, ctx->ticket, reg.i_len); 1014 if (error) 1015 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); 1016 return error; 1017 } 1018 1019 struct xlog_cil_trans_hdr { 1020 struct xlog_op_header oph[2]; 1021 struct xfs_trans_header thdr; 1022 struct xfs_log_iovec lhdr[2]; 1023 }; 1024 1025 /* 1026 * Build a checkpoint transaction header to begin the journal transaction. We 1027 * need to account for the space used by the transaction header here as it is 1028 * not accounted for in xlog_write(). 1029 * 1030 * This is the only place we write a transaction header, so we also build the 1031 * log opheaders that indicate the start of a log transaction and wrap the 1032 * transaction header. We keep the start record in it's own log vector rather 1033 * than compacting them into a single region as this ends up making the logic 1034 * in xlog_write() for handling empty opheaders for start, commit and unmount 1035 * records much simpler. 1036 */ 1037 static void 1038 xlog_cil_build_trans_hdr( 1039 struct xfs_cil_ctx *ctx, 1040 struct xlog_cil_trans_hdr *hdr, 1041 struct xfs_log_vec *lvhdr, 1042 int num_iovecs) 1043 { 1044 struct xlog_ticket *tic = ctx->ticket; 1045 __be32 tid = cpu_to_be32(tic->t_tid); 1046 1047 memset(hdr, 0, sizeof(*hdr)); 1048 1049 /* Log start record */ 1050 hdr->oph[0].oh_tid = tid; 1051 hdr->oph[0].oh_clientid = XFS_TRANSACTION; 1052 hdr->oph[0].oh_flags = XLOG_START_TRANS; 1053 1054 /* log iovec region pointer */ 1055 hdr->lhdr[0].i_addr = &hdr->oph[0]; 1056 hdr->lhdr[0].i_len = sizeof(struct xlog_op_header); 1057 hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER; 1058 1059 /* log opheader */ 1060 hdr->oph[1].oh_tid = tid; 1061 hdr->oph[1].oh_clientid = XFS_TRANSACTION; 1062 hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header)); 1063 1064 /* transaction header in host byte order format */ 1065 hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC; 1066 hdr->thdr.th_type = XFS_TRANS_CHECKPOINT; 1067 hdr->thdr.th_tid = tic->t_tid; 1068 hdr->thdr.th_num_items = num_iovecs; 1069 1070 /* log iovec region pointer */ 1071 hdr->lhdr[1].i_addr = &hdr->oph[1]; 1072 hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) + 1073 sizeof(struct xfs_trans_header); 1074 hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR; 1075 1076 lvhdr->lv_niovecs = 2; 1077 lvhdr->lv_iovecp = &hdr->lhdr[0]; 1078 lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len; 1079 lvhdr->lv_next = ctx->lv_chain; 1080 1081 tic->t_curr_res -= lvhdr->lv_bytes; 1082 } 1083 1084 /* 1085 * CIL item reordering compare function. We want to order in ascending ID order, 1086 * but we want to leave items with the same ID in the order they were added to 1087 * the list. 
This is important for operations like reflink where we log 4 order 1088 * dependent intents in a single transaction when we overwrite an existing 1089 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop), 1090 * CUI (inc), BUI(remap)... 1091 */ 1092 static int 1093 xlog_cil_order_cmp( 1094 void *priv, 1095 const struct list_head *a, 1096 const struct list_head *b) 1097 { 1098 struct xfs_log_item *l1 = container_of(a, struct xfs_log_item, li_cil); 1099 struct xfs_log_item *l2 = container_of(b, struct xfs_log_item, li_cil); 1100 1101 return l1->li_order_id > l2->li_order_id; 1102 } 1103 1104 /* 1105 * Pull all the log vectors off the items in the CIL, and remove the items from 1106 * the CIL. We don't need the CIL lock here because it's only needed on the 1107 * transaction commit side which is currently locked out by the flush lock. 1108 * 1109 * If a log item is marked with a whiteout, we do not need to write it to the 1110 * journal and so we just move them to the whiteout list for the caller to 1111 * dispose of appropriately. 1112 */ 1113 static void 1114 xlog_cil_build_lv_chain( 1115 struct xfs_cil_ctx *ctx, 1116 struct list_head *whiteouts, 1117 uint32_t *num_iovecs, 1118 uint32_t *num_bytes) 1119 { 1120 struct xfs_log_vec *lv = NULL; 1121 1122 list_sort(NULL, &ctx->log_items, xlog_cil_order_cmp); 1123 1124 while (!list_empty(&ctx->log_items)) { 1125 struct xfs_log_item *item; 1126 1127 item = list_first_entry(&ctx->log_items, 1128 struct xfs_log_item, li_cil); 1129 1130 if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) { 1131 list_move(&item->li_cil, whiteouts); 1132 trace_xfs_cil_whiteout_skip(item); 1133 continue; 1134 } 1135 1136 list_del_init(&item->li_cil); 1137 item->li_order_id = 0; 1138 if (!ctx->lv_chain) 1139 ctx->lv_chain = item->li_lv; 1140 else 1141 lv->lv_next = item->li_lv; 1142 lv = item->li_lv; 1143 item->li_lv = NULL; 1144 *num_iovecs += lv->lv_niovecs; 1145 1146 /* we don't write ordered log vectors */ 1147 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) 1148 *num_bytes += lv->lv_bytes; 1149 } 1150 } 1151 1152 static void 1153 xlog_cil_cleanup_whiteouts( 1154 struct list_head *whiteouts) 1155 { 1156 while (!list_empty(whiteouts)) { 1157 struct xfs_log_item *item = list_first_entry(whiteouts, 1158 struct xfs_log_item, li_cil); 1159 list_del_init(&item->li_cil); 1160 trace_xfs_cil_whiteout_unpin(item); 1161 item->li_ops->iop_unpin(item, 1); 1162 } 1163 } 1164 1165 /* 1166 * Push the Committed Item List to the log. 1167 * 1168 * If the current sequence is the same as xc_push_seq we need to do a flush. If 1169 * xc_push_seq is less than the current sequence, then it has already been 1170 * flushed and we don't need to do anything - the caller will wait for it to 1171 * complete if necessary. 1172 * 1173 * xc_push_seq is checked unlocked against the sequence number for a match. 1174 * Hence we can allow log forces to run racily and not issue pushes for the 1175 * same sequence twice. If we get a race between multiple pushes for the same 1176 * sequence they will block on the first one and then abort, hence avoiding 1177 * needless pushes. 
1178 */ 1179 static void 1180 xlog_cil_push_work( 1181 struct work_struct *work) 1182 { 1183 struct xfs_cil_ctx *ctx = 1184 container_of(work, struct xfs_cil_ctx, push_work); 1185 struct xfs_cil *cil = ctx->cil; 1186 struct xlog *log = cil->xc_log; 1187 struct xfs_cil_ctx *new_ctx; 1188 int num_iovecs = 0; 1189 int num_bytes = 0; 1190 int error = 0; 1191 struct xlog_cil_trans_hdr thdr; 1192 struct xfs_log_vec lvhdr = { NULL }; 1193 xfs_csn_t push_seq; 1194 bool push_commit_stable; 1195 LIST_HEAD (whiteouts); 1196 1197 new_ctx = xlog_cil_ctx_alloc(); 1198 new_ctx->ticket = xlog_cil_ticket_alloc(log); 1199 1200 down_write(&cil->xc_ctx_lock); 1201 1202 spin_lock(&cil->xc_push_lock); 1203 push_seq = cil->xc_push_seq; 1204 ASSERT(push_seq <= ctx->sequence); 1205 push_commit_stable = cil->xc_push_commit_stable; 1206 cil->xc_push_commit_stable = false; 1207 1208 /* 1209 * As we are about to switch to a new, empty CIL context, we no longer 1210 * need to throttle tasks on CIL space overruns. Wake any waiters that 1211 * the hard push throttle may have caught so they can start committing 1212 * to the new context. The ctx->xc_push_lock provides the serialisation 1213 * necessary for safely using the lockless waitqueue_active() check in 1214 * this context. 1215 */ 1216 if (waitqueue_active(&cil->xc_push_wait)) 1217 wake_up_all(&cil->xc_push_wait); 1218 1219 xlog_cil_push_pcp_aggregate(cil, ctx); 1220 1221 /* 1222 * Check if we've anything to push. If there is nothing, then we don't 1223 * move on to a new sequence number and so we have to be able to push 1224 * this sequence again later. 1225 */ 1226 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) { 1227 cil->xc_push_seq = 0; 1228 spin_unlock(&cil->xc_push_lock); 1229 goto out_skip; 1230 } 1231 1232 1233 /* check for a previously pushed sequence */ 1234 if (push_seq < ctx->sequence) { 1235 spin_unlock(&cil->xc_push_lock); 1236 goto out_skip; 1237 } 1238 1239 /* 1240 * We are now going to push this context, so add it to the committing 1241 * list before we do anything else. This ensures that anyone waiting on 1242 * this push can easily detect the difference between a "push in 1243 * progress" and "CIL is empty, nothing to do". 1244 * 1245 * IOWs, a wait loop can now check for: 1246 * the current sequence not being found on the committing list; 1247 * an empty CIL; and 1248 * an unchanged sequence number 1249 * to detect a push that had nothing to do and therefore does not need 1250 * waiting on. If the CIL is not empty, we get put on the committing 1251 * list before emptying the CIL and bumping the sequence number. Hence 1252 * an empty CIL and an unchanged sequence number means we jumped out 1253 * above after doing nothing. 1254 * 1255 * Hence the waiter will either find the commit sequence on the 1256 * committing list or the sequence number will be unchanged and the CIL 1257 * still dirty. In that latter case, the push has not yet started, and 1258 * so the waiter will have to continue trying to check the CIL 1259 * committing list until it is found. In extreme cases of delay, the 1260 * sequence may fully commit between the attempts the wait makes to wait 1261 * on the commit sequence. 1262 */ 1263 list_add(&ctx->committing, &cil->xc_committing); 1264 spin_unlock(&cil->xc_push_lock); 1265 1266 xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes); 1267 1268 /* 1269 * Switch the contexts so we can drop the context lock and move out 1270 * of a shared context. 
We can't just go straight to the commit record, 1271 * though - we need to synchronise with previous and future commits so 1272 * that the commit records are correctly ordered in the log to ensure 1273 * that we process items during log IO completion in the correct order. 1274 * 1275 * For example, if we get an EFI in one checkpoint and the EFD in the 1276 * next (e.g. due to log forces), we do not want the checkpoint with 1277 * the EFD to be committed before the checkpoint with the EFI. Hence 1278 * we must strictly order the commit records of the checkpoints so 1279 * that: a) the checkpoint callbacks are attached to the iclogs in the 1280 * correct order; and b) the checkpoints are replayed in correct order 1281 * in log recovery. 1282 * 1283 * Hence we need to add this context to the committing context list so 1284 * that higher sequences will wait for us to write out a commit record 1285 * before they do. 1286 * 1287 * xfs_log_force_seq requires us to mirror the new sequence into the cil 1288 * structure atomically with the addition of this sequence to the 1289 * committing list. This also ensures that we can do unlocked checks 1290 * against the current sequence in log forces without risking 1291 * dereferencing a freed context pointer. 1292 */ 1293 spin_lock(&cil->xc_push_lock); 1294 xlog_cil_ctx_switch(cil, new_ctx); 1295 spin_unlock(&cil->xc_push_lock); 1296 up_write(&cil->xc_ctx_lock); 1297 1298 /* 1299 * Build a checkpoint transaction header and write it to the log to 1300 * begin the transaction. We need to account for the space used by the 1301 * transaction header here as it is not accounted for in xlog_write(). 1302 */ 1303 xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs); 1304 num_bytes += lvhdr.lv_bytes; 1305 1306 error = xlog_cil_write_chain(ctx, &lvhdr, num_bytes); 1307 if (error) 1308 goto out_abort_free_ticket; 1309 1310 error = xlog_cil_write_commit_record(ctx); 1311 if (error) 1312 goto out_abort_free_ticket; 1313 1314 xfs_log_ticket_ungrant(log, ctx->ticket); 1315 1316 /* 1317 * If the checkpoint spans multiple iclogs, wait for all previous iclogs 1318 * to complete before we submit the commit_iclog. We can't use state 1319 * checks for this - ACTIVE can be either a past completed iclog or a 1320 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a 1321 * past or future iclog awaiting IO or ordered IO completion to be run. 1322 * In the latter case, if it's a future iclog and we wait on it, then we 1323 * will hang because it won't get processed through to ic_force_wait 1324 * wakeup until this commit_iclog is written to disk. Hence we use the 1325 * iclog header lsn and compare it to the commit lsn to determine if we 1326 * need to wait on iclogs or not. 1327 */ 1328 spin_lock(&log->l_icloglock); 1329 if (ctx->start_lsn != ctx->commit_lsn) { 1330 xfs_lsn_t plsn; 1331 1332 plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn); 1333 if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) { 1334 /* 1335 * Waiting on ic_force_wait orders the completion of 1336 * iclogs older than ic_prev. Hence we only need to wait 1337 * on the most recent older iclog here. 1338 */ 1339 xlog_wait_on_iclog(ctx->commit_iclog->ic_prev); 1340 spin_lock(&log->l_icloglock); 1341 } 1342 1343 /* 1344 * We need to issue a pre-flush so that the ordering for this 1345 * checkpoint is correctly preserved down to stable storage.
1346 */ 1347 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; 1348 } 1349 1350 /* 1351 * The commit iclog must be written to stable storage to guarantee 1352 * journal IO vs metadata writeback IO is correctly ordered on stable 1353 * storage. 1354 * 1355 * If the push caller needs the commit to be immediately stable and the 1356 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it 1357 * will be written when released, switch its state to WANT_SYNC right 1358 * now. 1359 */ 1360 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; 1361 if (push_commit_stable && 1362 ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE) 1363 xlog_state_switch_iclogs(log, ctx->commit_iclog, 0); 1364 xlog_state_release_iclog(log, ctx->commit_iclog); 1365 1366 /* Not safe to reference ctx now! */ 1367 1368 spin_unlock(&log->l_icloglock); 1369 xlog_cil_cleanup_whiteouts(&whiteouts); 1370 return; 1371 1372 out_skip: 1373 up_write(&cil->xc_ctx_lock); 1374 xfs_log_ticket_put(new_ctx->ticket); 1375 kmem_free(new_ctx); 1376 return; 1377 1378 out_abort_free_ticket: 1379 xfs_log_ticket_ungrant(log, ctx->ticket); 1380 ASSERT(xlog_is_shutdown(log)); 1381 xlog_cil_cleanup_whiteouts(&whiteouts); 1382 if (!ctx->commit_iclog) { 1383 xlog_cil_committed(ctx); 1384 return; 1385 } 1386 spin_lock(&log->l_icloglock); 1387 xlog_state_release_iclog(log, ctx->commit_iclog); 1388 /* Not safe to reference ctx now! */ 1389 spin_unlock(&log->l_icloglock); 1390 } 1391 1392 /* 1393 * We need to push the CIL every so often so we don't cache more than we can fit in 1394 * the log. The limit really is that a checkpoint can't be more than half the 1395 * log (the current checkpoint is not allowed to overwrite the previous 1396 * checkpoint), but commit latency and memory usage limit this to a smaller 1397 * size. 1398 */ 1399 static void 1400 xlog_cil_push_background( 1401 struct xlog *log) __releases(cil->xc_ctx_lock) 1402 { 1403 struct xfs_cil *cil = log->l_cilp; 1404 int space_used = atomic_read(&cil->xc_ctx->space_used); 1405 1406 /* 1407 * The cil won't be empty because we are called while holding the 1408 * context lock so whatever we added to the CIL will still be there. 1409 */ 1410 ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)); 1411 1412 /* 1413 * Don't do a background push if we haven't used up all the 1414 * space available yet. 1415 */ 1416 if (space_used < XLOG_CIL_SPACE_LIMIT(log)) { 1417 up_read(&cil->xc_ctx_lock); 1418 return; 1419 } 1420 1421 spin_lock(&cil->xc_push_lock); 1422 if (cil->xc_push_seq < cil->xc_current_sequence) { 1423 cil->xc_push_seq = cil->xc_current_sequence; 1424 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work); 1425 } 1426 1427 /* 1428 * Drop the context lock now, we can't hold that if we need to sleep 1429 * because we are over the blocking threshold. The push_lock is still 1430 * held, so blocking threshold sleep/wakeup is still correctly 1431 * serialised here. 1432 */ 1433 up_read(&cil->xc_ctx_lock); 1434 1435 /* 1436 * If we are well over the space limit, throttle the work that is being 1437 * done until the push work on this context has begun. Enforce the hard 1438 * throttle on all transaction commits once it has been activated, even 1439 * if the committing transactions have resulted in the space usage 1440 * dipping back down under the hard limit. 1441 * 1442 * The ctx->xc_push_lock provides the serialisation necessary for safely 1443 * calling xlog_cil_over_hard_limit() in this context.
1444 */ 1445 if (xlog_cil_over_hard_limit(log, space_used)) { 1446 trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket); 1447 ASSERT(space_used < log->l_logsize); 1448 xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock); 1449 return; 1450 } 1451 1452 spin_unlock(&cil->xc_push_lock); 1453 1454 } 1455 1456 /* 1457 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence 1458 * number that is passed. When it returns, the work will be queued for 1459 * @push_seq, but it won't be completed. 1460 * 1461 * If the caller is performing a synchronous force, we will flush the workqueue 1462 * to get previously queued work moving to minimise the wait time they will 1463 * undergo waiting for all outstanding pushes to complete. The caller is 1464 * expected to do the required waiting for push_seq to complete. 1465 * 1466 * If the caller is performing an async push, we need to ensure that the 1467 * checkpoint is fully flushed out of the iclogs when we finish the push. If we 1468 * don't do this, then the commit record may remain sitting in memory in an 1469 * ACTIVE iclog. This then requires another full log force to push to disk, 1470 * which defeats the purpose of having an async, non-blocking CIL force 1471 * mechanism. Hence in this case we need to pass a flag to the push work to 1472 * indicate it needs to flush the commit record itself. 1473 */ 1474 static void 1475 xlog_cil_push_now( 1476 struct xlog *log, 1477 xfs_lsn_t push_seq, 1478 bool async) 1479 { 1480 struct xfs_cil *cil = log->l_cilp; 1481 1482 if (!cil) 1483 return; 1484 1485 ASSERT(push_seq && push_seq <= cil->xc_current_sequence); 1486 1487 /* start on any pending background push to minimise wait time on it */ 1488 if (!async) 1489 flush_workqueue(cil->xc_push_wq); 1490 1491 spin_lock(&cil->xc_push_lock); 1492 1493 /* 1494 * If this is an async flush request, we always need to set the 1495 * xc_push_commit_stable flag even if something else has already queued 1496 * a push. The flush caller is asking for the CIL to be on stable 1497 * storage when the next push completes, so regardless of who has queued 1498 * the push, the flush requires stable semantics from it. 1499 */ 1500 cil->xc_push_commit_stable = async; 1501 1502 /* 1503 * If the CIL is empty or we've already pushed the sequence then 1504 * there's no more work that we need to do. 1505 */ 1506 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) || 1507 push_seq <= cil->xc_push_seq) { 1508 spin_unlock(&cil->xc_push_lock); 1509 return; 1510 } 1511 1512 cil->xc_push_seq = push_seq; 1513 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work); 1514 spin_unlock(&cil->xc_push_lock); 1515 } 1516 1517 bool 1518 xlog_cil_empty( 1519 struct xlog *log) 1520 { 1521 struct xfs_cil *cil = log->l_cilp; 1522 bool empty = false; 1523 1524 spin_lock(&cil->xc_push_lock); 1525 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) 1526 empty = true; 1527 spin_unlock(&cil->xc_push_lock); 1528 return empty; 1529 } 1530 1531 /* 1532 * If there are intent done items in this transaction and the related intent was 1533 * committed in the current (same) CIL checkpoint, we don't need to write either 1534 * the intent or intent done item to the journal as the change will be 1535 * journalled atomically within this checkpoint. As we cannot remove items from 1536 * the CIL here, mark the related intent with a whiteout so that the CIL push 1537 * can remove it rather than writing it to the journal. 
Then remove the intent 1538 * done item from the current transaction and release it so it doesn't get put 1539 * into the CIL at all. 1540 */ 1541 static uint32_t 1542 xlog_cil_process_intents( 1543 struct xfs_cil *cil, 1544 struct xfs_trans *tp) 1545 { 1546 struct xfs_log_item *lip, *ilip, *next; 1547 uint32_t len = 0; 1548 1549 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { 1550 if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE)) 1551 continue; 1552 1553 ilip = lip->li_ops->iop_intent(lip); 1554 if (!ilip || !xlog_item_in_current_chkpt(cil, ilip)) 1555 continue; 1556 set_bit(XFS_LI_WHITEOUT, &ilip->li_flags); 1557 trace_xfs_cil_whiteout_mark(ilip); 1558 len += ilip->li_lv->lv_bytes; 1559 kmem_free(ilip->li_lv); 1560 ilip->li_lv = NULL; 1561 1562 xfs_trans_del_item(lip); 1563 lip->li_ops->iop_release(lip); 1564 } 1565 return len; 1566 } 1567 1568 /* 1569 * Commit a transaction with the given vector to the Committed Item List. 1570 * 1571 * To do this, we need to format the item, pin it in memory if required and 1572 * account for the space used by the transaction. Once we have done that we 1573 * need to release the unused reservation for the transaction, attach the 1574 * transaction to the checkpoint context so we carry the busy extents through 1575 * to checkpoint completion, and then unlock all the items in the transaction. 1576 * 1577 * Called with the context lock already held in read mode to lock out 1578 * background commit, returns without it held once background commits are 1579 * allowed again. 1580 */ 1581 void 1582 xlog_cil_commit( 1583 struct xlog *log, 1584 struct xfs_trans *tp, 1585 xfs_csn_t *commit_seq, 1586 bool regrant) 1587 { 1588 struct xfs_cil *cil = log->l_cilp; 1589 struct xfs_log_item *lip, *next; 1590 uint32_t released_space = 0; 1591 1592 /* 1593 * Do all necessary memory allocation before we lock the CIL. 1594 * This ensures the allocation does not deadlock with a CIL 1595 * push in memory reclaim (e.g. from kswapd). 1596 */ 1597 xlog_cil_alloc_shadow_bufs(log, tp); 1598 1599 /* lock out background commit */ 1600 down_read(&cil->xc_ctx_lock); 1601 1602 if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE) 1603 released_space = xlog_cil_process_intents(cil, tp); 1604 1605 xlog_cil_insert_items(log, tp, released_space); 1606 1607 if (regrant && !xlog_is_shutdown(log)) 1608 xfs_log_ticket_regrant(log, tp->t_ticket); 1609 else 1610 xfs_log_ticket_ungrant(log, tp->t_ticket); 1611 tp->t_ticket = NULL; 1612 xfs_trans_unreserve_and_mod_sb(tp); 1613 1614 /* 1615 * Once all the items of the transaction have been copied to the CIL, 1616 * the items can be unlocked and possibly freed. 1617 * 1618 * This needs to be done before we drop the CIL context lock because we 1619 * have to update state in the log items and unlock them before they go 1620 * to disk. If we don't, then the CIL checkpoint can race with us and 1621 * we can run checkpoint completion before we've updated and unlocked 1622 * the log items. This affects (at least) processing of stale buffers, 1623 * inodes and EFIs.
1624 */ 1625 trace_xfs_trans_commit_items(tp, _RET_IP_); 1626 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { 1627 xfs_trans_del_item(lip); 1628 if (lip->li_ops->iop_committing) 1629 lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence); 1630 } 1631 if (commit_seq) 1632 *commit_seq = cil->xc_ctx->sequence; 1633 1634 /* xlog_cil_push_background() releases cil->xc_ctx_lock */ 1635 xlog_cil_push_background(log); 1636 } 1637 1638 /* 1639 * Flush the CIL to stable storage but don't wait for it to complete. This 1640 * requires the CIL push to ensure the commit record for the push hits the disk, 1641 * but otherwise is no different to a push done from a log force. 1642 */ 1643 void 1644 xlog_cil_flush( 1645 struct xlog *log) 1646 { 1647 xfs_csn_t seq = log->l_cilp->xc_current_sequence; 1648 1649 trace_xfs_log_force(log->l_mp, seq, _RET_IP_); 1650 xlog_cil_push_now(log, seq, true); 1651 1652 /* 1653 * If the CIL is empty, make sure that any previous checkpoint that may 1654 * still be in an active iclog is pushed to stable storage. 1655 */ 1656 if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags)) 1657 xfs_log_force(log->l_mp, 0); 1658 } 1659 1660 /* 1661 * Conditionally push the CIL based on the sequence passed in. 1662 * 1663 * We only need to push if we haven't already pushed the sequence number given. 1664 * Hence the only time we will trigger a push here is if the push sequence is 1665 * the same as the current context. 1666 * 1667 * We return the current commit lsn to allow the callers to determine if an 1668 * iclog flush is necessary following this call. 1669 */ 1670 xfs_lsn_t 1671 xlog_cil_force_seq( 1672 struct xlog *log, 1673 xfs_csn_t sequence) 1674 { 1675 struct xfs_cil *cil = log->l_cilp; 1676 struct xfs_cil_ctx *ctx; 1677 xfs_lsn_t commit_lsn = NULLCOMMITLSN; 1678 1679 ASSERT(sequence <= cil->xc_current_sequence); 1680 1681 if (!sequence) 1682 sequence = cil->xc_current_sequence; 1683 trace_xfs_log_force(log->l_mp, sequence, _RET_IP_); 1684 1685 /* 1686 * check to see if we need to force out the current context. 1687 * xlog_cil_push() handles racing pushes for the same sequence, 1688 * so no need to deal with it here. 1689 */ 1690 restart: 1691 xlog_cil_push_now(log, sequence, false); 1692 1693 /* 1694 * See if we can find a previous sequence still committing. 1695 * We need to wait for all previous sequence commits to complete 1696 * before allowing the force of push_seq to go ahead. Hence block 1697 * on commits for those as well. 1698 */ 1699 spin_lock(&cil->xc_push_lock); 1700 list_for_each_entry(ctx, &cil->xc_committing, committing) { 1701 /* 1702 * Avoid getting stuck in this loop because we were woken by the 1703 * shutdown, but then went back to sleep once already in the 1704 * shutdown state. 1705 */ 1706 if (xlog_is_shutdown(log)) 1707 goto out_shutdown; 1708 if (ctx->sequence > sequence) 1709 continue; 1710 if (!ctx->commit_lsn) { 1711 /* 1712 * It is still being pushed! Wait for the push to 1713 * complete, then start again from the beginning. 1714 */ 1715 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); 1716 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); 1717 goto restart; 1718 } 1719 if (ctx->sequence != sequence) 1720 continue; 1721 /* found it! */ 1722 commit_lsn = ctx->commit_lsn; 1723 } 1724 1725 /* 1726 * The call to xlog_cil_push_now() executes the push in the background. 1727 * Hence by the time we have got here, our sequence may not have been 1728 * pushed yet.
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * Check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not
	 * empty, it means we haven't yet started the push, because if it had
	 * started we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Move dead percpu state to the relevant CIL context structures.
 *
 * We have to lock the CIL context here to ensure that nothing is modifying
 * the percpu state, either addition or removal. Both of these are done under
 * the CIL context lock, so grabbing that exclusively here will ensure we can
 * safely drain the cilpcp for the CPU that is dying.
 */
void
xlog_cil_pcp_dead(
	struct xlog		*log,
	unsigned int		cpu)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xlog_cil_pcp	*cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
	struct xfs_cil_ctx	*ctx;

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;
	if (ctx->ticket)
		ctx->ticket->t_curr_res += cilpcp->space_reserved;
	cilpcp->space_reserved = 0;

	if (!list_empty(&cilpcp->log_items))
		list_splice_init(&cilpcp->log_items, &ctx->log_items);
	if (!list_empty(&cilpcp->busy_extents))
		list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
	atomic_add(cilpcp->space_used, &ctx->space_used);
	cilpcp->space_used = 0;
	up_write(&cil->xc_ctx_lock);
}

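/*
 * Illustrative sketch only: xlog_cil_pcp_dead() is intended to run from a CPU
 * hotplug "dead" teardown callback, once the dying CPU can no longer add to
 * its cilpcp. The callback name, the iteration over mounted filesystems and
 * the hotplug state used below are placeholders, not the actual registration
 * code:
 *
 *	static int
 *	example_xfs_cpu_dead(unsigned int cpu)
 *	{
 *		// for each mounted filesystem's log:
 *		xlog_cil_pcp_dead(log, cpu);
 *		return 0;
 *	}
 *
 *	cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "xfs:dead",
 *				  NULL, example_xfs_cpu_dead);
 *
 * Because the drain above takes the context lock exclusively, commits racing
 * with the hotplug event see either the percpu state or the aggregated
 * context state, never a partially moved mix.
 */
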
/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent work items to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (cil->xc_ctx) {
		if (cil->xc_ctx->ticket)
			xfs_log_ticket_put(cil->xc_ctx->ticket);
		kmem_free(cil->xc_ctx);
	}

	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
	free_percpu(cil->xc_pcp);
	destroy_workqueue(cil->xc_push_wq);
	kmem_free(cil);
}

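/*
 * Illustrative sketch only: the expected pairing of the setup and teardown
 * functions above. The surrounding log allocation code and the error label
 * are simplified assumptions, not the real callers:
 *
 *	error = xlog_cil_init(log);
 *	if (error)
 *		goto out_free_log;
 *	...
 *	xlog_cil_destroy(log);
 *
 * xlog_cil_destroy() asserts XLOG_CIL_EMPTY, so callers must have pushed and
 * completed all outstanding checkpoints before tearing the log down.
 */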