// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}

static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
	struct xlog	*log = cil->xc_log;

	atomic_set(&cil->xc_iclog_hdrs,
		   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
			(log->l_iclog_size - log->l_iclog_hsize)));
}
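
/*
 * A rough reading of the calculation above (illustrative only; the actual
 * values depend on the log and iclog sizes): each iclog can carry
 * (l_iclog_size - l_iclog_hsize) bytes of checkpoint data, so dividing the
 * blocking space limit by that payload size bounds how many iclog headers a
 * full CIL checkpoint might need. Commits steal reservation for these
 * headers against this cached count as they add items to the CIL.
 */
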
/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
	struct xfs_cil		*cil,
	struct xfs_log_item	*lip)
{
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	INIT_LIST_HEAD(&ctx->log_items);
	INIT_LIST_HEAD(&ctx->lv_chain);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}

/*
 * Aggregate the CIL per-cpu structures into global counts, lists, etc. and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing
 * else will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * We're in the middle of switching cil contexts.  Reset the
		 * counter we use to detect when the current context is nearing
		 * full.
		 */
		cilpcp->space_used = 0;
	}
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;
	int			count = 0;

	/* Trigger atomic updates then aggregate only for the first caller */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	/*
	 * We can race with other cpus setting cil_pcpmask.  However, we've
	 * atomically cleared PCP_SPACE which forces other threads to add to
	 * the global space used count.  cil_pcpmask is a superset of cilpcp
	 * structures that could have a nonzero space_used.
	 */
	for_each_cpu(cpu, &ctx->cil_pcpmask) {
		int	old, prev;

		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		do {
			old = cilpcp->space_used;
			prev = cmpxchg(&cilpcp->space_used, old, 0);
		} while (old != prev);
		count += old;
	}
	atomic_add(count, &ctx->space_used);
}

static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	xlog_cil_set_iclog_hdr_count(cil);
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
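
/*
 * Sketch of the log vector buffer layout sized with xlog_cil_iovec_space()
 * (illustrative; actual structure sizes depend on the build):
 *
 *	+------------------------+ <- lv (struct xfs_log_vec)
 *	| iovec array,           | <- lv->lv_iovecp = (void *)&lv[1]
 *	| niovecs entries        |
 *	+------------------------+ <- 64-bit aligned via the round_up()
 *	| formatted region data  | <- lv->lv_buf
 *	+------------------------+
 */
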
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if it:
 *	a) does not exist; or
 *	b) is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start of
		 * the next one is naturally aligned.  We'll need to account for
		 * that slack space here.
		 *
		 * We also add the xlog_op_header to each region when
		 * formatting, but that's not accounted to the size of the item
		 * at this point. Hence we'll need an additional number of bytes
		 * for each vector to hold an opheader.
		 *
		 * Then round nbytes up to 64-bit alignment so that the initial
		 * buffer alignment is easy to calculate and verify.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kvzalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			INIT_LIST_HEAD(&lv->lv_list);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}
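
/*
 * Worked example for the sizing above (illustrative numbers only): an item
 * reporting niovecs = 2 and nbytes = 100 via ->iop_size needs
 *	100 + 2 * (8 + sizeof(struct xlog_op_header))
 * bytes, rounded up to an 8 byte boundary, for the formatted data, plus
 * xlog_cil_iovec_space(2) for the log vector header and iovec array.
 */
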
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
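
/*
 * Relogging example for the accounting above (illustrative numbers): an item
 * whose previous lv held 512 bytes and whose new lv holds 768 bytes
 * contributes +768 for the new lv and -512 for old_lv, so the checkpoint
 * space consumption only grows by the 256 byte delta.
 */
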
/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
	struct xlog	*log,
	int32_t		space_used)
{
	if (waitqueue_active(&log->l_cilp->xc_push_wait))
		return true;
	if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
		return true;
	return false;
}
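
/*
 * Two thresholds govern the checks above and below (see xfs_log_priv.h for
 * the macro definitions): crossing XLOG_CIL_SPACE_LIMIT() causes background
 * push work to be queued, while crossing XLOG_CIL_BLOCKING_SPACE_LIMIT()
 * (or finding throttled waiters on xc_push_wait) additionally blocks
 * committers until the push gets under way.
 */
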
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	uint32_t		released_space)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
	int			space_used;
	int			order;
	unsigned int		cpu_nr;
	struct xlog_cil_pcp	*cilpcp;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * Subtract the space released by intent cancelation from the space we
	 * consumed so that we remove it from the CIL space and add it back to
	 * the current transaction reservation context.
	 */
	len -= released_space;

	/*
	 * Grab the per-cpu pointer for the CIL before we start any accounting.
	 * That ensures that we are running with pre-emption disabled and so we
	 * can't be scheduled away between split sample/update operations that
	 * are done without outside locking to serialise them.
	 */
	cpu_nr = get_cpu();
	cilpcp = this_cpu_ptr(cil->xc_pcp);

	/* Tell the future push that there was work added by this CPU. */
	if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
		cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

	/*
	 * We need to take the CIL checkpoint unit reservation on the first
	 * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
	 * unnecessarily do an atomic op in the fast path here. We can clear the
	 * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
	 * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	/*
	 * Check if we need to steal iclog headers. atomic_read() is not a
	 * locked atomic operation, so we can check the value before we do any
	 * real atomic ops in the fast path. If we've already taken the CIL unit
	 * reservation from this commit, we've already got one iclog header
	 * space reserved so we have to account for that otherwise we risk
	 * overrunning the reservation on this ticket.
	 *
	 * If the CIL is already at the hard limit, we might need more header
	 * space than originally reserved. So steal more header space from every
	 * commit that occurs once we are over the hard limit to ensure the CIL
	 * push won't run out of reservation space.
	 *
	 * This can steal more than we need, but that's OK.
	 *
	 * The cil->xc_ctx_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
	    xlog_cil_over_hard_limit(log, space_used)) {
		split_res = log->l_iclog_hsize +
					sizeof(struct xlog_op_header);
		if (ctx_res)
			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
		else
			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
	}
	cilpcp->space_reserved += ctx_res;
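
	/*
	 * Illustration of the stealing above: a transaction ticket with
	 * t_iclog_hdrs = 4 steals 4 * split_res bytes, or 3 * split_res if
	 * this commit also took the unit reservation (which already includes
	 * one iclog header's worth of space).
	 */
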
	/*
	 * Accurately account when over the soft limit, otherwise fold the
	 * percpu count into the global count if over the per-cpu threshold.
	 */
	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
		atomic_add(len, &ctx->space_used);
	} else if (cilpcp->space_used + len >
			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
		space_used = atomic_add_return(cilpcp->space_used + len,
						&ctx->space_used);
		cilpcp->space_used = 0;

		/*
		 * If we just transitioned over the soft limit, we need to
		 * transition to the global atomic counter.
		 */
		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
			xlog_cil_insert_pcp_aggregate(cil, ctx);
	} else {
		cilpcp->space_used += len;
	}
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

	/*
	 * Now update the order of everything modified in the transaction
	 * and insert items into the CIL if they aren't already there.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	order = atomic_inc_return(&ctx->order_id);
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu();

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}

static void
xlog_cil_free_logvec(
	struct list_head	*lv_chain)
{
	struct xfs_log_vec	*lv;

	while (!list_empty(lv_chain)) {
		lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
		list_del_init(&lv->lv_list);
		kmem_free(lv);
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(xfs_has_discard(mp));

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	"discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(&ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the grant write head beyond the tail LSN and overwrite
		 * it.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);

		/*
		 * Make sure the metadata we are about to overwrite in the log
		 * has been flushed to stable storage before this iclog is
		 * issued.
		 */
		spin_lock(&cil->xc_log->l_icloglock);
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
		spin_unlock(&cil->xc_log->l_icloglock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now. This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}

/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait,
						&cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait,
						&cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx	*ctx,
	uint32_t		chain_len)
{
	struct xlog	*log = ctx->cil->xc_log;
	int		error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}
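
/*
 * Ordering example for the helpers above: checkpoints for sequences 5 and 6
 * may be pushed concurrently, but xlog_cil_order_write() makes sequence 6
 * sleep on xc_start_wait (or xc_commit_wait) until sequence 5 has its start
 * (or commit) LSN recorded, so each record type always reaches the log in
 * ascending sequence order.
 */
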
/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx	*ctx)
{
	struct xlog		*log = ctx->cil->xc_log;
	struct xlog_op_header	ophdr = {
		.oh_clientid = XFS_TRANSACTION,
		.oh_tid = cpu_to_be32(ctx->ticket->t_tid),
		.oh_flags = XLOG_COMMIT_TRANS,
	};
	struct xfs_log_iovec	reg = {
		.i_addr = &ophdr,
		.i_len = sizeof(struct xlog_op_header),
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec	vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int			error;
	LIST_HEAD(lv_chain);
	list_add(&vec.lv_list, &lv_chain);

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	/* account for space used by record data */
	ctx->ticket->t_curr_res -= reg.i_len;
	error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
	if (error)
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	return error;
}

struct xlog_cil_trans_hdr {
	struct xlog_op_header	oph[2];
	struct xfs_trans_header	thdr;
	struct xfs_log_iovec	lhdr[2];
};

/*
 * Build a checkpoint transaction header to begin the journal transaction. We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
	struct xfs_cil_ctx	*ctx,
	struct xlog_cil_trans_hdr *hdr,
	struct xfs_log_vec	*lvhdr,
	int			num_iovecs)
{
	struct xlog_ticket	*tic = ctx->ticket;
	__be32			tid = cpu_to_be32(tic->t_tid);

	memset(hdr, 0, sizeof(*hdr));

	/* Log start record */
	hdr->oph[0].oh_tid = tid;
	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
	hdr->oph[0].oh_flags = XLOG_START_TRANS;

	/* log iovec region pointer */
	hdr->lhdr[0].i_addr = &hdr->oph[0];
	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

	/* log opheader */
	hdr->oph[1].oh_tid = tid;
	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

	/* transaction header in host byte order format */
	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
	hdr->thdr.th_tid = tic->t_tid;
	hdr->thdr.th_num_items = num_iovecs;

	/* log iovec region pointer */
	hdr->lhdr[1].i_addr = &hdr->oph[1];
	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
			sizeof(struct xfs_trans_header);
	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

	lvhdr->lv_niovecs = 2;
	lvhdr->lv_iovecp = &hdr->lhdr[0];
	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

	tic->t_curr_res -= lvhdr->lv_bytes;
}
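
/*
 * Sketch of the on-disk region ordering this produces: the checkpoint opens
 * with lhdr[0] (an empty XLOG_START_TRANS opheader) followed by lhdr[1] (an
 * opheader wrapping the xfs_trans_header), then the sorted lv chain of
 * formatted items, and is closed by the standalone XLOG_COMMIT_TRANS
 * opheader written by xlog_cil_write_commit_record().
 */
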
/*
 * CIL item reordering compare function. We want to order in ascending ID order,
 * but we want to leave items with the same ID in the order they were added to
 * the list. This is important for operations like reflink where we log 4 order
 * dependent intents in a single transaction when we overwrite an existing
 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI(inc), BUI(remap)...
 */
static int
xlog_cil_order_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_vec	*l1 = container_of(a, struct xfs_log_vec, lv_list);
	struct xfs_log_vec	*l2 = container_of(b, struct xfs_log_vec, lv_list);

	return l1->lv_order_id > l2->lv_order_id;
}
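
/*
 * Note that list_sort() is a stable sort and the comparison above only ever
 * reports "greater than", so log vectors sharing an order id (such as the
 * reflink intent chain in the example above) retain their insertion order
 * across the sort in xlog_cil_push_work().
 */
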
/*
 * Pull all the log vectors off the items in the CIL, and remove the items from
 * the CIL. We don't need the CIL lock here because it's only needed on the
 * transaction commit side which is currently locked out by the flush lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move them to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
	struct xfs_cil_ctx	*ctx,
	struct list_head	*whiteouts,
	uint32_t		*num_iovecs,
	uint32_t		*num_bytes)
{
	while (!list_empty(&ctx->log_items)) {
		struct xfs_log_item	*item;
		struct xfs_log_vec	*lv;

		item = list_first_entry(&ctx->log_items,
					struct xfs_log_item, li_cil);

		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
			list_move(&item->li_cil, whiteouts);
			trace_xfs_cil_whiteout_skip(item);
			continue;
		}

		lv = item->li_lv;
		lv->lv_order_id = item->li_order_id;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			*num_bytes += lv->lv_bytes;
		*num_iovecs += lv->lv_niovecs;
		list_add_tail(&lv->lv_list, &ctx->lv_chain);

		list_del_init(&item->li_cil);
		item->li_order_id = 0;
		item->li_lv = NULL;
	}
}

static void
xlog_cil_cleanup_whiteouts(
	struct list_head	*whiteouts)
{
	while (!list_empty(whiteouts)) {
		struct xfs_log_item *item = list_first_entry(whiteouts,
						struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		trace_xfs_cil_whiteout_unpin(item);
		item->li_ops->iop_unpin(item, 1);
	}
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_cil_ctx	*new_ctx;
	int			num_iovecs = 0;
	int			num_bytes = 0;
	int			error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec	lvhdr = {};
	xfs_csn_t		push_seq;
	bool			push_commit_stable;
	LIST_HEAD(whiteouts);
	struct xlog_ticket	*ticket;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The ctx->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	xlog_cil_push_pcp_aggregate(cil, ctx);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the wait makes to wait
	 * on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context.
	 * We can't just go straight to the commit record, though - we need to
	 * synchronise with previous and future commits so that the commit
	 * records are correctly ordered in the log to ensure that we process
	 * items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Sort the log vector chain before we add the transaction headers.
	 * This ensures we always have the transaction headers at the start
	 * of the chain.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
	 * it gets written into the iclog first.
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	/*
	 * Take the lvhdr back off the lv_chain immediately after calling
	 * xlog_cil_write_chain() as it should not be passed to log IO
	 * completion.
	 */
	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx so we can ungrant it after releasing the
	 * commit_iclog. The ctx may be freed by the time we return from
	 * releasing the commit_iclog (i.e. checkpoint has been completed and
	 * callback run) so we can't reference the ctx after the call to
	 * xlog_state_release_iclog().
	 */
	ticket = ctx->ticket;

	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, then we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk.
	 * Hence we use the iclog header lsn and compare it to the commit lsn
	 * to determine if we need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	xlog_cil_cleanup_whiteouts(&whiteouts);
	xfs_log_ticket_ungrant(log, ticket);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;

out_abort_free_ticket:
	ASSERT(xlog_is_shutdown(log));
	xlog_cil_cleanup_whiteouts(&whiteouts);
	if (!ctx->commit_iclog) {
		xfs_log_ticket_ungrant(log, ctx->ticket);
		xlog_cil_committed(ctx);
		return;
	}
	spin_lock(&log->l_icloglock);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
	xfs_log_ticket_ungrant(log, ticket);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;
	int		space_used = atomic_read(&cil->xc_ctx->space_used);

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

	/*
	 * We are done if:
	 * - we haven't used up all the space available yet; or
	 * - we've already queued up a push; and
	 * - we're not over the hard limit; and
	 * - nothing has been over the hard limit.
	 *
	 * If so, we don't need to take the push lock as there's nothing to do.
	 */
	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
	    (cil->xc_push_seq == cil->xc_current_sequence &&
	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
	     !waitqueue_active(&cil->xc_push_wait))) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The ctx->xc_push_lock provides the serialisation necessary for safely
	 * calling xlog_cil_over_hard_limit() in this context.
	 */
	if (xlog_cil_over_hard_limit(log, space_used)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}
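
/*
 * The xlog_wait() in xlog_cil_push_background() pairs with the
 * wake_up_all(&cil->xc_push_wait) in xlog_cil_push_work(): a committer that
 * drove the CIL over the blocking limit sleeps there and is only released
 * once the push has switched to a new, empty context.
 */
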
/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * If this is an async flush request, we always need to set the
	 * xc_push_commit_stable flag even if something else has already queued
	 * a push. The flush caller is asking for the CIL to be on stable
	 * storage when the next push completes, so regardless of who has queued
	 * the push, the flush requires stable semantics from it.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no more work that we need to do.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
	    push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * If there are intent done items in this transaction and the related intent was
 * committed in the current (same) CIL checkpoint, we don't need to write either
 * the intent or intent done item to the journal as the change will be
 * journalled atomically within this checkpoint. As we cannot remove items from
 * the CIL here, mark the related intent with a whiteout so that the CIL push
 * can remove it rather than writing it to the journal. Then remove the intent
 * done item from the current transaction and release it so it doesn't get put
 * into the CIL at all.
 */
static uint32_t
xlog_cil_process_intents(
	struct xfs_cil		*cil,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *ilip, *next;
	uint32_t		len = 0;

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
			continue;

		ilip = lip->li_ops->iop_intent(lip);
		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
			continue;
		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
		trace_xfs_cil_whiteout_mark(ilip);
		len += ilip->li_lv->lv_bytes;
		kmem_free(ilip->li_lv);
		ilip->li_lv = NULL;

		xfs_trans_del_item(lip);
		lip->li_ops->iop_release(lip);
	}
	return len;
}
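
/*
 * Whiteout example: a transaction carrying an EFD whose EFI is still in the
 * current CIL checkpoint cancels both - the EFI is marked XFS_LI_WHITEOUT
 * and discarded at push time, while the EFD is released here and never
 * enters the CIL, as the net change from the pair within a single checkpoint
 * is nil.
 */
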

/*
 * Commit a transaction's dirty log items to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
	struct xlog		*log,
	struct xfs_trans	*tp,
	xfs_csn_t		*commit_seq,
	bool			regrant)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;
	uint32_t		released_space = 0;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
		released_space = xlog_cil_process_intents(cil, tp);

	xlog_cil_insert_items(log, tp, released_space);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}
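
/*
 * Hedged sketch of how a caller can use the returned commit sequence (the
 * calling code shown here is an assumption modelled on a synchronous
 * transaction commit, not code from this file):
 *
 *	xfs_csn_t	seq;
 *
 *	xlog_cil_commit(log, tp, &seq, regrant);
 *	if (sync)
 *		error = xfs_log_force_seq(mp, seq, XFS_LOG_SYNC, NULL);
 *
 * The sequence identifies the checkpoint context the transaction joined, so a
 * synchronous commit only has to wait for that specific checkpoint to reach
 * stable storage rather than forcing the entire log.
 */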

/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the
 * disk, but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog	*log)
{
	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * If the CIL is empty, make sure that any previous checkpoint that may
	 * still be in an active iclog is pushed to stable storage.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
		xfs_log_force(log->l_mp, 0);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number
 * given. Hence the only time we will trigger a push here is if the push
 * sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * Check to see if we need to force out the current context.
	 * xlog_cil_push_now() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here, our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context on the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not
	 * empty, it means we haven't yet started the push, because if it had
	 * started we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
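
/*
 * Hedged sketch of how a caller can interpret the return value above (the
 * calling code is an assumption, not code from this file):
 *
 *	xfs_lsn_t	lsn = xlog_cil_force_seq(log, seq);
 *
 *	if (lsn == NULLCOMMITLSN)
 *		return 0;	// LSN already stable, nothing to flush
 *	return xfs_log_force_lsn(mp, lsn, flags, log_flushed);
 *
 * A zero LSN from the shutdown path still flows into the LSN force so that
 * the iclog state machine error handling runs, as described above
 * out_shutdown.
 */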

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (cil->xc_ctx) {
		if (cil->xc_ctx->ticket)
			xfs_log_ticket_put(cil->xc_ctx->ticket);
		kmem_free(cil->xc_ctx);
	}

	ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
	free_percpu(cil->xc_pcp);
	destroy_workqueue(cil->xc_push_wq);
	kmem_free(cil);
}
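
/*
 * Lifecycle sketch (illustrative; the mount/unmount code calling these is an
 * assumption, not part of this file): the CIL is created once per log and
 * torn down with it:
 *
 *	error = xlog_cil_init(log);
 *	if (error)
 *		return error;
 *	...
 *	xlog_cil_destroy(log);
 *
 * Note that xlog_cil_destroy() expects a quiesced, empty CIL: the ASSERT
 * above fires if dirty items were left behind in the final context.
 */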