/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
			niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
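
/*
 * For illustration, the buffer sized by xlog_cil_iovec_space() is laid out
 * with the log vector header first, then the iovec array, then the 64-bit
 * aligned data area (a sketch; not part of the original source):
 *
 *	+---------------------------+  <- lv
 *	| struct xfs_log_vec        |
 *	+---------------------------+  <- lv->lv_iovecp = &lv[1]
 *	| niovecs xfs_log_iovecs    |
 *	+---------------------------+  <- rounded up to 8 bytes
 *	| formatted region data     |  <- lv->lv_buf
 *	+---------------------------+
 */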

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item_desc *lidp;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {

			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			lv = kmem_alloc(buf_size, KM_SLEEP|KM_NOFS);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
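
/*
 * To summarise the double-buffering scheme above (an illustrative walk
 * through, not part of the original source): on the first commit the item has
 * no old lv, so the shadow buffer built by xlog_cil_alloc_shadow_bufs()
 * becomes li_lv and the item is pinned. On a relog that fits the existing
 * li_lv, that buffer is simply overwritten in place. On a relog that needs
 * more space, the larger shadow buffer becomes li_lv and the old li_lv is
 * parked as the new shadow for possible reuse by a later modification.
 */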

/*
 * Format log items into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item_desc *lidp;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item_desc *lidp;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	/* account for space used by new iovec headers */
	len += diff_iovecs * sizeof(xlog_op_header_t);
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		tp->t_ticket->t_curr_res -= hdrs;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}
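
/*
 * Worked example for the header space steal above (illustrative numbers, not
 * from the original source): with 32k iclogs and a 512 byte iclog header,
 * iclog_space is 32256 bytes. If an insertion moves ctx->space_used across a
 * 32256 byte boundary, the checkpoint will need at least one more log record,
 * so the context ticket grows by one iclog header plus one op header for each
 * record the new length may span, paid for out of the committing
 * transaction's own ticket.
 */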

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

static void
xlog_discard_endio_work(
	struct work_struct *work)
{
	struct xfs_cil_ctx *ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount *mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio *bio)
{
	struct xfs_cil_ctx *ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				(unsigned long long)busyp->bno,
				busyp->length,
				error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}
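
/*
 * Note on the helper above (an observation, not from the original source):
 * __blkdev_issue_discard() chains the bios it builds together, so the single
 * ->bi_end_io set on the final bio does not run until every discard in the
 * chain has completed. Hence xlog_discard_endio() fires exactly once per
 * checkpoint context, whether it covered one busy extent or hundreds.
 */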

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	/*
	 * If we are aborting the commit, wake up anyone waiting on the
	 * committing list. If we don't, then during a shutdown we can leave
	 * processes sleeping in xlog_cil_force_lsn() waiting on a sequence
	 * commit that will never happen because we aborted it.
	 */
	spin_lock(&ctx->cil->xc_push_lock);
	if (abort)
		wake_up_all(&ctx->cil->xc_commit_wait);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog *log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * check for the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_lsn requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;
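
	/*
	 * Illustrative shape of the vector chain handed to xlog_write() below
	 * (a sketch, not from the original source):
	 *
	 *	lvhdr (XFS_TRANS_CHECKPOINT transaction header region)
	 *	  -> item 1 lv (its formatted regions)
	 *	    -> item 2 lv
	 *	      -> ...
	 *
	 * so the whole checkpoint is written as a single transaction headed
	 * by the header built above.
	 */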

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			spin_unlock(&cil->xc_push_lock);
			goto out_abort_free_ticket;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return -EIO;
}

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil *cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}
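
/*
 * Note on the queueing below (an observation, not from the original source):
 * queue_work() does nothing if xc_push_work is already pending, so racing
 * push requests for the same sequence collapse into a single run of
 * xlog_cil_push_work().
 */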

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}
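
/*
 * For orientation, the overall CIL commit flow implemented in this file
 * (a summary, not part of the original source):
 *
 *	xfs_log_commit_cil()
 *	    xlog_cil_alloc_shadow_bufs()	allocate, no CIL locks held
 *	    down_read(&cil->xc_ctx_lock)	lock out background pushes
 *	    xlog_cil_insert_items()
 *	        xlog_cil_insert_format_items()	format into lv buffers
 *	    xlog_cil_push_background()		queue a push if CIL is large
 *	    up_read(&cil->xc_ctx_lock)
 */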

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(mp, tp->t_ticket);

	tp->t_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = tp->t_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, regrant);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, tp->t_commit_lsn, false);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects.
	 * This is guaranteed by the push code first adding the context to the
	 * committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not
	 * empty, it means we haven't yet started the push, because if it had
	 * started we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}
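
/*
 * Hypothetical use of the check above from an item's ->iop_format() path
 * (an illustrative sketch only; the branch meanings are not from this file):
 *
 *	if (xfs_log_item_in_current_chkpt(lip))
 *		...	item first committed in the still-open checkpoint
 *	else
 *		...	item has been relogged into a later checkpoint
 */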

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}