/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
			niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
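
/*
 * Worked example for xlog_cil_iovec_space() (illustrative only; the
 * struct sizes below assume a typical 64-bit build and are not
 * guaranteed). With sizeof(struct xfs_log_vec) == 56 and
 * sizeof(struct xfs_log_iovec) == 16, a log vector carrying two iovecs
 * needs:
 *
 *	xlog_cil_iovec_space(2) = round_up(56 + 2 * 16, 8)
 *				= round_up(88, 8)
 *				= 88 bytes
 *
 * for the header and iovec array, before any data buffer space is
 * added on top of it.
 */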

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the size
 * of the incoming modification. Then during the formatting of the item we can
 * swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * reallocate it.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {

			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_next = NULL;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}
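
/*
 * For reference, the shadow buffer allocated above is a single contiguous
 * allocation laid out as below (a sketch of the code above, not a separate
 * on-disk format):
 *
 *	+--------------------+------------------------+------------------+
 *	| struct xfs_log_vec | iovec array at &lv[1]  | data buffer      |
 *	| (header)           | (lv_niovecs entries)   | (lv_buf, nbytes) |
 *	+--------------------+------------------------+------------------+
 *
 * lv_buf points xlog_cil_iovec_space(niovecs) bytes past the start of the
 * allocation, which is why that header region is rounded up to 64-bit
 * alignment - it guarantees the data buffer starts 64-bit aligned.
 */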

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
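
/*
 * Accounting example for xfs_cil_prepare_item() (hypothetical numbers): an
 * item already in the CIL with an old_lv of 2 iovecs and 128 bytes is
 * relogged with a new lv of 3 iovecs and 200 bytes. The net effect is:
 *
 *	*diff_len    += 200 - 128;	(+72 bytes)
 *	*diff_iovecs += 3 - 2;		(+1 iovec)
 *
 * so only the delta between the old and new log vectors needs to be stolen
 * from the committing transaction's ticket, and the item is not repinned
 * because it is already pinned in this context.
 */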

/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}
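
/*
 * To illustrate the reuse decision above (hypothetical sizes): if an item's
 * existing li_lv buffer is 256 bytes and the shadow lv built for this
 * modification only needs 192 bytes, we format straight into the existing
 * 256 byte buffer. If the modification instead needed 320 bytes, the shadow
 * would have been (re)allocated at 320 bytes by xlog_cil_alloc_shadow_bufs(),
 * we would format into it, and the old 256 byte lv would be handed back as
 * li_lv_shadow by xfs_cil_prepare_item() for a later, smaller relog to reuse.
 */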

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* account for space used by new iovec headers  */
	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
	len += iovhdr_res;
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
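
/*
 * Worked example of the log record header accounting above (illustrative
 * numbers only): with a 32k iclog and a 512 byte iclog header, iclog_space
 * is 32256 bytes. If the context has already used 30000 bytes and this
 * commit adds len = 5000, then 30000 / 32256 != 35000 / 32256, so the
 * checkpoint now crosses an iclog boundary and:
 *
 *	split_res = (5000 + 32256 - 1) / 32256;		(= 1 region split)
 *	split_res *= 512 + sizeof(struct xlog_op_header);
 *
 * That space is stolen from the committing transaction's ticket so the
 * checkpoint can pay for the extra headers xlog_write() emits when it
 * splits regions across iclogs.
 */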

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}

static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
				"discard failed for extent [0x%llx,%u], error %d",
				(unsigned long long)busyp->bno,
				busyp->length,
				error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}
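
/*
 * Completion flow for the discard path above, for reference: the last
 * chained bio's ->bi_end_io runs xlog_discard_endio() in bio completion
 * (IRQ) context, which only queues the work item; the busy extent clearing
 * and the kmem_free(ctx) then run from xfs_discard_wq in process context,
 * where taking pagb_lock does not require IRQ-safe locking.
 */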

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	/*
	 * If we are aborting the commit, wake up anyone waiting on the
	 * committing list. If we don't, then during a shutdown we can leave
	 * processes waiting in xlog_cil_force_lsn() on a sequence commit that
	 * will never happen because we aborted it.
	 */
	spin_lock(&ctx->cil->xc_push_lock);
	if (abort)
		wake_up_all(&ctx->cil->xc_commit_wait);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}
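
/*
 * Note on the abort path above: @abort is XFS_LI_ABORTED when we get here
 * from the error paths of xlog_cil_push(), i.e.
 *
 *	xlog_cil_committed(ctx, XFS_LI_ABORTED);
 *
 * In that case the busy extents are cleared without discarding and the
 * waiters on xc_commit_wait are woken so they can observe the shutdown
 * rather than sleep forever on a commit_lsn that will never be set.
 */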

/*
 * Push the Committed Item List to the log. If @push_seq flag is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * check the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}
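
	/*
	 * At this point ctx->lv_chain is a singly linked snapshot of every
	 * dirty item's log vector. For example, for three items A, B and C
	 * committed in that order, the chain looks like:
	 *
	 *	ctx->lv_chain -> lv(A) -> lv(B) -> lv(C) -> NULL
	 *
	 * Each item's li_lv has been cleared, so subsequent commits start a
	 * fresh shadow buffer/lv cycle against the next context.
	 */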

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_lsn requires us to mirror the new sequence into the
	 * cil structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			spin_unlock(&cil->xc_push_lock);
			goto out_abort_free_ticket;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);
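
	/*
	 * Ordering example for the loop above (hypothetical sequence
	 * numbers): if checkpoints 5 and 6 push concurrently and we are
	 * sequence 6, then while sequence 5 sits on xc_committing without a
	 * commit_lsn we sleep on xc_commit_wait and restart. Only once 5 has
	 * written its commit record and published its commit_lsn do we fall
	 * through and write ours, so commit records always reach the log in
	 * sequence order.
	 */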

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return -EIO;
}

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil *cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}

/*
 * We need to push the CIL every so often so we don't cache more than we can
 * fit in the log. The limit really is that a checkpoint can't be more than
 * half the log (the current checkpoint is not allowed to overwrite the
 * previous checkpoint), but commit latency and memory usage limit this to a
 * smaller size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);
}
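
/*
 * Example of the background push trigger above (illustrative only; the real
 * policy is whatever XLOG_CIL_SPACE_LIMIT() computes for this log): if the
 * limit works out to 4MB, commits accumulate in the current context until
 * ctx->space_used crosses 4MB, at which point the first committer past the
 * threshold queues xc_push_work and the checkpoint proceeds in the
 * background while new commits build up the next context.
 */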

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;
	xfs_lsn_t		xc_commit_lsn;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	xc_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = xc_commit_lsn;

	xfs_log_done(mp, tp->t_ticket, NULL, regrant);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, xc_commit_lsn, false);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}
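
/*
 * The commit fast path above, in call order (a summary of the code, not
 * additional locking rules):
 *
 *	xlog_cil_alloc_shadow_bufs()	allocate memory, no CIL locks held
 *	down_read(&cil->xc_ctx_lock)	lock out a concurrent CIL push
 *	xlog_cil_insert_items()		format items, steal reservation
 *	xfs_log_done()			release the unused transaction ticket
 *	xfs_trans_free_items()		unlock and free the logged items
 *	xlog_cil_push_background()	maybe queue a background push
 *	up_read(&cil->xc_ctx_lock)
 */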

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
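
/*
 * Caller sketch (hypothetical; it mirrors how a log force is expected to
 * consume the result):
 *
 *	commit_lsn = xlog_cil_force_lsn(log, seq);
 *	if (commit_lsn == 0)
 *		return -EIO;			shutdown in progress
 *	if (commit_lsn != NULLCOMMITLSN)
 *		...flush iclogs up to commit_lsn...
 *
 * NULLCOMMITLSN means the sequence was already stable in the log and no
 * iclog flush is required.
 */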

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	struct xfs_cil_ctx	*ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}