1 /* 2 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it would be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * GNU General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public License 14 * along with this program; if not, write the Free Software Foundation, 15 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 */ 17 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_log_format.h" 21 #include "xfs_shared.h" 22 #include "xfs_trans_resv.h" 23 #include "xfs_sb.h" 24 #include "xfs_ag.h" 25 #include "xfs_mount.h" 26 #include "xfs_error.h" 27 #include "xfs_alloc.h" 28 #include "xfs_extent_busy.h" 29 #include "xfs_discard.h" 30 #include "xfs_trans.h" 31 #include "xfs_trans_priv.h" 32 #include "xfs_log.h" 33 #include "xfs_log_priv.h" 34 35 /* 36 * Allocate a new ticket. Failing to get a new ticket makes it really hard to 37 * recover, so we don't allow failure here. Also, we allocate in a context that 38 * we don't want to be issuing transactions from, so we need to tell the 39 * allocation code this as well. 40 * 41 * We don't reserve any space for the ticket - we are going to steal whatever 42 * space we require from transactions as they commit. To ensure we reserve all 43 * the space required, we need to set the current reservation of the ticket to 44 * zero so that we know to steal the initial transaction overhead from the 45 * first transaction commit. 46 */ 47 static struct xlog_ticket * 48 xlog_cil_ticket_alloc( 49 struct xlog *log) 50 { 51 struct xlog_ticket *tic; 52 53 tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0, 54 KM_SLEEP|KM_NOFS); 55 tic->t_trans_type = XFS_TRANS_CHECKPOINT; 56 57 /* 58 * set the current reservation to zero so we know to steal the basic 59 * transaction overhead reservation from the first transaction commit. 60 */ 61 tic->t_curr_res = 0; 62 return tic; 63 } 64 65 /* 66 * After the first stage of log recovery is done, we know where the head and 67 * tail of the log are. We need this log initialisation done before we can 68 * initialise the first CIL checkpoint context. 69 * 70 * Here we allocate a log ticket to track space usage during a CIL push. This 71 * ticket is passed to xlog_write() directly so that we don't slowly leak log 72 * space by failing to account for space used by log headers and additional 73 * region headers for split regions. 74 */ 75 void 76 xlog_cil_init_post_recovery( 77 struct xlog *log) 78 { 79 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); 80 log->l_cilp->xc_ctx->sequence = 1; 81 log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle, 82 log->l_curr_block); 83 } 84 85 /* 86 * Prepare the log item for insertion into the CIL. Calculate the difference in 87 * log space and vectors it will consume, and if it is a new item pin it as 88 * well. 89 */ 90 STATIC void 91 xfs_cil_prepare_item( 92 struct xlog *log, 93 struct xfs_log_vec *lv, 94 struct xfs_log_vec *old_lv, 95 int *diff_len, 96 int *diff_iovecs) 97 { 98 /* Account for the new LV being passed in */ 99 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) { 100 *diff_len += lv->lv_buf_len; 101 *diff_iovecs += lv->lv_niovecs; 102 } 103 104 /* 105 * If there is no old LV, this is the first time we've seen the item in 106 * this CIL context and so we need to pin it. If we are replacing the 107 * old_lv, then remove the space it accounts for and free it. 108 */ 109 if (!old_lv) 110 lv->lv_item->li_ops->iop_pin(lv->lv_item); 111 else if (old_lv != lv) { 112 ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED); 113 114 *diff_len -= old_lv->lv_buf_len; 115 *diff_iovecs -= old_lv->lv_niovecs; 116 kmem_free(old_lv); 117 } 118 119 /* attach new log vector to log item */ 120 lv->lv_item->li_lv = lv; 121 122 /* 123 * If this is the first time the item is being committed to the 124 * CIL, store the sequence number on the log item so we can 125 * tell in future commits whether this is the first checkpoint 126 * the item is being committed into. 127 */ 128 if (!lv->lv_item->li_seq) 129 lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence; 130 } 131 132 /* 133 * Format log item into a flat buffers 134 * 135 * For delayed logging, we need to hold a formatted buffer containing all the 136 * changes on the log item. This enables us to relog the item in memory and 137 * write it out asynchronously without needing to relock the object that was 138 * modified at the time it gets written into the iclog. 139 * 140 * This function builds a vector for the changes in each log item in the 141 * transaction. It then works out the length of the buffer needed for each log 142 * item, allocates them and formats the vector for the item into the buffer. 143 * The buffer is then attached to the log item are then inserted into the 144 * Committed Item List for tracking until the next checkpoint is written out. 145 * 146 * We don't set up region headers during this process; we simply copy the 147 * regions into the flat buffer. We can do this because we still have to do a 148 * formatting step to write the regions into the iclog buffer. Writing the 149 * ophdrs during the iclog write means that we can support splitting large 150 * regions across iclog boundares without needing a change in the format of the 151 * item/region encapsulation. 152 * 153 * Hence what we need to do now is change the rewrite the vector array to point 154 * to the copied region inside the buffer we just allocated. This allows us to 155 * format the regions into the iclog as though they are being formatted 156 * directly out of the objects themselves. 157 */ 158 static void 159 xlog_cil_insert_format_items( 160 struct xlog *log, 161 struct xfs_trans *tp, 162 int *diff_len, 163 int *diff_iovecs) 164 { 165 struct xfs_log_item_desc *lidp; 166 167 168 /* Bail out if we didn't find a log item. */ 169 if (list_empty(&tp->t_items)) { 170 ASSERT(0); 171 return; 172 } 173 174 list_for_each_entry(lidp, &tp->t_items, lid_trans) { 175 struct xfs_log_item *lip = lidp->lid_item; 176 struct xfs_log_vec *lv; 177 struct xfs_log_vec *old_lv; 178 int niovecs = 0; 179 int nbytes = 0; 180 int buf_size; 181 bool ordered = false; 182 183 /* Skip items which aren't dirty in this transaction. */ 184 if (!(lidp->lid_flags & XFS_LID_DIRTY)) 185 continue; 186 187 /* get number of vecs and size of data to be stored */ 188 lip->li_ops->iop_size(lip, &niovecs, &nbytes); 189 190 /* Skip items that do not have any vectors for writing */ 191 if (!niovecs) 192 continue; 193 194 /* 195 * Ordered items need to be tracked but we do not wish to write 196 * them. We need a logvec to track the object, but we do not 197 * need an iovec or buffer to be allocated for copying data. 198 */ 199 if (niovecs == XFS_LOG_VEC_ORDERED) { 200 ordered = true; 201 niovecs = 0; 202 nbytes = 0; 203 } 204 205 /* 206 * We 64-bit align the length of each iovec so that the start 207 * of the next one is naturally aligned. We'll need to 208 * account for that slack space here. 209 */ 210 nbytes += niovecs * sizeof(uint64_t); 211 212 /* grab the old item if it exists for reservation accounting */ 213 old_lv = lip->li_lv; 214 215 /* calc buffer size */ 216 buf_size = sizeof(struct xfs_log_vec) + nbytes + 217 niovecs * sizeof(struct xfs_log_iovec); 218 219 /* compare to existing item size */ 220 if (lip->li_lv && buf_size <= lip->li_lv->lv_size) { 221 /* same or smaller, optimise common overwrite case */ 222 lv = lip->li_lv; 223 lv->lv_next = NULL; 224 225 if (ordered) 226 goto insert; 227 228 /* 229 * set the item up as though it is a new insertion so 230 * that the space reservation accounting is correct. 231 */ 232 *diff_iovecs -= lv->lv_niovecs; 233 *diff_len -= lv->lv_buf_len; 234 } else { 235 /* allocate new data chunk */ 236 lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS); 237 lv->lv_item = lip; 238 lv->lv_size = buf_size; 239 if (ordered) { 240 /* track as an ordered logvec */ 241 ASSERT(lip->li_lv == NULL); 242 lv->lv_buf_len = XFS_LOG_VEC_ORDERED; 243 goto insert; 244 } 245 lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; 246 } 247 248 /* Ensure the lv is set up according to ->iop_size */ 249 lv->lv_niovecs = niovecs; 250 251 /* The allocated data region lies beyond the iovec region */ 252 lv->lv_buf_len = 0; 253 lv->lv_buf = (char *)lv + buf_size - nbytes; 254 lip->li_ops->iop_format(lip, lv); 255 insert: 256 ASSERT(lv->lv_buf_len <= nbytes); 257 xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs); 258 } 259 } 260 261 /* 262 * Insert the log items into the CIL and calculate the difference in space 263 * consumed by the item. Add the space to the checkpoint ticket and calculate 264 * if the change requires additional log metadata. If it does, take that space 265 * as well. Remove the amount of space we added to the checkpoint ticket from 266 * the current transaction ticket so that the accounting works out correctly. 267 */ 268 static void 269 xlog_cil_insert_items( 270 struct xlog *log, 271 struct xfs_trans *tp) 272 { 273 struct xfs_cil *cil = log->l_cilp; 274 struct xfs_cil_ctx *ctx = cil->xc_ctx; 275 struct xfs_log_item_desc *lidp; 276 int len = 0; 277 int diff_iovecs = 0; 278 int iclog_space; 279 280 ASSERT(tp); 281 282 /* 283 * We can do this safely because the context can't checkpoint until we 284 * are done so it doesn't matter exactly how we update the CIL. 285 */ 286 xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs); 287 288 /* 289 * Now (re-)position everything modified at the tail of the CIL. 290 * We do this here so we only need to take the CIL lock once during 291 * the transaction commit. 292 */ 293 spin_lock(&cil->xc_cil_lock); 294 list_for_each_entry(lidp, &tp->t_items, lid_trans) { 295 struct xfs_log_item *lip = lidp->lid_item; 296 297 /* Skip items which aren't dirty in this transaction. */ 298 if (!(lidp->lid_flags & XFS_LID_DIRTY)) 299 continue; 300 301 list_move_tail(&lip->li_cil, &cil->xc_cil); 302 } 303 304 /* account for space used by new iovec headers */ 305 len += diff_iovecs * sizeof(xlog_op_header_t); 306 ctx->nvecs += diff_iovecs; 307 308 /* attach the transaction to the CIL if it has any busy extents */ 309 if (!list_empty(&tp->t_busy)) 310 list_splice_init(&tp->t_busy, &ctx->busy_extents); 311 312 /* 313 * Now transfer enough transaction reservation to the context ticket 314 * for the checkpoint. The context ticket is special - the unit 315 * reservation has to grow as well as the current reservation as we 316 * steal from tickets so we can correctly determine the space used 317 * during the transaction commit. 318 */ 319 if (ctx->ticket->t_curr_res == 0) { 320 ctx->ticket->t_curr_res = ctx->ticket->t_unit_res; 321 tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res; 322 } 323 324 /* do we need space for more log record headers? */ 325 iclog_space = log->l_iclog_size - log->l_iclog_hsize; 326 if (len > 0 && (ctx->space_used / iclog_space != 327 (ctx->space_used + len) / iclog_space)) { 328 int hdrs; 329 330 hdrs = (len + iclog_space - 1) / iclog_space; 331 /* need to take into account split region headers, too */ 332 hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header); 333 ctx->ticket->t_unit_res += hdrs; 334 ctx->ticket->t_curr_res += hdrs; 335 tp->t_ticket->t_curr_res -= hdrs; 336 ASSERT(tp->t_ticket->t_curr_res >= len); 337 } 338 tp->t_ticket->t_curr_res -= len; 339 ctx->space_used += len; 340 341 spin_unlock(&cil->xc_cil_lock); 342 } 343 344 static void 345 xlog_cil_free_logvec( 346 struct xfs_log_vec *log_vector) 347 { 348 struct xfs_log_vec *lv; 349 350 for (lv = log_vector; lv; ) { 351 struct xfs_log_vec *next = lv->lv_next; 352 kmem_free(lv); 353 lv = next; 354 } 355 } 356 357 /* 358 * Mark all items committed and clear busy extents. We free the log vector 359 * chains in a separate pass so that we unpin the log items as quickly as 360 * possible. 361 */ 362 static void 363 xlog_cil_committed( 364 void *args, 365 int abort) 366 { 367 struct xfs_cil_ctx *ctx = args; 368 struct xfs_mount *mp = ctx->cil->xc_log->l_mp; 369 370 xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, 371 ctx->start_lsn, abort); 372 373 xfs_extent_busy_sort(&ctx->busy_extents); 374 xfs_extent_busy_clear(mp, &ctx->busy_extents, 375 (mp->m_flags & XFS_MOUNT_DISCARD) && !abort); 376 377 spin_lock(&ctx->cil->xc_push_lock); 378 list_del(&ctx->committing); 379 spin_unlock(&ctx->cil->xc_push_lock); 380 381 xlog_cil_free_logvec(ctx->lv_chain); 382 383 if (!list_empty(&ctx->busy_extents)) { 384 ASSERT(mp->m_flags & XFS_MOUNT_DISCARD); 385 386 xfs_discard_extents(mp, &ctx->busy_extents); 387 xfs_extent_busy_clear(mp, &ctx->busy_extents, false); 388 } 389 390 kmem_free(ctx); 391 } 392 393 /* 394 * Push the Committed Item List to the log. If @push_seq flag is zero, then it 395 * is a background flush and so we can chose to ignore it. Otherwise, if the 396 * current sequence is the same as @push_seq we need to do a flush. If 397 * @push_seq is less than the current sequence, then it has already been 398 * flushed and we don't need to do anything - the caller will wait for it to 399 * complete if necessary. 400 * 401 * @push_seq is a value rather than a flag because that allows us to do an 402 * unlocked check of the sequence number for a match. Hence we can allows log 403 * forces to run racily and not issue pushes for the same sequence twice. If we 404 * get a race between multiple pushes for the same sequence they will block on 405 * the first one and then abort, hence avoiding needless pushes. 406 */ 407 STATIC int 408 xlog_cil_push( 409 struct xlog *log) 410 { 411 struct xfs_cil *cil = log->l_cilp; 412 struct xfs_log_vec *lv; 413 struct xfs_cil_ctx *ctx; 414 struct xfs_cil_ctx *new_ctx; 415 struct xlog_in_core *commit_iclog; 416 struct xlog_ticket *tic; 417 int num_iovecs; 418 int error = 0; 419 struct xfs_trans_header thdr; 420 struct xfs_log_iovec lhdr; 421 struct xfs_log_vec lvhdr = { NULL }; 422 xfs_lsn_t commit_lsn; 423 xfs_lsn_t push_seq; 424 425 if (!cil) 426 return 0; 427 428 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); 429 new_ctx->ticket = xlog_cil_ticket_alloc(log); 430 431 down_write(&cil->xc_ctx_lock); 432 ctx = cil->xc_ctx; 433 434 spin_lock(&cil->xc_push_lock); 435 push_seq = cil->xc_push_seq; 436 ASSERT(push_seq <= ctx->sequence); 437 438 /* 439 * Check if we've anything to push. If there is nothing, then we don't 440 * move on to a new sequence number and so we have to be able to push 441 * this sequence again later. 442 */ 443 if (list_empty(&cil->xc_cil)) { 444 cil->xc_push_seq = 0; 445 spin_unlock(&cil->xc_push_lock); 446 goto out_skip; 447 } 448 spin_unlock(&cil->xc_push_lock); 449 450 451 /* check for a previously pushed seqeunce */ 452 if (push_seq < cil->xc_ctx->sequence) 453 goto out_skip; 454 455 /* 456 * pull all the log vectors off the items in the CIL, and 457 * remove the items from the CIL. We don't need the CIL lock 458 * here because it's only needed on the transaction commit 459 * side which is currently locked out by the flush lock. 460 */ 461 lv = NULL; 462 num_iovecs = 0; 463 while (!list_empty(&cil->xc_cil)) { 464 struct xfs_log_item *item; 465 466 item = list_first_entry(&cil->xc_cil, 467 struct xfs_log_item, li_cil); 468 list_del_init(&item->li_cil); 469 if (!ctx->lv_chain) 470 ctx->lv_chain = item->li_lv; 471 else 472 lv->lv_next = item->li_lv; 473 lv = item->li_lv; 474 item->li_lv = NULL; 475 num_iovecs += lv->lv_niovecs; 476 } 477 478 /* 479 * initialise the new context and attach it to the CIL. Then attach 480 * the current context to the CIL committing lsit so it can be found 481 * during log forces to extract the commit lsn of the sequence that 482 * needs to be forced. 483 */ 484 INIT_LIST_HEAD(&new_ctx->committing); 485 INIT_LIST_HEAD(&new_ctx->busy_extents); 486 new_ctx->sequence = ctx->sequence + 1; 487 new_ctx->cil = cil; 488 cil->xc_ctx = new_ctx; 489 490 /* 491 * mirror the new sequence into the cil structure so that we can do 492 * unlocked checks against the current sequence in log forces without 493 * risking deferencing a freed context pointer. 494 */ 495 cil->xc_current_sequence = new_ctx->sequence; 496 497 /* 498 * The switch is now done, so we can drop the context lock and move out 499 * of a shared context. We can't just go straight to the commit record, 500 * though - we need to synchronise with previous and future commits so 501 * that the commit records are correctly ordered in the log to ensure 502 * that we process items during log IO completion in the correct order. 503 * 504 * For example, if we get an EFI in one checkpoint and the EFD in the 505 * next (e.g. due to log forces), we do not want the checkpoint with 506 * the EFD to be committed before the checkpoint with the EFI. Hence 507 * we must strictly order the commit records of the checkpoints so 508 * that: a) the checkpoint callbacks are attached to the iclogs in the 509 * correct order; and b) the checkpoints are replayed in correct order 510 * in log recovery. 511 * 512 * Hence we need to add this context to the committing context list so 513 * that higher sequences will wait for us to write out a commit record 514 * before they do. 515 */ 516 spin_lock(&cil->xc_push_lock); 517 list_add(&ctx->committing, &cil->xc_committing); 518 spin_unlock(&cil->xc_push_lock); 519 up_write(&cil->xc_ctx_lock); 520 521 /* 522 * Build a checkpoint transaction header and write it to the log to 523 * begin the transaction. We need to account for the space used by the 524 * transaction header here as it is not accounted for in xlog_write(). 525 * 526 * The LSN we need to pass to the log items on transaction commit is 527 * the LSN reported by the first log vector write. If we use the commit 528 * record lsn then we can move the tail beyond the grant write head. 529 */ 530 tic = ctx->ticket; 531 thdr.th_magic = XFS_TRANS_HEADER_MAGIC; 532 thdr.th_type = XFS_TRANS_CHECKPOINT; 533 thdr.th_tid = tic->t_tid; 534 thdr.th_num_items = num_iovecs; 535 lhdr.i_addr = &thdr; 536 lhdr.i_len = sizeof(xfs_trans_header_t); 537 lhdr.i_type = XLOG_REG_TYPE_TRANSHDR; 538 tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t); 539 540 lvhdr.lv_niovecs = 1; 541 lvhdr.lv_iovecp = &lhdr; 542 lvhdr.lv_next = ctx->lv_chain; 543 544 error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0); 545 if (error) 546 goto out_abort_free_ticket; 547 548 /* 549 * now that we've written the checkpoint into the log, strictly 550 * order the commit records so replay will get them in the right order. 551 */ 552 restart: 553 spin_lock(&cil->xc_push_lock); 554 list_for_each_entry(new_ctx, &cil->xc_committing, committing) { 555 /* 556 * Higher sequences will wait for this one so skip them. 557 * Don't wait for own own sequence, either. 558 */ 559 if (new_ctx->sequence >= ctx->sequence) 560 continue; 561 if (!new_ctx->commit_lsn) { 562 /* 563 * It is still being pushed! Wait for the push to 564 * complete, then start again from the beginning. 565 */ 566 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); 567 goto restart; 568 } 569 } 570 spin_unlock(&cil->xc_push_lock); 571 572 /* xfs_log_done always frees the ticket on error. */ 573 commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0); 574 if (commit_lsn == -1) 575 goto out_abort; 576 577 /* attach all the transactions w/ busy extents to iclog */ 578 ctx->log_cb.cb_func = xlog_cil_committed; 579 ctx->log_cb.cb_arg = ctx; 580 error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb); 581 if (error) 582 goto out_abort; 583 584 /* 585 * now the checkpoint commit is complete and we've attached the 586 * callbacks to the iclog we can assign the commit LSN to the context 587 * and wake up anyone who is waiting for the commit to complete. 588 */ 589 spin_lock(&cil->xc_push_lock); 590 ctx->commit_lsn = commit_lsn; 591 wake_up_all(&cil->xc_commit_wait); 592 spin_unlock(&cil->xc_push_lock); 593 594 /* release the hounds! */ 595 return xfs_log_release_iclog(log->l_mp, commit_iclog); 596 597 out_skip: 598 up_write(&cil->xc_ctx_lock); 599 xfs_log_ticket_put(new_ctx->ticket); 600 kmem_free(new_ctx); 601 return 0; 602 603 out_abort_free_ticket: 604 xfs_log_ticket_put(tic); 605 out_abort: 606 xlog_cil_committed(ctx, XFS_LI_ABORTED); 607 return XFS_ERROR(EIO); 608 } 609 610 static void 611 xlog_cil_push_work( 612 struct work_struct *work) 613 { 614 struct xfs_cil *cil = container_of(work, struct xfs_cil, 615 xc_push_work); 616 xlog_cil_push(cil->xc_log); 617 } 618 619 /* 620 * We need to push CIL every so often so we don't cache more than we can fit in 621 * the log. The limit really is that a checkpoint can't be more than half the 622 * log (the current checkpoint is not allowed to overwrite the previous 623 * checkpoint), but commit latency and memory usage limit this to a smaller 624 * size. 625 */ 626 static void 627 xlog_cil_push_background( 628 struct xlog *log) 629 { 630 struct xfs_cil *cil = log->l_cilp; 631 632 /* 633 * The cil won't be empty because we are called while holding the 634 * context lock so whatever we added to the CIL will still be there 635 */ 636 ASSERT(!list_empty(&cil->xc_cil)); 637 638 /* 639 * don't do a background push if we haven't used up all the 640 * space available yet. 641 */ 642 if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) 643 return; 644 645 spin_lock(&cil->xc_push_lock); 646 if (cil->xc_push_seq < cil->xc_current_sequence) { 647 cil->xc_push_seq = cil->xc_current_sequence; 648 queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work); 649 } 650 spin_unlock(&cil->xc_push_lock); 651 652 } 653 654 static void 655 xlog_cil_push_foreground( 656 struct xlog *log, 657 xfs_lsn_t push_seq) 658 { 659 struct xfs_cil *cil = log->l_cilp; 660 661 if (!cil) 662 return; 663 664 ASSERT(push_seq && push_seq <= cil->xc_current_sequence); 665 666 /* start on any pending background push to minimise wait time on it */ 667 flush_work(&cil->xc_push_work); 668 669 /* 670 * If the CIL is empty or we've already pushed the sequence then 671 * there's no work we need to do. 672 */ 673 spin_lock(&cil->xc_push_lock); 674 if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) { 675 spin_unlock(&cil->xc_push_lock); 676 return; 677 } 678 679 cil->xc_push_seq = push_seq; 680 spin_unlock(&cil->xc_push_lock); 681 682 /* do the push now */ 683 xlog_cil_push(log); 684 } 685 686 bool 687 xlog_cil_empty( 688 struct xlog *log) 689 { 690 struct xfs_cil *cil = log->l_cilp; 691 bool empty = false; 692 693 spin_lock(&cil->xc_push_lock); 694 if (list_empty(&cil->xc_cil)) 695 empty = true; 696 spin_unlock(&cil->xc_push_lock); 697 return empty; 698 } 699 700 /* 701 * Commit a transaction with the given vector to the Committed Item List. 702 * 703 * To do this, we need to format the item, pin it in memory if required and 704 * account for the space used by the transaction. Once we have done that we 705 * need to release the unused reservation for the transaction, attach the 706 * transaction to the checkpoint context so we carry the busy extents through 707 * to checkpoint completion, and then unlock all the items in the transaction. 708 * 709 * Called with the context lock already held in read mode to lock out 710 * background commit, returns without it held once background commits are 711 * allowed again. 712 */ 713 int 714 xfs_log_commit_cil( 715 struct xfs_mount *mp, 716 struct xfs_trans *tp, 717 xfs_lsn_t *commit_lsn, 718 int flags) 719 { 720 struct xlog *log = mp->m_log; 721 struct xfs_cil *cil = log->l_cilp; 722 int log_flags = 0; 723 724 if (flags & XFS_TRANS_RELEASE_LOG_RES) 725 log_flags = XFS_LOG_REL_PERM_RESERV; 726 727 /* lock out background commit */ 728 down_read(&cil->xc_ctx_lock); 729 730 xlog_cil_insert_items(log, tp); 731 732 /* check we didn't blow the reservation */ 733 if (tp->t_ticket->t_curr_res < 0) 734 xlog_print_tic_res(mp, tp->t_ticket); 735 736 tp->t_commit_lsn = cil->xc_ctx->sequence; 737 if (commit_lsn) 738 *commit_lsn = tp->t_commit_lsn; 739 740 xfs_log_done(mp, tp->t_ticket, NULL, log_flags); 741 xfs_trans_unreserve_and_mod_sb(tp); 742 743 /* 744 * Once all the items of the transaction have been copied to the CIL, 745 * the items can be unlocked and freed. 746 * 747 * This needs to be done before we drop the CIL context lock because we 748 * have to update state in the log items and unlock them before they go 749 * to disk. If we don't, then the CIL checkpoint can race with us and 750 * we can run checkpoint completion before we've updated and unlocked 751 * the log items. This affects (at least) processing of stale buffers, 752 * inodes and EFIs. 753 */ 754 xfs_trans_free_items(tp, tp->t_commit_lsn, 0); 755 756 xlog_cil_push_background(log); 757 758 up_read(&cil->xc_ctx_lock); 759 return 0; 760 } 761 762 /* 763 * Conditionally push the CIL based on the sequence passed in. 764 * 765 * We only need to push if we haven't already pushed the sequence 766 * number given. Hence the only time we will trigger a push here is 767 * if the push sequence is the same as the current context. 768 * 769 * We return the current commit lsn to allow the callers to determine if a 770 * iclog flush is necessary following this call. 771 */ 772 xfs_lsn_t 773 xlog_cil_force_lsn( 774 struct xlog *log, 775 xfs_lsn_t sequence) 776 { 777 struct xfs_cil *cil = log->l_cilp; 778 struct xfs_cil_ctx *ctx; 779 xfs_lsn_t commit_lsn = NULLCOMMITLSN; 780 781 ASSERT(sequence <= cil->xc_current_sequence); 782 783 /* 784 * check to see if we need to force out the current context. 785 * xlog_cil_push() handles racing pushes for the same sequence, 786 * so no need to deal with it here. 787 */ 788 xlog_cil_push_foreground(log, sequence); 789 790 /* 791 * See if we can find a previous sequence still committing. 792 * We need to wait for all previous sequence commits to complete 793 * before allowing the force of push_seq to go ahead. Hence block 794 * on commits for those as well. 795 */ 796 restart: 797 spin_lock(&cil->xc_push_lock); 798 list_for_each_entry(ctx, &cil->xc_committing, committing) { 799 if (ctx->sequence > sequence) 800 continue; 801 if (!ctx->commit_lsn) { 802 /* 803 * It is still being pushed! Wait for the push to 804 * complete, then start again from the beginning. 805 */ 806 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); 807 goto restart; 808 } 809 if (ctx->sequence != sequence) 810 continue; 811 /* found it! */ 812 commit_lsn = ctx->commit_lsn; 813 } 814 spin_unlock(&cil->xc_push_lock); 815 return commit_lsn; 816 } 817 818 /* 819 * Check if the current log item was first committed in this sequence. 820 * We can't rely on just the log item being in the CIL, we have to check 821 * the recorded commit sequence number. 822 * 823 * Note: for this to be used in a non-racy manner, it has to be called with 824 * CIL flushing locked out. As a result, it should only be used during the 825 * transaction commit process when deciding what to format into the item. 826 */ 827 bool 828 xfs_log_item_in_current_chkpt( 829 struct xfs_log_item *lip) 830 { 831 struct xfs_cil_ctx *ctx; 832 833 if (list_empty(&lip->li_cil)) 834 return false; 835 836 ctx = lip->li_mountp->m_log->l_cilp->xc_ctx; 837 838 /* 839 * li_seq is written on the first commit of a log item to record the 840 * first checkpoint it is written to. Hence if it is different to the 841 * current sequence, we're in a new checkpoint. 842 */ 843 if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0) 844 return false; 845 return true; 846 } 847 848 /* 849 * Perform initial CIL structure initialisation. 850 */ 851 int 852 xlog_cil_init( 853 struct xlog *log) 854 { 855 struct xfs_cil *cil; 856 struct xfs_cil_ctx *ctx; 857 858 cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); 859 if (!cil) 860 return ENOMEM; 861 862 ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); 863 if (!ctx) { 864 kmem_free(cil); 865 return ENOMEM; 866 } 867 868 INIT_WORK(&cil->xc_push_work, xlog_cil_push_work); 869 INIT_LIST_HEAD(&cil->xc_cil); 870 INIT_LIST_HEAD(&cil->xc_committing); 871 spin_lock_init(&cil->xc_cil_lock); 872 spin_lock_init(&cil->xc_push_lock); 873 init_rwsem(&cil->xc_ctx_lock); 874 init_waitqueue_head(&cil->xc_commit_wait); 875 876 INIT_LIST_HEAD(&ctx->committing); 877 INIT_LIST_HEAD(&ctx->busy_extents); 878 ctx->sequence = 1; 879 ctx->cil = cil; 880 cil->xc_ctx = ctx; 881 cil->xc_current_sequence = ctx->sequence; 882 883 cil->xc_log = log; 884 log->l_cilp = cil; 885 return 0; 886 } 887 888 void 889 xlog_cil_destroy( 890 struct xlog *log) 891 { 892 if (log->l_cilp->xc_ctx) { 893 if (log->l_cilp->xc_ctx->ticket) 894 xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket); 895 kmem_free(log->l_cilp->xc_ctx); 896 } 897 898 ASSERT(list_empty(&log->l_cilp->xc_cil)); 899 kmem_free(log->l_cilp); 900 } 901 902