/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"

/*
 * Perform initial CIL structure initialisation. If the CIL is not
 * enabled in this filesystem, ensure the log->l_cilp is null so
 * we can check this conditional to determine if we are doing delayed
 * logging or not.
 */
int
xlog_cil_init(
	struct log	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	log->l_cilp = NULL;
	if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
		return 0;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}

	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	init_rwsem(&cil->xc_ctx_lock);
	sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait");

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct log	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct log	*log)
{
	if (!log->l_cilp)
		return;

	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
							  log->l_curr_block);
}

/*
 * Insert the log item into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 *
 * If this is the first time the item is being placed into the CIL in this
 * context, pin it so it can't be written to disk until the CIL is flushed to
 * the iclog and the iclog written to disk.
 */
static void
xlog_cil_insert(
	struct log		*log,
	struct xlog_ticket	*ticket,
	struct xfs_log_item	*item,
	struct xfs_log_vec	*lv)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*old = lv->lv_item->li_lv;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	int			len;
	int			diff_iovecs;
	int			iclog_space;

	if (old) {
		/* existing lv on log item, space used is a delta */
		ASSERT(!list_empty(&item->li_cil));
		ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);

		len = lv->lv_buf_len - old->lv_buf_len;
		diff_iovecs = lv->lv_niovecs - old->lv_niovecs;
		kmem_free(old->lv_buf);
		kmem_free(old);
	} else {
		/* new lv, must pin the log item */
		ASSERT(!lv->lv_item->li_lv);
		ASSERT(list_empty(&item->li_cil));

		len = lv->lv_buf_len;
		diff_iovecs = lv->lv_niovecs;
		IOP_PIN(lv->lv_item);
	}
	len += diff_iovecs * sizeof(xlog_op_header_t);

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	spin_lock(&cil->xc_cil_lock);
	list_move_tail(&item->li_cil, &cil->xc_cil);
	ctx->nvecs += diff_iovecs;

	/*
	 * If this is the first time the item is being committed to the CIL,
	 * store the sequence number on the log item so we can tell
	 * in future commits whether this is the first checkpoint the item is
	 * being committed into.
	 */
	if (!item->li_seq)
		item->li_seq = ctx->sequence;

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
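	 *
	 * For example, the first transaction to commit into an empty
	 * checkpoint donates the context ticket's entire unit reservation
	 * (the checkpoint header overhead) in addition to the formatted
	 * length of its own items; subsequent commits into the same
	 * checkpoint only donate their formatted length plus any extra
	 * iclog header space calculated below.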
	 */
	if (ctx->ticket->t_curr_res == 0) {
		/* first commit in checkpoint, steal the header reservation */
		ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		ticket->t_curr_res -= hdrs;
		ASSERT(ticket->t_curr_res >= len);
	}
	ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}

/*
 * Format log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item, and the log item is then
 * inserted into the Committed Item List for tracking until the next
 * checkpoint is written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
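 *
 * For example, an item that formats two regions ends up with both regions
 * copied back to back into lv_buf, and each entry in lv_iovecp[] has its
 * i_addr redirected to point at its copy within that buffer.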
 */
static void
xlog_cil_format_items(
	struct log		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*ticket,
	xfs_lsn_t		*start_lsn)
{
	struct xfs_log_vec *lv;

	if (start_lsn)
		*start_lsn = log->l_cilp->xc_ctx->sequence;

	ASSERT(log_vector);
	for (lv = log_vector; lv; lv = lv->lv_next) {
		void	*ptr;
		int	index;
		int	len = 0;

		/* build the vector array and calculate its length */
		IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
		for (index = 0; index < lv->lv_niovecs; index++)
			len += lv->lv_iovecp[index].i_len;

		lv->lv_buf_len = len;
		lv->lv_buf = kmem_zalloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
		ptr = lv->lv_buf;

		for (index = 0; index < lv->lv_niovecs; index++) {
			struct xfs_log_iovec *vec = &lv->lv_iovecp[index];

			memcpy(ptr, vec->i_addr, vec->i_len);
			vec->i_addr = ptr;
			ptr += vec->i_len;
		}
		ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);

		xlog_cil_insert(log, ticket, lv->lv_item, lv);
	}
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv->lv_buf);
		kmem_free(lv);
		lv = next;
	}
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * For more specific information about the order of operations in
 * xfs_log_commit_cil() please refer to the comments in
 * xfs_trans_commit_iclog().
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
int
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		*commit_lsn,
	int			flags)
{
	struct log		*log = mp->m_log;
	int			log_flags = 0;
	int			push = 0;

	if (flags & XFS_TRANS_RELEASE_LOG_RES)
		log_flags = XFS_LOG_REL_PERM_RESERV;

	if (XLOG_FORCED_SHUTDOWN(log)) {
		xlog_cil_free_logvec(log_vector);
		return XFS_ERROR(EIO);
	}

	/* lock out background commit */
	down_read(&log->l_cilp->xc_ctx_lock);
	xlog_cil_format_items(log, log_vector, tp->t_ticket, commit_lsn);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(log->l_mp, tp->t_ticket);

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy)) {
		spin_lock(&log->l_cilp->xc_cil_lock);
		list_splice_init(&tp->t_busy,
					&log->l_cilp->xc_ctx->busy_extents);
		spin_unlock(&log->l_cilp->xc_cil_lock);
	}

	tp->t_commit_lsn = *commit_lsn;
	xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	xfs_trans_unreserve_and_mod_sb(tp);

	/* check for background commit before unlock */
	if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
		push = 1;
	up_read(&log->l_cilp->xc_ctx_lock);

	/*
	 * We need to push CIL every so often so we don't cache more than we
	 * can fit in the log. The limit really is that a checkpoint can't be
	 * more than half the log (the current checkpoint is not allowed to
	 * overwrite the previous checkpoint), but commit latency and memory
	 * usage limit this to a smaller size in most cases.
	 */
	if (push)
		xlog_cil_push(log, 0);
	return 0;
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_log_vec	*lv;
	int			abortflag = abort ? XFS_LI_ABORTED : 0;
	struct xfs_busy_extent	*busyp, *n;

	/* unpin all the log items */
	for (lv = ctx->lv_chain; lv; lv = lv->lv_next) {
		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
							abortflag);
	}

	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);

	spin_lock(&ctx->cil->xc_cil_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_cil_lock);

	xlog_cil_free_logvec(ctx->lv_chain);
	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If the push_now flag is not set,
 * then it is a background flush and so we can choose to ignore it.
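 *
 * Background flushes are triggered from xfs_log_commit_cil() once the space
 * used by the current context exceeds XLOG_CIL_SPACE_LIMIT(), so a flush that
 * finds the CIL back below that limit is treated as spurious and skipped.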
 */
int
xlog_cil_push(
	struct log		*log,
	int			push_now)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_lv;
	int			num_iovecs;
	int			len;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	/* lock out transaction commit, but don't block on background push */
	if (!down_write_trylock(&cil->xc_ctx_lock)) {
		if (!push_now)
			goto out_free_ticket;
		down_write(&cil->xc_ctx_lock);
	}
	ctx = cil->xc_ctx;

	/* check if we've anything to push */
	if (list_empty(&cil->xc_cil))
		goto out_skip;

	/* check for spurious background flush */
	if (!push_now && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_lv = 0;
	num_iovecs = 0;
	len = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;
		int			i;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;

		num_lv++;
		num_iovecs += lv->lv_niovecs;
		for (i = 0; i < lv->lv_niovecs; i++)
			len += lv->lv_iovecp[i].i_len;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_cil_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_cil_lock);

	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (error || commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_cil_lock);
	ctx->commit_lsn = commit_lsn;
	sv_broadcast(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_cil_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
out_free_ticket:
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 *
 * XXX: Initially, just push the CIL unconditionally and return whatever
 * commit lsn is there. It'll be empty, so this is broken for now.
 */
xfs_lsn_t
xlog_cil_push_lsn(
	struct log	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

restart:
	down_write(&cil->xc_ctx_lock);
	ASSERT(push_seq <= cil->xc_ctx->sequence);

	/* check to see if we need to force out the current context */
	if (push_seq == cil->xc_ctx->sequence) {
		up_write(&cil->xc_ctx_lock);
		xlog_cil_push(log, 1);
		goto restart;
	}

	/*
	 * See if we can find a previous sequence still committing.
	 * We can drop the flush lock as soon as we have the cil lock
	 * because we are now only comparing contexts protected by
	 * the cil lock.
	 *
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_cil_lock);
	up_write(&cil->xc_ctx_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > push_seq)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
			goto restart;
		}
		if (ctx->sequence != push_seq)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}
	spin_unlock(&cil->xc_cil_lock);
	return commit_lsn;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
		return false;
	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}