// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each run of contiguous chunks to be logged.  Each
 * contiguous run is logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
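/*
 * Worked example (illustrative, not from the original source): with
 * 128 byte chunks (XFS_BLF_CHUNK) and dirty map bits {0, 1, 3, 4, 5} in
 * one segment of a contiguously mapped buffer, there are two runs, bits
 * 0-1 and bits 3-5.  That yields nvecs += 3 (one format structure plus
 * two data regions) and nbytes += xfs_buf_log_format_size() +
 * 5 * XFS_BLF_CHUNK.
 */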
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for
 * each run of contiguous chunks to be logged.  Each contiguous run is logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just like would occur if multiple
 * buffers were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}
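/*
 * Illustrative note (an assumption about the backing memory, not from the
 * original source): a large buffer may be backed by pages that are not
 * virtually contiguous, so with 4096 byte pages and 128 byte chunks the
 * address of chunk 32 need not equal the address of chunk 31 plus
 * XFS_BLF_CHUNK.  xfs_buf_item_straddle() detects exactly that case so a
 * dirty run crossing such a boundary is split into separate copy iovecs.
 */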
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}


	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
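/*
 * Worked example (illustrative, not from the original source): for a mapped,
 * non-stale segment with dirty map bits {0, 1, 3, 4, 5}, the loop above emits
 * one 256 byte iovec for chunks 0-1 at segment offset 0 and one 384 byte
 * iovec for chunks 3-5 at segment offset 384, leaving blf_size == 3 (the
 * format structure plus two data regions).
 */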
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));


	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
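/*
 * Rough lifecycle sketch (illustrative, not from the original source):
 * transaction commit pins the item via ->iop_pin() above, bumping both
 * bli_refcount and b_pin_count; when the log write containing the item
 * completes, ->iop_unpin() below drops the same two counts and wakes anyone
 * sleeping on bp->b_waiters once the buffer is fully unpinned.
 */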
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL.  xfs_trans_ail_delete() will
		 * take care of that situation.  xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_buf_inode_iodone(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer.  Check
		 * for the race condition here so xfsaild recognizes the buffer
		 * is pinned and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
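/*
 * Example outcomes of xfs_buf_item_put() (illustrative, not from the original
 * source): a bli left clean by this transaction is freed as soon as the last
 * reference drops (returns true); a bli still dirty from an earlier
 * transaction survives until buffer writeback completes (returns false); an
 * aborted bli is pulled off the AIL, if resident there, and freed (returns
 * true).
 */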
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
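/*
 * For instance (illustrative, not from the original source): if an inode
 * allocation buffer was first logged in a transaction that landed at LSN 100
 * and is later relogged at LSN 200 with only the unlinked list pointers, the
 * callback below keeps returning 100 so the original inode core images stay
 * in the active region of the log until they are flushed.
 */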
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
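	/*
	 * Worked example (illustrative, not from the original source): a
	 * single-map 4 KiB buffer has bm_len = 8 basic blocks, so
	 * BBTOB(bm_len) = 4096 bytes, chunks = 4096 / 128 = 32 and
	 * map_size = DIV_ROUND_UP(32, NBWORD) = 1 bitmap word.  A 64 KiB
	 * buffer needs 512 chunks and therefore all 16 words of
	 * XFS_BLF_DATAMAP_SIZE.
	 */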
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_zone, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}


/*
 * Mark bytes first through last inclusive as dirty in the given
 * segment's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
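/*
 * Worked example (illustrative, not from the original source): logging bytes
 * 100 through 700 of a segment gives first_bit = 100 >> 7 = 0 and
 * last_bit = 700 >> 7 = 5, so bits_to_set = 6.  The starting bit within
 * word 0 is 0, the whole-word loop is skipped (6 < NBWORD), and the final
 * partial-word mask is (1U << 6) - 1 = 0x3f, dirtying chunks 0-5 and hence
 * bytes 0-767.
 */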
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}


/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

void
xfs_buf_item_done(
	struct xfs_buf	*bp)
{
	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on the AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 *
	 * Note that log recovery writes might have buffer items that are not
	 * on the AIL even when the file system is not shut down.
	 */
	xfs_trans_ail_delete(&bp->b_log_item->bli_item,
			     (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
			     SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_relse(bp);
}