// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

static void xfs_buf_item_done(struct xfs_buf *bp);

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for
 * each stretch of non-contiguous chunks to be logged.  Contiguous chunks are
 * logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item		*bip,
	struct xfs_buf_log_format	*blfp,
	int				*nvecs,
	int				*nbytes)
{
	struct xfs_buf			*bp = bip->bli_buf;
	int				next_bit;
	int				last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
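
/*
 * Worked example (editorial illustration, not from the original source):
 * with 128 byte chunks (XFS_BLF_CHUNK), a single-segment buffer with
 * contiguous backing memory that is dirtied at byte ranges 0-255 and
 * 1024-1151 has bits 0, 1 and 8 set in blf_data_map.  The function above
 * counts the format structure plus the first dirty region up front, and
 * one more vector when it crosses the gap before chunk 8, so it ends up
 * doing the equivalent of:
 *
 *	*nvecs += 3;
 *	*nbytes += xfs_buf_log_format_size(blfp) + 3 * XFS_BLF_CHUNK;
 */
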
/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for
 * each stretch of non-contiguous chunks to be logged.  Contiguous chunks are
 * logged in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged.  This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just as would occur if multiple
 * buffers were used instead of a single discontiguous buffer.  This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * the vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, there is
		 * nothing to format; do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
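
/*
 * Editorial illustration (not from the original source): for the same
 * two-region example used above for sizing, xfs_buf_item_format_segment()
 * emits one XLOG_REG_TYPE_BFORMAT region followed by two
 * XLOG_REG_TYPE_BCHUNK regions, one per contiguous run of dirty chunks,
 * leaving blf_size == 3 (the format structure plus two chunk regions).
 */
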
/*
 * This is called to fill in the vector of log iovecs for the given log buf
 * item.  It fills the first entry with a buf log format structure, and the
 * rest point to contiguous chunks within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory.  This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
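
/*
 * Editorial summary (not in the original source) of the bli reference
 * counting used here and below: the transaction that dirties the buffer
 * holds one bli reference, and each pin takes another.
 * xfs_buf_item_unpin() drops the pin reference and xfs_buf_item_put()
 * drops the transaction's, so whichever side drops bli_refcount to zero
 * decides whether the bli can be freed.
 */
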
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (!list_empty(&lip->li_trans))
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL.  xfs_trans_ail_delete() will
		 * take care of that situation.  xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_iflush_done(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer.  Check
		 * for the race condition here so xfsaild recognizes the
		 * buffer is pinned and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
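
/*
 * Editorial note (not in the original source) on the push return values
 * above: XFS_ITEM_PINNED prompts xfsaild to issue a log force to unpin
 * the buffer, XFS_ITEM_LOCKED defers the item to a later push,
 * XFS_ITEM_FLUSHING indicates the buffer is already queued for or under
 * writeback, and XFS_ITEM_SUCCESS means the buffer was added to the
 * delwri list for writeback.
 */
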
/*
 * Drop the buffer log item refcount and take appropriate action.  This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes.  In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean.  An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
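
/*
 * Editorial summary (not in the original source) of the xfs_buf_item_put()
 * outcomes above once the last reference has been dropped:
 *
 *	dirty	aborted		action
 *	-----	-------		------
 *	yes	no		keep the bli; writeback completion frees it
 *	yes	yes		remove from AIL, free the bli
 *	no	yes		remove from AIL, free the bli
 *	no	no		free the bli
 */
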
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item.  If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold().  Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale.  Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call.  The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}
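
/*
 * Editorial worked example (not in the original source) for the sizing in
 * xfs_buf_item_init() below: a single-mapped 4096 byte buffer divides
 * into 4096 / XFS_BLF_CHUNK (128) = 32 chunks, so with 32-bit bitmap
 * words map_size = DIV_ROUND_UP(32, NBWORD) = 1 word.
 */
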
/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into.  Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer.  This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_zone, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
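
/*
 * Editorial worked example (not in the original source): logging bytes
 * 256 through 600 of a segment gives first_bit = 256 >> XFS_BLF_SHIFT = 2
 * and last_bit = 600 >> XFS_BLF_SHIFT = 4, so bits 2-4 are set and the
 * whole of chunks 2-4 (bytes 256-639) will be relogged; ranges always
 * round out to 128 byte chunk boundaries.
 */
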
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}


/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}
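
/*
 * Editorial note (not in the original source): the write retry state used
 * by the error handling below (b_last_error, b_retries and
 * b_first_retry_time) lives in the buffer and only applies to async
 * writes; it is reset by xfs_buf_clear_ioerror_retry_state() once a
 * completion runs without error.
 */
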
/*
 * Decide whether a failed write must be failed immediately, without any
 * retry: either the filesystem is already shut down, or the write was
 * synchronous and the caller will process the error itself.
 */
static bool
xfs_buf_ioerror_fail_without_retry(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		return true;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __this_address);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		return true;
	return false;
}

static bool
xfs_buf_ioerror_retry(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	if ((bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) &&
	    bp->b_last_error == bp->b_error)
		return false;

	bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
	bp->b_last_error = bp->b_error;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    !bp->b_first_retry_time)
		bp->b_first_retry_time = jiffies;
	return true;
}

/*
 * Account for this latest trip around the retry handler, and decide if
 * we've failed enough times to constitute a permanent failure.
 */
static bool
xfs_buf_ioerror_permanent(
	struct xfs_buf		*bp,
	struct xfs_error_cfg	*cfg)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
		return true;
	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
		return true;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		return true;

	return false;
}

/*
 * On a sync write or shutdown we just want to stale the buffer and let the
 * caller handle the error in bp->b_error appropriately.
 *
 * If the write was asynchronous then no one will be looking for the error.  If
 * this is the first failure of this type, clear the error state and write the
 * buffer out again.  This means we always retry an async write failure at
 * least once, but we also need to set the buffer up to behave correctly now
 * for repeated failures.
 *
 * If we get repeated async write failures, then we take action according to
 * the error configuration we have been set up to use.
 *
 * Multi-state return value:
 *
 * XBF_IOERROR_FINISH: clear IO error retry state and run callback completions
 * XBF_IOERROR_DONE: resubmitted immediately, do not run any completions
 * XBF_IOERROR_FAIL: transient error, run failure callback completions and then
 *    release the buffer
 */
enum {
	XBF_IOERROR_FINISH,
	XBF_IOERROR_DONE,
	XBF_IOERROR_FAIL,
};

static int
xfs_buf_iodone_error(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_error_cfg	*cfg;

	if (xfs_buf_ioerror_fail_without_retry(bp))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
	if (xfs_buf_ioerror_retry(bp, cfg)) {
		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return XBF_IOERROR_DONE;
	}

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't
	 * already to indicate that inconsistency will result from this action.
	 */
	if (xfs_buf_ioerror_permanent(bp, cfg)) {
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		goto out_stale;
	}

	/* Still considered a transient error. Caller will schedule retries. */
	return XBF_IOERROR_FAIL;

out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return XBF_IOERROR_FINISH;
}

static void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!bip)
		return;

	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already.  That's because we simulate the log-committed callbacks to
	 * unpin these buffers.  Or we may never have put this item on the AIL
	 * because the transaction was forcibly aborted.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 */
	xfs_trans_ail_delete(&bip->bli_item, SHUTDOWN_CORRUPT_INCORE);
	bp->b_log_item = NULL;
	xfs_buf_item_free(bip);
	xfs_buf_rele(bp);
}

static inline void
xfs_buf_clear_ioerror_retry_state(
	struct xfs_buf		*bp)
{
	bp->b_last_error = 0;
	bp->b_retries = 0;
	bp->b_first_retry_time = 0;
}

/*
 * Inode buffer iodone callback function.
 */
void
xfs_buf_inode_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		struct xfs_log_item *lip;
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
			set_bit(XFS_LI_FAILED, &lip->li_flags);
		}
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_iflush_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dquot buffer iodone callback function.
 */
void
xfs_buf_dquot_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		struct xfs_log_item *lip;
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		spin_lock(&bp->b_mount->m_ail->ail_lock);
		list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
			xfs_set_li_failed(lip, bp);
		}
		spin_unlock(&bp->b_mount->m_ail->ail_lock);
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	/* a newly allocated dquot buffer might have a log item attached */
	xfs_buf_item_done(bp);
	xfs_dquot_done(bp);
	xfs_buf_ioend_finish(bp);
}

/*
 * Dirty buffer iodone callback function.
 *
 * Note that for things like remote attribute buffers, there may not be a
 * buffer log item here, so processing the buffer log item must remain
 * optional.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp)
{
	if (bp->b_error) {
		int ret = xfs_buf_iodone_error(bp);

		if (ret == XBF_IOERROR_FINISH)
			goto finish_iodone;
		if (ret == XBF_IOERROR_DONE)
			return;
		ASSERT(ret == XBF_IOERROR_FAIL);
		ASSERT(list_empty(&bp->b_li_list));
		xfs_buf_ioerror(bp, 0);
		xfs_buf_relse(bp);
		return;
	}

finish_iodone:
	xfs_buf_clear_ioerror_retry_state(bp);
	xfs_buf_item_done(bp);
	xfs_buf_ioend_finish(bp);
}