// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

static inline int
xfs_buf_log_format_size(
        struct xfs_buf_log_format *blfp)
{
        return offsetof(struct xfs_buf_log_format, blf_data_map) +
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
        struct xfs_buf_log_item	*bip,
        struct xfs_buf_log_format *blfp,
        int			*nvecs,
        int			*nbytes)
{
        struct xfs_buf		*bp = bip->bli_buf;
        int			next_bit;
        int			last_bit;

        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (last_bit == -1)
                return;

        /*
         * initial count for a dirty buffer is 2 vectors - the format structure
         * and the first dirty region.
         */
        *nvecs += 2;
        *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

        while (last_bit != -1) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        last_bit + 1);
                /*
                 * If we run out of bits, leave the loop,
                 * else if we find a new set of bits bump the number of vecs,
                 * else keep scanning the current set of bits.
                 */
                if (next_bit == -1) {
                        break;
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
                            XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else {
                        last_bit++;
                }
                *nbytes += XFS_BLF_CHUNK;
        }
}
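
/*
 * Worked example for the sizing walk above (illustrative, not from the
 * original source; it assumes XFS_BLF_CHUNK == 128 and a buffer whose
 * memory is contiguous, so the xfs_buf_offset() check never splits a run):
 *
 *	blf_data_map[0] = 0x63 (bits 0, 1, 5 and 6 set)
 *
 *	last_bit = 0	-> *nvecs += 2, *nbytes += format size + 128
 *	next_bit = 1	-> contiguous, *nbytes += 128
 *	next_bit = 5	-> new region, (*nvecs)++, *nbytes += 128
 *	next_bit = 6	-> contiguous, *nbytes += 128
 *	next_bit = -1	-> done
 *
 * Result: 3 iovecs (the format structure plus two dirty regions) and
 * format size + 512 bytes for this segment.
 */
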
/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
        struct xfs_log_item	*lip,
        int			*nvecs,
        int			*nbytes)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        int			i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                *nvecs += bip->bli_format_count;
                for (i = 0; i < bip->bli_format_count; i++) {
                        *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
                }
                return;
        }

        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

        if (bip->bli_flags & XFS_BLI_ORDERED) {
                /*
                 * The buffer has been logged just to order it.
                 * It is not being included in the transaction
                 * commit, so no vectors are used at all.
                 */
                trace_xfs_buf_item_size_ordered(bip);
                *nvecs = XFS_LOG_VEC_ORDERED;
                return;
        }

        /*
         * The vector count is based on the number of buffer vectors we have
         * dirty bits in. This will only be greater than one when we have a
         * compound buffer with more than one segment dirty. Hence for compound
         * buffers we need to track which segment the dirty bits correspond to,
         * and when we move from one segment to the next increment the vector
         * count for the extra buf log format structure that will need to be
         * written.
         */
        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
                                          nvecs, nbytes);
        }
        trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
        struct xfs_log_vec	*lv,
        struct xfs_log_iovec	**vecp,
        struct xfs_buf		*bp,
        uint			offset,
        int			first_bit,
        uint			nbits)
{
        offset += first_bit * XFS_BLF_CHUNK;
        xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
                        xfs_buf_offset(bp, offset),
                        nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
        struct xfs_buf		*bp,
        uint			offset,
        int			next_bit,
        int			last_bit)
{
        return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
                (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
                 XFS_BLF_CHUNK);
}
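
/*
 * Illustrative note (not from the original source): the straddle check
 * matters for unmapped buffers built from an array of discontiguous pages.
 * Assuming PAGE_SIZE == 4096 and XFS_BLF_CHUNK == 128, chunks 31 and 32
 * are byte-adjacent within the buffer but live on different pages, so
 * xfs_buf_offset() may return addresses that are not virtually adjacent.
 * In that case a dirty run must be split into two iovecs even though the
 * corresponding bitmap bits are contiguous.
 */
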
static void
xfs_buf_item_format_segment(
        struct xfs_buf_log_item	*bip,
        struct xfs_log_vec	*lv,
        struct xfs_log_iovec	**vecp,
        uint			offset,
        struct xfs_buf_log_format *blfp)
{
        struct xfs_buf		*bp = bip->bli_buf;
        uint			base_size;
        int			first_bit;
        int			last_bit;
        int			next_bit;
        uint			nbits;

        /* copy the flags across from the base format item */
        blfp->blf_flags = bip->__bli_format.blf_flags;

        /*
         * Base size is the actual size of the ondisk structure - it reflects
         * the actual size of the dirty bitmap rather than the size of the in
         * memory structure.
         */
        base_size = xfs_buf_log_format_size(blfp);

        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
                /*
                 * If the map is not dirty in the transaction, mark
                 * the size as zero and do not advance the vector pointer.
                 */
                return;
        }

        blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
        blfp->blf_size = 1;

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
                return;
        }

        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        (uint)last_bit + 1);
                /*
                 * If we run out of bits fill in the last iovec and get out of
                 * the loop.  Else if we start a new set of bits then fill in
                 * the iovec for the series we were looking at and start
                 * counting the bits in the new one.  Else we're still in the
                 * same set of bits so just keep counting and scanning.
                 */
                if (next_bit == -1) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        break;
                } else if (next_bit != last_bit + 1 ||
                           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
                } else {
                        last_bit++;
                        nbits++;
                }
        }
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
        struct xfs_log_item	*lip,
        struct xfs_log_vec	*lv)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        struct xfs_buf		*bp = bip->bli_buf;
        struct xfs_log_iovec	*vecp = NULL;
        uint			offset = 0;
        int			i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));
        ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
               (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF &&
                xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
        ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        /*
         * If it is an inode buffer, transfer the in-memory state to the
         * format flags and clear the in-memory state.
         *
         * For buffer based inode allocation, we do not transfer
         * this state if the inode buffer allocation has not yet been committed
         * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
         * correct replay of the inode allocation.
         *
         * For icreate item based inode allocation, the buffers aren't written
         * to the journal during allocation, and hence we should always tag the
         * buffer as an inode buffer so that the correct unlinked list replay
         * occurs during recovery.
         */
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
                    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
                        bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }

        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_format_segment(bip, lv, &vecp, offset,
                                            &bip->bli_formats[i]);
                offset += BBTOB(bp->b_maps[i].bm_len);
        }

        /*
         * Check to make sure everything is consistent.
         */
        trace_xfs_buf_item_format(bip);
}
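
/*
 * Worked example of the resulting log vector layout (illustrative, not
 * from the original source; assumes XFS_BLF_CHUNK == 128, a single
 * segment, and dirty chunks 0, 1, 5 and 6 with contiguous buffer memory):
 *
 *	iovec[0]: buf log format structure, blf_size = 3
 *	iovec[1]: 256 bytes of data from buffer offset 0 (chunks 0-1)
 *	iovec[2]: 256 bytes of data from buffer offset 640 (chunks 5-6)
 */
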
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
        struct xfs_log_item	*lip)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_pin(bip);

        atomic_inc(&bip->bli_refcount);
        atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
        struct xfs_log_item	*lip,
        int			remove)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        xfs_buf_t		*bp = bip->bli_buf;
        struct xfs_ail		*ailp = lip->li_ailp;
        int			stale = bip->bli_flags & XFS_BLI_STALE;
        int			freed;

        ASSERT(bp->b_log_item == bip);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_buf_item_unpin(bip);

        freed = atomic_dec_and_test(&bip->bli_refcount);

        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);

        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

                trace_xfs_buf_item_unpin_stale(bip);

                if (remove) {
                        /*
                         * If we are in a transaction context, we have to
                         * remove the log item from the transaction as we are
                         * about to release our reference to the buffer.  If we
                         * don't, the unlock that occurs later in
                         * xfs_trans_uncommit() will try to reference the
                         * buffer which we no longer have a hold on.
                         */
                        if (!list_empty(&lip->li_trans))
                                xfs_trans_del_item(lip);

                        /*
                         * Since the transaction no longer refers to the buffer,
                         * the buffer should no longer refer to the transaction.
                         */
                        bp->b_transp = NULL;
                }

                /*
                 * If we get called here because of an IO error, we may or may
                 * not have the item on the AIL.  xfs_trans_ail_delete() will
                 * take care of that situation.  xfs_trans_ail_delete() drops
                 * the AIL lock.
                 */
                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
                        xfs_buf_do_callbacks(bp);
                        bp->b_log_item = NULL;
                        list_del_init(&bp->b_li_list);
                        bp->b_iodone = NULL;
                } else {
                        spin_lock(&ailp->ail_lock);
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
                        xfs_buf_item_relse(bp);
                        ASSERT(bp->b_log_item == NULL);
                }
                xfs_buf_relse(bp);
        } else if (freed && remove) {
                /*
                 * There are currently two references to the buffer - the active
                 * LRU reference and the buf log item.  What we are about to do
                 * here - simulate a failed IO completion - requires 3
                 * references.
                 *
                 * The LRU reference is removed by the xfs_buf_stale() call.  The
                 * buf item reference is removed by the xfs_buf_iodone()
                 * callback that is run by xfs_buf_do_callbacks() during ioend
                 * processing (via the bp->b_iodone callback), and then finally
                 * the ioend processing will drop the IO reference if the buffer
                 * is marked XBF_ASYNC.
                 *
                 * Hence we need to take an additional reference here so that IO
                 * completion processing doesn't free the buffer prematurely.
                 */
                xfs_buf_lock(bp);
                xfs_buf_hold(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioerror(bp, -EIO);
                bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);
                xfs_buf_ioend(bp);
        }
}
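
/*
 * Reference counting sketch for a stale buffer (illustrative, not from
 * the original source):
 *
 *	xfs_trans_binval()	bli_refcount == 1 (transaction)
 *	xfs_buf_item_pin()	bli_refcount == 2 (transaction + pin)
 *	xfs_buf_item_release()	bli_refcount == 1, buffer stays locked
 *	xfs_buf_item_unpin()	bli_refcount == 0, bli freed, buffer
 *				unlocked by the final xfs_buf_relse()
 *
 * This is why a stale buffer remains locked from invalidation until the
 * last unpin: the lock is only dropped once the pin reference goes away.
 */
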
/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
 * seconds so as to not spam logs too much on repeated detection of the same
 * buffer being bad.
 */

static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);

STATIC uint
xfs_buf_item_push(
        struct xfs_log_item	*lip,
        struct list_head	*buffer_list)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        struct xfs_buf		*bp = bip->bli_buf;
        uint			rval = XFS_ITEM_SUCCESS;

        if (xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
        if (!xfs_buf_trylock(bp)) {
                /*
                 * If we have just raced with a buffer being pinned and it has
                 * been marked stale, we could end up stalling until someone
                 * else issues a log force to unpin the stale buffer.  Check
                 * for the race condition here so xfsaild recognizes the
                 * buffer is pinned and queues a log force to move it along.
                 */
                if (xfs_buf_ispinned(bp))
                        return XFS_ITEM_PINNED;
                return XFS_ITEM_LOCKED;
        }

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_push(bip);

        /* has a previous flush failed due to IO errors? */
        if ((bp->b_flags & XBF_WRITE_FAIL) &&
            ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
                xfs_warn(bp->b_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
                         (long long)bp->b_bn);
        }

        if (!xfs_buf_delwri_queue(bp, buffer_list))
                rval = XFS_ITEM_FLUSHING;
        xfs_buf_unlock(bp);
        return rval;
}
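
/*
 * Illustrative note (not from the original source): xfsaild acts on the
 * value returned above.  Roughly, XFS_ITEM_PINNED makes it request a log
 * force so the pin is released, XFS_ITEM_LOCKED skips the item for this
 * pass, XFS_ITEM_FLUSHING waits for the in-flight write to complete, and
 * XFS_ITEM_SUCCESS means the buffer was queued on buffer_list for delayed
 * write submission.
 */
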
/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
        struct xfs_buf_log_item	*bip)
{
        struct xfs_log_item	*lip = &bip->bli_item;
        bool			aborted;
        bool			dirty;

        /* drop the bli ref and return if it wasn't the last one */
        if (!atomic_dec_and_test(&bip->bli_refcount))
                return false;

        /*
         * We dropped the last ref and must free the item if clean or aborted.
         * If the bli is dirty and non-aborted, the buffer was clean in the
         * transaction but still awaiting writeback from previous changes. In
         * that case, the bli is freed on buffer writeback completion.
         */
        aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
                  XFS_FORCED_SHUTDOWN(lip->li_mountp);
        dirty = bip->bli_flags & XFS_BLI_DIRTY;
        if (dirty && !aborted)
                return false;

        /*
         * The bli is aborted or clean. An aborted item may be in the AIL
         * regardless of dirty state.  For example, consider an aborted
         * transaction that invalidated a dirty bli and cleared the dirty
         * state.
         */
        if (aborted)
                xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
        xfs_buf_item_relse(bip->bli_buf);
        return true;
}
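
/*
 * Decision summary for xfs_buf_item_put() (illustrative, not from the
 * original source):
 *
 *	refcount still > 0		-> keep bli, return false
 *	last ref, dirty, not aborted	-> keep bli for writeback
 *					   completion, return false
 *	last ref, clean or aborted	-> remove from AIL if aborted,
 *					   free the bli, return true
 */
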
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold().  Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
        struct xfs_log_item	*lip)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
        struct xfs_buf		*bp = bip->bli_buf;
        bool			released;
        bool			hold = bip->bli_flags & XFS_BLI_HOLD;
        bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
        bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
        bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
        bool			aborted = test_bit(XFS_LI_ABORTED,
                                                   &lip->li_flags);
#endif

        trace_xfs_buf_item_release(bip);

        /*
         * The bli dirty state should match whether the blf has logged segments
         * except for ordered buffers, where only the bli should be dirty.
         */
        ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
               (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
        ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

        /*
         * Clear the buffer's association with this transaction and
         * per-transaction state from the bli, which has been copied above.
         */
        bp->b_transp = NULL;
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

        /*
         * Unref the item and unlock the buffer unless held or stale.  Stale
         * buffers remain locked until final unpin unless the bli is freed by
         * the unref call.  The latter implies shutdown because buffer
         * invalidation dirties the bli and transaction.
         */
        released = xfs_buf_item_put(bip);
        if (hold || (stale && !released))
                return;
        ASSERT(!stale || aborted);
        xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
        struct xfs_log_item	*lip,
        xfs_lsn_t		commit_lsn)
{
        return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
        struct xfs_log_item	*lip,
        xfs_lsn_t		lsn)
{
        struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

        trace_xfs_buf_item_committed(bip);

        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
                return lip->li_lsn;
        return lsn;
}
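
/*
 * Worked example for the lsn handling above (illustrative, not from the
 * original source): an inode allocation buffer is first logged in a
 * transaction that commits at lsn 100, so li_lsn == 100.  A later
 * transaction relogs only the unlinked list pointers and commits at lsn
 * 200.  Because XFS_BLI_INODE_ALLOC_BUF is set and li_lsn != 0,
 * xfs_buf_item_committed() returns 100, keeping the original inode
 * images within the active region of the log for recovery.
 */
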
static const struct xfs_item_ops xfs_buf_item_ops = {
        .iop_size	= xfs_buf_item_size,
        .iop_format	= xfs_buf_item_format,
        .iop_pin	= xfs_buf_item_pin,
        .iop_unpin	= xfs_buf_item_unpin,
        .iop_release	= xfs_buf_item_release,
        .iop_committing	= xfs_buf_item_committing,
        .iop_committed	= xfs_buf_item_committed,
        .iop_push	= xfs_buf_item_push,
};

STATIC int
xfs_buf_item_get_format(
        struct xfs_buf_log_item	*bip,
        int			count)
{
        ASSERT(bip->bli_formats == NULL);
        bip->bli_format_count = count;

        if (count == 1) {
                bip->bli_formats = &bip->__bli_format;
                return 0;
        }

        bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
                                       0);
        if (!bip->bli_formats)
                return -ENOMEM;
        return 0;
}

STATIC void
xfs_buf_item_free_format(
        struct xfs_buf_log_item	*bip)
{
        if (bip->bli_formats != &bip->__bli_format) {
                kmem_free(bip->bli_formats);
                bip->bli_formats = NULL;
        }
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
        struct xfs_buf	*bp,
        struct xfs_mount *mp)
{
        struct xfs_buf_log_item	*bip = bp->b_log_item;
        int			chunks;
        int			map_size;
        int			error;
        int			i;

        /*
         * Check to see if there is already a buf log item for
         * this buffer. If we do already have one, there is
         * nothing to do here so return.
         */
        ASSERT(bp->b_mount == mp);
        if (bip) {
                ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
                ASSERT(!bp->b_transp);
                ASSERT(bip->bli_buf == bp);
                return 0;
        }

        bip = kmem_zone_zalloc(xfs_buf_item_zone, 0);
        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
        bip->bli_buf = bp;

        /*
         * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
         * can be divided into. Make sure not to truncate any pieces.
         * map_size is the size of the bitmap needed to describe the
         * chunks of the buffer.
         *
         * Discontiguous buffer support follows the layout of the underlying
         * buffer. This makes the implementation as simple as possible.
         */
        error = xfs_buf_item_get_format(bip, bp->b_map_count);
        ASSERT(error == 0);
        if (error) {	/* to stop gcc throwing set-but-unused warnings */
                kmem_cache_free(xfs_buf_item_zone, bip);
                return error;
        }

        for (i = 0; i < bip->bli_format_count; i++) {
                chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
                                      XFS_BLF_CHUNK);
                map_size = DIV_ROUND_UP(chunks, NBWORD);

                bip->bli_formats[i].blf_type = XFS_LI_BUF;
                bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
                bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
                bip->bli_formats[i].blf_map_size = map_size;
        }

        bp->b_log_item = bip;
        xfs_buf_hold(bp);
        return 0;
}


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
        uint			first,
        uint			last,
        uint			*map)
{
        uint			first_bit;
        uint			last_bit;
        uint			bits_to_set;
        uint			bits_set;
        uint			word_num;
        uint			*wordp;
        uint			bit;
        uint			end_bit;
        uint			mask;

        /*
         * Convert byte offsets to bit numbers.
         */
        first_bit = first >> XFS_BLF_SHIFT;
        last_bit = last >> XFS_BLF_SHIFT;

        /*
         * Calculate the total number of bits to be set.
         */
        bits_to_set = last_bit - first_bit + 1;

        /*
         * Get a pointer to the first word in the bitmap
         * to set a bit in.
         */
        word_num = first_bit >> BIT_TO_WORD_SHIFT;
        wordp = &map[word_num];

        /*
         * Calculate the starting bit in the first word.
         */
        bit = first_bit & (uint)(NBWORD - 1);

        /*
         * First set any bits in the first word of our range.
         * If it starts at bit 0 of the word, it will be
         * set below rather than here.  That is what the variable
         * bit tells us. The variable bits_set tracks the number
         * of bits that have been set so far.  End_bit is the number
         * of the last bit to be set in this word plus one.
         */
        if (bit) {
                end_bit = min(bit + bits_to_set, (uint)NBWORD);
                mask = ((1U << (end_bit - bit)) - 1) << bit;
                *wordp |= mask;
                wordp++;
                bits_set = end_bit - bit;
        } else {
                bits_set = 0;
        }

        /*
         * Now set bits a whole word at a time that are between
         * first_bit and last_bit.
         */
        while ((bits_to_set - bits_set) >= NBWORD) {
                *wordp = 0xffffffff;
                bits_set += NBWORD;
                wordp++;
        }

        /*
         * Finally, set any bits left to be set in one last partial word.
         */
        end_bit = bits_to_set - bits_set;
        if (end_bit) {
                mask = (1U << end_bit) - 1;
                *wordp |= mask;
        }
}
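
/*
 * Worked example for the bitmap arithmetic above (illustrative, not from
 * the original source; assumes XFS_BLF_SHIFT == 7, so XFS_BLF_CHUNK ==
 * 128, and NBWORD == 32).  Logging bytes 600 through 1199:
 *
 *	first_bit = 600 >> 7 = 4
 *	last_bit = 1199 >> 7 = 9
 *	bits_to_set = 6
 *	bit = 4, end_bit = min(4 + 6, 32) = 10
 *	mask = ((1 << 6) - 1) << 4 = 0x3f0
 *
 * so map[0] |= 0x3f0 and no further words are touched.  Note that the
 * logged range is rounded out to chunk boundaries: bytes 512-1279 are
 * marked dirty even though only 600-1199 were requested.
 */
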
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
        struct xfs_buf_log_item	*bip,
        uint			first,
        uint			last)
{
        int			i;
        uint			start;
        uint			end;
        struct xfs_buf		*bp = bip->bli_buf;

        /*
         * walk each buffer segment and mark them dirty appropriately.
         */
        start = 0;
        for (i = 0; i < bip->bli_format_count; i++) {
                if (start > last)
                        break;
                end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

                /* skip to the map that includes the first byte to log */
                if (first > end) {
                        start += BBTOB(bp->b_maps[i].bm_len);
                        continue;
                }

                /*
                 * Trim the range to this segment and mark it in the bitmap.
                 * Note that we must convert buffer offsets to segment relative
                 * offsets (e.g., the first byte of each segment is byte 0 of
                 * that segment).
                 */
                if (first < start)
                        first = start;
                if (end > last)
                        end = last;
                xfs_buf_item_log_segment(first - start, end - start,
                                         &bip->bli_formats[i].blf_data_map[0]);

                start += BBTOB(bp->b_maps[i].bm_len);
        }
}


/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
        struct xfs_buf_log_item	*bip)
{
        int			i;

        for (i = 0; i < bip->bli_format_count; i++) {
                if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
                                      bip->bli_formats[i].blf_map_size))
                        return true;
        }

        return false;
}
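
/*
 * Worked example of the segment trimming above (illustrative, not from
 * the original source): a discontiguous buffer with two maps of 8 basic
 * blocks each covers bytes 0-4095 (segment 0) and 4096-8191 (segment 1).
 * Calling xfs_buf_item_log(bip, 4000, 4600) marks:
 *
 *	segment 0: relative bytes 4000-4095
 *	segment 1: relative bytes 0-504
 *
 * Each segment's bitmap then rounds its range out to XFS_BLF_CHUNK
 * boundaries as shown for xfs_buf_item_log_segment().
 */
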
STATIC void
xfs_buf_item_free(
        struct xfs_buf_log_item	*bip)
{
        xfs_buf_item_free_format(bip);
        kmem_free(bip->bli_item.li_lv_shadow);
        kmem_cache_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
        xfs_buf_t	*bp)
{
        struct xfs_buf_log_item	*bip = bp->b_log_item;

        trace_xfs_buf_item_relse(bp, _RET_IP_);
        ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

        bp->b_log_item = NULL;
        if (list_empty(&bp->b_li_list))
                bp->b_iodone = NULL;

        xfs_buf_rele(bp);
        xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_li_list.
 */
void
xfs_buf_attach_iodone(
        struct xfs_buf		*bp,
        void			(*cb)(struct xfs_buf *, struct xfs_log_item *),
        struct xfs_log_item	*lip)
{
        ASSERT(xfs_buf_islocked(bp));

        lip->li_cb = cb;
        list_add_tail(&lip->li_bio_list, &bp->b_li_list);

        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);
        bp->b_iodone = xfs_buf_iodone_callbacks;
}

/*
 * We can have many callbacks on a buffer.  Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining items in bp->b_li_list for other
 * items of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list.  It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new first item in the list.  This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
        struct xfs_buf		*bp)
{
        struct xfs_buf_log_item *blip = bp->b_log_item;
        struct xfs_log_item	*lip;

        /* If there is a buf_log_item attached, run its callback */
        if (blip) {
                lip = &blip->bli_item;
                lip->li_cb(bp, lip);
        }

        while (!list_empty(&bp->b_li_list)) {
                lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
                                       li_bio_list);

                /*
                 * Remove the item from the list, so we don't have any
                 * confusion if the item is added to another buf.
                 * Don't touch the log item after calling its
                 * callback, because it could have freed itself.
                 */
                list_del_init(&lip->li_bio_list);
                lip->li_cb(bp, lip);
        }
}

/*
 * Invoke the error state callback for each log item affected by the failed I/O.
 *
 * If a metadata buffer write fails with a non-permanent error, the buffer is
 * eventually resubmitted and so the completion callbacks are not run. The error
 * state may need to be propagated to the log items attached to the buffer,
 * however, so the next AIL push of the item knows how to handle it correctly.
 */
STATIC void
xfs_buf_do_callbacks_fail(
        struct xfs_buf		*bp)
{
        struct xfs_log_item	*lip;
        struct xfs_ail		*ailp;

        /*
         * Buffer log item errors are handled directly by xfs_buf_item_push()
         * and xfs_buf_iodone_callback_error, and they have no IO error
         * callbacks.  Check only for items in b_li_list.
         */
        if (list_empty(&bp->b_li_list))
                return;

        lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
                               li_bio_list);
        ailp = lip->li_ailp;
        spin_lock(&ailp->ail_lock);
        list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
                if (lip->li_ops->iop_error)
                        lip->li_ops->iop_error(lip, bp);
        }
        spin_unlock(&ailp->ail_lock);
}
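
/*
 * Illustrative note (not from the original source): the inode log item
 * is a typical user of this error path.  Its ->iop_error handler marks
 * the item XFS_LI_FAILED (via xfs_set_li_failed(), which also holds the
 * buffer), so that a later AIL push can recognize the failed state and
 * resubmit the buffer through xfs_buf_resubmit_failed_buffers() below.
 */
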
static bool
xfs_buf_iodone_callback_error(
        struct xfs_buf		*bp)
{
        struct xfs_buf_log_item	*bip = bp->b_log_item;
        struct xfs_log_item	*lip;
        struct xfs_mount	*mp;
        static ulong		lasttime;
        static xfs_buftarg_t	*lasttarg;
        struct xfs_error_cfg	*cfg;

        /*
         * The failed buffer might not have a buf_log_item attached or the
         * log_item list might be empty.  Get the mp from the available
         * xfs_log_item.
         */
        lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
                                       li_bio_list);
        mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;

        /*
         * If we've already decided to shutdown the filesystem because of
         * I/O errors, there's no point in giving this a retry.
         */
        if (XFS_FORCED_SHUTDOWN(mp))
                goto out_stale;

        if (bp->b_target != lasttarg ||
            time_after(jiffies, (lasttime + 5*HZ))) {
                lasttime = jiffies;
                xfs_buf_ioerror_alert(bp, __func__);
        }
        lasttarg = bp->b_target;

        /* synchronous writes will have callers process the error */
        if (!(bp->b_flags & XBF_ASYNC))
                goto out_stale;

        trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
        ASSERT(bp->b_iodone != NULL);

        cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

        /*
         * If the write was asynchronous then no one will be looking for the
         * error.  If this is the first failure of this type, clear the error
         * state and write the buffer out again. This means we always retry an
         * async write failure at least once, but we also need to set the buffer
         * up to behave correctly now for repeated failures.
         */
        if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
             bp->b_last_error != bp->b_error) {
                bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
                bp->b_last_error = bp->b_error;
                if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
                    !bp->b_first_retry_time)
                        bp->b_first_retry_time = jiffies;

                xfs_buf_ioerror(bp, 0);
                xfs_buf_submit(bp);
                return true;
        }

        /*
         * Repeated failure on an async write.  Take action according to the
         * error configuration we have been set up to use.
         */

        if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
            ++bp->b_retries > cfg->max_retries)
                goto permanent_error;
        if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
            time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
                goto permanent_error;

        /* At unmount we may treat errors differently */
        if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
                goto permanent_error;

        /*
         * Still a transient error, run IO completion failure callbacks and let
         * the higher layers retry the buffer.
         */
        xfs_buf_do_callbacks_fail(bp);
        xfs_buf_ioerror(bp, 0);
        xfs_buf_relse(bp);
        return true;

        /*
         * Permanent error - we need to trigger a shutdown if we haven't already
         * to indicate that inconsistency will result from this action.
         */
permanent_error:
        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
        xfs_buf_stale(bp);
        bp->b_flags |= XBF_DONE;
        trace_xfs_buf_error_relse(bp, _RET_IP_);
        return false;
}
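
/*
 * Retry timeline sketch (illustrative, not from the original source;
 * assumes a hypothetical error config with max_retries == 2 and
 * retry_timeout == XFS_ERR_RETRY_FOREVER):
 *
 *	failure 1: no XBF_WRITE_FAIL	-> set XBF_WRITE_FAIL, resubmit
 *	failure 2: same error, retries 1 -> transient, fail callbacks, retry
 *	failure 3: same error, retries 2 -> transient, fail callbacks, retry
 *	failure 4: same error, retries 3 -> permanent error, shutdown
 */
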
/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone().  We need to iterate the items on the
 * callback list, mark the buffer as having no more callbacks and then push the
 * buffer through IO completion processing.
 */
void
xfs_buf_iodone_callbacks(
        struct xfs_buf		*bp)
{
        /*
         * If there is an error, process it. Some errors require us to run
         * callbacks after failure processing is done so we detect that and
         * take appropriate action.
         */
        if (bp->b_error && xfs_buf_iodone_callback_error(bp))
                return;

        /*
         * Successful IO or permanent error. Either way, we can clear the
         * retry state here in preparation for the next error that may occur.
         */
        bp->b_last_error = 0;
        bp->b_retries = 0;
        bp->b_first_retry_time = 0;

        xfs_buf_do_callbacks(bp);
        bp->b_log_item = NULL;
        list_del_init(&bp->b_li_list);
        bp->b_iodone = NULL;
        xfs_buf_ioend(bp);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
        struct xfs_buf		*bp,
        struct xfs_log_item	*lip)
{
        struct xfs_ail		*ailp = lip->li_ailp;

        ASSERT(BUF_ITEM(lip)->bli_buf == bp);

        xfs_buf_rele(bp);

        /*
         * If we are forcibly shutting down, this may well be off the AIL
         * already.  That's because we simulate the log-committed callbacks to
         * unpin these buffers.  Or we may never have put this item on the AIL
         * because the transaction was aborted forcibly.
         * xfs_trans_ail_delete() takes care of these.
         *
         * Either way, AIL is useless if we're forcing a shutdown.
         */
        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
        xfs_buf_item_free(BUF_ITEM(lip));
}

/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the buffer
 * may be the failed log items. Hence if we clear the log item failed state
 * before queuing the buffer for IO we can release all active references to
 * the buffer and free it, leading to use-after-free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
 * order we process them in - the buffer is locked, and we own the buffer list
 * so nothing on them is going to change while we are performing this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 *
 * Return true if the buffer was added to the buffer list, false if it was
 * already on the buffer list.
 */
bool
xfs_buf_resubmit_failed_buffers(
        struct xfs_buf		*bp,
        struct list_head	*buffer_list)
{
        struct xfs_log_item	*lip;
        bool			ret;

        ret = xfs_buf_delwri_queue(bp, buffer_list);

        /*
         * XFS_LI_FAILED set/clear is protected by ail_lock; the caller of
         * this function already has it acquired.
         */
        list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
                xfs_clear_li_failed(lip);

        return ret;
}
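
/*
 * Usage sketch (illustrative, not from the original source): an AIL push
 * of an item that was marked XFS_LI_FAILED might, with ail_lock held and
 * the buffer locked, do something like:
 *
 *	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
 *		return xfs_buf_resubmit_failed_buffers(bp, buffer_list) ?
 *				XFS_ITEM_SUCCESS : XFS_ITEM_FLUSHING;
 *
 * so the failed buffer is requeued for writeback and the failed state is
 * cleared on every attached log item in one pass.
 */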