/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}


#ifdef XFS_TRANS_DEBUG
/*
 * This function uses an alternate strategy for tracking the bytes
 * that the user requests to be logged.  This can then be used
 * in conjunction with the bli_orig array in the buf log item to
 * catch bugs in our callers' code.
 *
 * We also double check the bits set in xfs_buf_item_log using a
 * simple algorithm to check that every byte is accounted for.
 */
STATIC void
xfs_buf_item_log_debug(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint	x;
	uint	byte;
	uint	nbytes;
	uint	chunk_num;
	uint	word_num;
	uint	bit_num;
	uint	bit_set;
	uint	*wordp;

	ASSERT(bip->bli_logged != NULL);
	byte = first;
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
	for (x = 0; x < nbytes; x++) {
		chunk_num = byte >> XFS_BLF_SHIFT;
		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
		bit_num = chunk_num & (NBWORD - 1);
		wordp = &(bip->bli_format.blf_data_map[word_num]);
		bit_set = *wordp & (1 << bit_num);
		ASSERT(bit_set);
		byte++;
	}
}

/*
 * This function is called when we flush something into a buffer without
 * logging it.  This happens for things like inodes which are logged
 * separately from the buffer.
 */
void
xfs_buf_item_flush_log_debug(
	xfs_buf_t	*bp,
	uint		first,
	uint		last)
{
	xfs_buf_log_item_t	*bip;
	uint			nbytes;

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) {
		return;
	}

	ASSERT(bip->bli_logged != NULL);
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
}
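/*
 * Worked example for the byte-to-bit mapping used by the debug code above
 * (a sketch; it assumes the usual definitions of XFS_BLF_SHIFT == 7, i.e.
 * 128-byte chunks, NBWORD == 32 and BIT_TO_WORD_SHIFT == 5):
 *
 *	byte      = 700
 *	chunk_num = 700 >> 7 = 5	(the sixth 128-byte chunk)
 *	word_num  = 5 >> 5   = 0	(first word of blf_data_map)
 *	bit_num   = 5 & 31   = 5
 *
 * so a caller that logged byte 700 must have set bit 5 of blf_data_map[0],
 * which is exactly what the ASSERT in xfs_buf_item_log_debug() verifies.
 */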
/*
 * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
 * and ensuring that all bytes which mismatch are set in the bli_logged
 * array of the buf log item.
 */
STATIC void
xfs_buf_item_log_check(
	xfs_buf_log_item_t	*bip)
{
	char		*orig;
	char		*buffer;
	int		x;
	xfs_buf_t	*bp;

	ASSERT(bip->bli_orig != NULL);
	ASSERT(bip->bli_logged != NULL);

	bp = bip->bli_buf;
	ASSERT(XFS_BUF_COUNT(bp) > 0);
	ASSERT(XFS_BUF_PTR(bp) != NULL);
	orig = bip->bli_orig;
	buffer = XFS_BUF_PTR(bp);
	for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
			xfs_emerg(bp->b_mount,
				"%s: bip %x buffer %x orig %x index %d",
				__func__, bip, bp, orig, x);
			ASSERT(0);
		}
	}
}
#else
#define		xfs_buf_item_log_debug(x,y,z)
#define		xfs_buf_item_log_check(x)
#endif

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC uint
xfs_buf_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			nvecs;
	int			next_bit;
	int			last_bit;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		return 1;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
	nvecs = 1;
	last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				bip->bli_format.blf_map_size, 0);
	ASSERT(last_bit != -1);
	nvecs++;
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			last_bit = -1;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	trace_xfs_buf_item_size(bip);
	return nvecs;
}
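/*
 * A small worked example of the counting above (illustrative only, assuming
 * the chunks are contiguous in memory): a data map whose set bits are
 * {0, 1, 4, 5, 6, 9} contains three runs of contiguous chunks (0-1, 4-6
 * and 9), so xfs_buf_item_size() returns 1 iovec for the buf log format
 * structure plus 3 for the data runs, i.e. 4.  A stale buffer
 * short-circuits to 1, the format structure alone.
 */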
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	uint			nvecs;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;
	uint			buffer_offset;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * The size of the base structure is the size of the
	 * declared structure plus the space for the extra words
	 * of the bitmap.  We subtract one from the map size, because
	 * the first element of the bitmap is accounted for in the
	 * size of the base structure.
	 */
	base_size =
		(uint)(sizeof(xfs_buf_log_format_t) +
		       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
	vecp->i_addr = &bip->bli_format;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state. We do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		bip->bli_format.blf_size = nvecs;
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				 bip->bli_format.blf_map_size, 0);
	ASSERT(first_bit != -1);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
			   (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
			    XFS_BLF_CHUNK)) {
			buffer_offset = first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			/*
			 * You would think we need to bump nvecs here too,
			 * but we do not, because this number is used by
			 * recovery and it gets confused by the boundary
			 * split here.
			 *			nvecs++;
			 */
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
	bip->bli_format.blf_size = nvecs;

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
	xfs_buf_item_log_check(bip);
}
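/*
 * Continuing the example above (bits {0, 1, 4, 5, 6, 9} set, chunks
 * contiguous in memory, and assuming 128-byte chunks), xfs_buf_item_format()
 * would fill the vector roughly as:
 *
 *	vecp[0]: XLOG_REG_TYPE_BFORMAT, &bli_format,        base_size bytes
 *	vecp[1]: XLOG_REG_TYPE_BCHUNK,  buffer offset 0,    len 256
 *	vecp[2]: XLOG_REG_TYPE_BCHUNK,  buffer offset 512,  len 384
 *	vecp[3]: XLOG_REG_TYPE_BCHUNK,  buffer offset 1152, len 128
 *
 * and set blf_size to 4, matching what xfs_buf_item_size() predicted.
 */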
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(XFS_BUF_ISBUSY(bip->bli_buf));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			XFS_BUF_SET_FSPRIVATE2(bp, NULL);
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		xfs_buf_relse(bp);
	}
}

/*
 * This is called to attempt to lock the buffer associated with this
 * buf log item.  Don't sleep on the buffer lock.  If we can't get
 * the lock right away, return 0.  If we can get the lock, take a
 * reference to the buffer. If this is a delayed write buffer that
 * needs AIL help to be written back, invoke the pushbuf routine
 * rather than the normal success path.
 */
STATIC uint
xfs_buf_item_trylock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	if (XFS_BUF_ISPINNED(bp))
		return XFS_ITEM_PINNED;
	if (!XFS_BUF_CPSEMA(bp))
		return XFS_ITEM_LOCKED;

	/* take a reference to the buffer.  */
	XFS_BUF_HOLD(bp);

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	trace_xfs_buf_item_trylock(bip);
	if (XFS_BUF_ISDELAYWRITE(bp))
		return XFS_ITEM_PUSHBUF;
	return XFS_ITEM_SUCCESS;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item.
 * If the item was logged, then another reference was taken when it was pinned,
 * so we can safely drop the transaction reference now.  This also allows us to
 * avoid potential races with the unpin code freeing the bli by not referencing
 * the bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			aborted;
	uint			hold;

	/* Clear the buffer's association with this transaction. */
	XFS_BUF_SET_FSPRIVATE2(bp, NULL);

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * Before possibly freeing the buf item, determine if we should
	 * release the buffer at the end of this routine.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;

	/* Clear the per transaction state. */
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it.
	 */
	if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
			     bip->bli_format.blf_map_size))
		xfs_buf_item_relse(bp);
	else
		atomic_dec(&bip->bli_refcount);

	if (!hold)
		xfs_buf_relse(bp);
}
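/*
 * Illustrative use of the XFS_BLI_HOLD handling above (a sketch, not a
 * verbatim caller from the tree): code that needs the buffer to stay locked
 * across a transaction commit does roughly
 *
 *	xfs_trans_bhold(tp, bp);	sets XFS_BLI_HOLD in the bli
 *	xfs_trans_commit(tp, 0);	unlock path sees HOLD and keeps bp
 *	...				bp is still locked and held here
 *	xfs_buf_relse(bp);		caller releases it explicitly
 *
 * Because xfs_buf_item_unlock() clears XFS_BLI_HOLD each time, the hold only
 * lasts for the one transaction that requested it.
 */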
/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}
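/*
 * Concrete illustration of the exception above (the numbers are made up):
 * an inode allocation buffer is first logged by a transaction whose last
 * log write completes at LSN 100, so li_lsn becomes 100.  If the buffer is
 * later relogged with only the unlinked-list pointers dirty and that write
 * completes at LSN 200, returning 100 here keeps the original inode images
 * in the active part of the log until they have been written back.
 */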
/*
 * The buffer is locked, but is not a delayed write buffer. This happens
 * if we race with IO completion and hence we don't want to try to write it
 * again. Just release the buffer.
 */
STATIC void
xfs_buf_item_push(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!XFS_BUF_ISDELAYWRITE(bp));

	trace_xfs_buf_item_push(bip);

	xfs_buf_relse(bp);
}

/*
 * The buffer is locked and is a delayed write buffer. Promote the buffer
 * in the delayed write queue as the caller knows that they must invoke
 * the xfsbufd to get this buffer written. We have to unlock the buffer
 * to allow the xfsbufd to write it, too.
 */
STATIC void
xfs_buf_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(XFS_BUF_ISDELAYWRITE(bp));

	trace_xfs_buf_item_pushbuf(bip);

	xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_trylock	= xfs_buf_item_trylock,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_pushbuf	= xfs_buf_item_pushbuf,
	.iop_committing	= xfs_buf_item_committing
};


/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
		if (lip->li_type == XFS_LI_BUF) {
			return;
		}
	}

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces
	 * the buffer can be divided into. Make sure not to
	 * truncate any pieces.  map_size is the size of the
	 * bitmap needed to describe the chunks of the buffer.
	 */
	chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT);
	map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);

	bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
						    KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);
	bip->bli_format.blf_type = XFS_LI_BUF;
	bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
	bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
	bip->bli_format.blf_map_size = map_size;

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
	memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));
	bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
		bip->bli_item.li_bio_list =
				XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
	}
	XFS_BUF_SET_FSPRIVATE(bp, bip);
}
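/*
 * Worked example for the sizing above (a sketch assuming the usual 128-byte
 * XFS_BLF_CHUNK, XFS_BLF_SHIFT == 7, NBWORD == 32 and BIT_TO_WORD_SHIFT == 5):
 * a 4096-byte buffer gives
 *
 *	chunks   = (4096 + 127) >> 7 = 32
 *	map_size = (32 + 32) >> 5    = 2
 *
 * so blf_data_map gets two 32-bit words, one bit per 128-byte chunk.  The
 * rounding is deliberately generous so that no partial chunk is ever lost.
 */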
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Mark the item as having some dirty data for
	 * quick reference in xfs_buf_item_dirty.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &(bip->bli_format.blf_data_map[word_num]);

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}

	xfs_buf_item_log_debug(bip, first, last);
}
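/*
 * Worked example for the bitmap arithmetic above (again assuming 128-byte
 * chunks and 32-bit map words): logging bytes 256 through 895 gives
 *
 *	first_bit   = 256 >> 7 = 2
 *	last_bit    = 895 >> 7 = 6
 *	bits_to_set = 5
 *	bit         = 2, end_bit = MIN(2 + 5, 32) = 7
 *	mask        = ((1 << 5) - 1) << 2 = 0x7c
 *
 * so bits 2-6 of blf_data_map[0] are set by the first partial-word step and
 * the whole-word and trailing-word steps have nothing left to do.
 */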
/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	trace_xfs_buf_item_relse(bp, _RET_IP_);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
	if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
	    (XFS_BUF_IODONE_FUNC(bp) != NULL)) {
		XFS_BUF_CLR_IODONE_FUNC(bp);
	}
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	lip->li_cb = cb;
	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
		head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		XFS_BUF_SET_FSPRIVATE(bp, lip);
	}

	ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) ||
	       (XFS_BUF_IODONE_FUNC(bp) == NULL));
	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
}

/*
 * We can have many callbacks on a buffer.  Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list.  It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list.  This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
		XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!XFS_BUF_GETERROR(bp)))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		XFS_BUF_SUPER_STALE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}

	if (XFS_BUF_TARGET(bp) != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_alert(mp, "Device %s: metadata write error block 0x%llx",
			XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
			(__uint64_t)XFS_BUF_ADDR(bp));
	}
	lasttarg = XFS_BUF_TARGET(bp);

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  Clear the error state and write the buffer out again.
	 *
	 * During sync or umount we'll write all pending buffers again
	 * synchronously, which will catch these errors if they keep hanging
	 * around.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */

		if (!XFS_BUF_ISSTALE(bp)) {
			XFS_BUF_DELAYWRITE(bp);
			XFS_BUF_DONE(bp);
			XFS_BUF_SET_START(bp);
		}
		ASSERT(XFS_BUF_IODONE_FUNC(bp));
		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
		xfs_buf_relse(bp);
		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make
	 * sure to return the error to the caller of xfs_bwrite().
	 */
	XFS_BUF_STALE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_UNDELAYWRITE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already.  That's because we simulate the
	 * log-committed callbacks to unpin these buffers.  Or we may never
	 * have put this item on the AIL because the transaction was
	 * aborted forcibly.  xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip);
	xfs_buf_item_free(BUF_ITEM(lip));
}