/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

/*
 * This file contains the implementation of the xfs_buf_log_item.
 * It contains the item operations used to manipulate the buf log
 * items as well as utility routines used by the buffer specific
 * transaction routines.
 */

#include "xfs.h"

#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_sb.h"
#include "xfs_dir.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_rw.h"
#include "xfs_bit.h"
#include "xfs_error.h"


kmem_zone_t	*xfs_buf_item_zone;

#ifdef XFS_TRANS_DEBUG
/*
 * This function uses an alternate strategy for tracking the bytes
 * that the user requests to be logged.  This can then be used
 * in conjunction with the bli_orig array in the buf log item to
 * catch bugs in our callers' code.
 *
 * We also double check the bits set in xfs_buf_item_log using a
 * simple algorithm to check that every byte is accounted for.
 */
STATIC void
xfs_buf_item_log_debug(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint	x;
	uint	byte;
	uint	nbytes;
	uint	chunk_num;
	uint	word_num;
	uint	bit_num;
	uint	bit_set;
	uint	*wordp;

	ASSERT(bip->bli_logged != NULL);
	byte = first;
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
	for (x = 0; x < nbytes; x++) {
		chunk_num = byte >> XFS_BLI_SHIFT;
		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
		bit_num = chunk_num & (NBWORD - 1);
		wordp = &(bip->bli_format.blf_data_map[word_num]);
		bit_set = *wordp & (1 << bit_num);
		ASSERT(bit_set);
		byte++;
	}
}
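
/*
 * Illustrative sketch (not from the original source): assuming the usual
 * 128-byte chunk size (XFS_BLI_CHUNK == 128, XFS_BLI_SHIFT == 7) and
 * 32-bit map words (NBWORD == 32, BIT_TO_WORD_SHIFT == 5), the
 * byte-to-bitmap mapping used above works out as follows for byte
 * offset 700 in the buffer:
 *
 *	chunk_num = 700 >> 7 = 5	(the byte lives in chunk 5)
 *	word_num  = 5 >> 5   = 0	(chunk bits 0-31 live in word 0)
 *	bit_num   = 5 & 31   = 5	(bit 5 of that word)
 *
 * so logging any byte in the range 640-767 must have set bit 5 of
 * blf_data_map[0], which is exactly what the ASSERT above checks.
 */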
/*
 * This function is called when we flush something into a buffer without
 * logging it.  This happens for things like inodes which are logged
 * separately from the buffer.
 */
void
xfs_buf_item_flush_log_debug(
	xfs_buf_t	*bp,
	uint		first,
	uint		last)
{
	xfs_buf_log_item_t	*bip;
	uint			nbytes;

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) {
		return;
	}

	ASSERT(bip->bli_logged != NULL);
	nbytes = last - first + 1;
	bfset(bip->bli_logged, first, nbytes);
}

/*
 * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
 * and ensuring that all bytes which miscompare are set in the bli_logged
 * array of the buf log item.
 */
STATIC void
xfs_buf_item_log_check(
	xfs_buf_log_item_t	*bip)
{
	char		*orig;
	char		*buffer;
	int		x;
	xfs_buf_t	*bp;

	ASSERT(bip->bli_orig != NULL);
	ASSERT(bip->bli_logged != NULL);

	bp = bip->bli_buf;
	ASSERT(XFS_BUF_COUNT(bp) > 0);
	ASSERT(XFS_BUF_PTR(bp) != NULL);
	orig = bip->bli_orig;
	buffer = XFS_BUF_PTR(bp);
	for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x))
			cmn_err(CE_PANIC,
	"xfs_buf_item_log_check bip %x buffer %x orig %x index %d",
				bip, bp, orig, x);
	}
}
#else
#define		xfs_buf_item_log_debug(x,y,z)
#define		xfs_buf_item_log_check(x)
#endif

STATIC void	xfs_buf_error_relse(xfs_buf_t *bp);
STATIC void	xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
uint
xfs_buf_item_size(
	xfs_buf_log_item_t	*bip)
{
	uint		nvecs;
	int		next_bit;
	int		last_bit;
	xfs_buf_t	*bp;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		xfs_buf_item_trace("SIZE STALE", bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		return 1;
	}

	bp = bip->bli_buf;
	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
	nvecs = 1;
	last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				bip->bli_format.blf_map_size, 0);
	ASSERT(last_bit != -1);
	nvecs++;
	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			last_bit = -1;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			nvecs++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLI_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLI_CHUNK) +
			    XFS_BLI_CHUNK)) {
			last_bit = next_bit;
			nvecs++;
		} else {
			last_bit++;
		}
	}

	xfs_buf_item_trace("SIZE NORM", bip);
	return nvecs;
}
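
/*
 * Worked example (illustrative only, not in the original source): with
 * the first data map word holding the set bits {0, 1, 2, 5, 6}, i.e. two
 * contiguous runs of dirty chunks, the loop above computes
 *
 *	nvecs = 1 (format structure) + 1 (run 0-2) + 1 (run 5-6) = 3
 *
 * The extra xfs_buf_offset() comparison only adds vectors when chunks
 * that are adjacent in the bitmap are not adjacent in memory, so for a
 * contiguously mapped buffer it changes nothing in this example.
 */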
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
void
xfs_buf_item_format(
	xfs_buf_log_item_t	*bip,
	xfs_log_iovec_t		*log_vector)
{
	uint		base_size;
	uint		nvecs;
	xfs_log_iovec_t	*vecp;
	xfs_buf_t	*bp;
	int		first_bit;
	int		last_bit;
	int		next_bit;
	uint		nbits;
	uint		buffer_offset;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	bp = bip->bli_buf;
	ASSERT(XFS_BUF_BP_ISMAPPED(bp));
	vecp = log_vector;

	/*
	 * The size of the base structure is the size of the
	 * declared structure plus the space for the extra words
	 * of the bitmap.  We subtract one from the map size, because
	 * the first element of the bitmap is accounted for in the
	 * size of the base structure.
	 */
	base_size =
		(uint)(sizeof(xfs_buf_log_format_t) +
		       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
	vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
	vecp->i_len = base_size;
	vecp++;
	nvecs = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		xfs_buf_item_trace("FORMAT STALE", bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		bip->bli_format.blf_size = nvecs;
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
				 bip->bli_format.blf_map_size, 0);
	ASSERT(first_bit != -1);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
					bip->bli_format.blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = first_bit * XFS_BLI_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLI_CHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = first_bit * XFS_BLI_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLI_CHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, next_bit << XFS_BLI_SHIFT) !=
			   (xfs_buf_offset(bp, last_bit << XFS_BLI_SHIFT) +
			    XFS_BLI_CHUNK)) {
			buffer_offset = first_bit * XFS_BLI_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLI_CHUNK;
			/*
			 * You would think we need to bump nvecs here too,
			 * but we do not: this number is used by recovery,
			 * and it gets confused by the boundary split here.
			 *	nvecs++;
			 */
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
	bip->bli_format.blf_size = nvecs;

	/*
	 * Check to make sure everything is consistent.
	 */
	xfs_buf_item_trace("FORMAT NORM", bip);
	xfs_buf_item_log_check(bip);
}
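
/*
 * For illustration (not part of the original source): continuing the
 * example above with dirty chunk runs 0-2 and 5-6 and the usual 128-byte
 * chunks, xfs_buf_item_format() would fill the caller's vector roughly as
 *
 *	log_vector[0] = { &bip->bli_format,        base_size }
 *	log_vector[1] = { xfs_buf_offset(bp, 0),   3 * 128 }
 *	log_vector[2] = { xfs_buf_offset(bp, 640), 2 * 128 }
 *
 * and set blf_size to 3, matching what xfs_buf_item_size() reported.
 */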
/*
 * This is called to pin the buffer associated with the buf log
 * item in memory so it cannot be written out.  Simply call bpin()
 * on the buffer to do this.
 */
void
xfs_buf_item_pin(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_t	*bp;

	bp = bip->bli_buf;
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	xfs_buf_item_trace("PIN", bip);
	xfs_buftrace("XFS_PIN", bp);
	xfs_bpin(bp);
}


/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 * Just call bunpin() on the buffer to do this.
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 */
void
xfs_buf_item_unpin(
	xfs_buf_log_item_t	*bip,
	int			stale)
{
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	int		freed;
	SPLDECL(s);

	bp = bip->bli_buf;
	ASSERT(bp != NULL);
	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	xfs_buf_item_trace("UNPIN", bip);
	xfs_buftrace("XFS_UNPIN", bp);

	freed = atomic_dec_and_test(&bip->bli_refcount);
	mp = bip->bli_item.li_mountp;
	xfs_bunpin(bp);
	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		xfs_buf_item_trace("UNPIN STALE", bip);
		xfs_buftrace("XFS_UNPIN STALE", bp);
		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL.  xfs_trans_delete_ail()
		 * will take care of that situation.
		 * xfs_trans_delete_ail() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			AIL_LOCK(mp,s);
			xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		xfs_buf_relse(bp);
	}
}

/*
 * This is called from uncommit in the forced-shutdown path.
 * We need to check to see if the reference count on the log item
 * is going to drop to zero.  If so, unpin will free the log item,
 * so we need to free the item's descriptor (that points to the item)
 * in the transaction.
 */
void
xfs_buf_item_unpin_remove(
	xfs_buf_log_item_t	*bip,
	xfs_trans_t		*tp)
{
	xfs_buf_t		*bp;
	xfs_log_item_desc_t	*lidp;
	int			stale = 0;

	bp = bip->bli_buf;
	/*
	 * Will xfs_buf_item_unpin() call xfs_buf_item_relse()?
	 */
	if ((atomic_read(&bip->bli_refcount) == 1) &&
	    (bip->bli_flags & XFS_BLI_STALE)) {
		ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
		xfs_buf_item_trace("UNPIN REMOVE", bip);
		xfs_buftrace("XFS_UNPIN_REMOVE", bp);
		/*
		 * Yes -- clear the transaction descriptor in-use flag
		 * and free the chunk if required.  We can safely do
		 * some work here and then call buf_item_unpin to do
		 * the rest because if the test above is true we are
		 * holding the buffer locked, so no one else will be
		 * able to bump up the refcount.
		 */
		lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) bip);
		stale = lidp->lid_flags & XFS_LID_BUF_STALE;
		xfs_trans_free_item(tp, lidp);
		/*
		 * Since the transaction no longer refers to the buffer,
		 * the buffer should no longer refer to the transaction.
		 */
		XFS_BUF_SET_FSPRIVATE2(bp, NULL);
	}

	xfs_buf_item_unpin(bip, stale);

	return;
}

/*
 * This is called to attempt to lock the buffer associated with this
 * buf log item.  Don't sleep on the buffer lock.  If the buffer is
 * pinned, return XFS_ITEM_PINNED; if we can't get the lock right
 * away, return XFS_ITEM_LOCKED.  If we can get the lock, take a hold
 * on the buffer so it cannot be recycled and return XFS_ITEM_SUCCESS.
 */
uint
xfs_buf_item_trylock(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_t	*bp;

	bp = bip->bli_buf;

	if (XFS_BUF_ISPINNED(bp)) {
		return XFS_ITEM_PINNED;
	}

	if (!XFS_BUF_CPSEMA(bp)) {
		return XFS_ITEM_LOCKED;
	}

	/*
	 * Remove the buffer from the free list.  Only do this
	 * if it's on the free list.  Private buffers like the
	 * superblock buffer are not.
	 */
	XFS_BUF_HOLD(bp);

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	xfs_buf_item_trace("TRYLOCK SUCCESS", bip);
	return XFS_ITEM_SUCCESS;
}
/*
 * Release the buffer associated with the buf log item.
 * If there is no dirty logged data associated with the
 * buffer recorded in the buf log item, then free the
 * buf log item and remove the reference to it in the
 * buffer.
 *
 * This call ignores the recursion count.  It is only called
 * when the buffer should REALLY be unlocked, regardless
 * of the recursion count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then
 * free the log item if necessary but do not unlock the buffer.
 * This is for support of xfs_trans_bhold().  Make sure the
 * XFS_BLI_HOLD field is cleared if we don't free the item.
 */
void
xfs_buf_item_unlock(
	xfs_buf_log_item_t	*bip)
{
	int		aborted;
	xfs_buf_t	*bp;
	uint		hold;

	bp = bip->bli_buf;
	xfs_buftrace("XFS_UNLOCK", bp);

	/*
	 * Clear the buffer's association with this transaction.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, NULL);

	/*
	 * If this is a transaction abort, don't return early.
	 * Instead, allow the brelse to happen.
	 * Normally it would be done for stale (cancelled) buffers
	 * at unpin time, but we'll never go through the pin/unpin
	 * cycle if we abort inside commit.
	 */
	aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0;

	/*
	 * If the buf item is marked stale, then don't do anything.
	 * We'll unlock the buffer and free the buf item when the
	 * buffer is unpinned for the last time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_LOGGED;
		xfs_buf_item_trace("UNLOCK STALE", bip);
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		if (!aborted)
			return;
	}

	/*
	 * Drop the transaction's reference to the log item if
	 * it was not logged as part of the transaction.  Otherwise
	 * we'll drop the reference in xfs_buf_item_unpin() when
	 * the transaction is really through with the buffer.
	 */
	if (!(bip->bli_flags & XFS_BLI_LOGGED)) {
		atomic_dec(&bip->bli_refcount);
	} else {
		/*
		 * Clear the logged flag since this is per
		 * transaction state.
		 */
		bip->bli_flags &= ~XFS_BLI_LOGGED;
	}

	/*
	 * Before possibly freeing the buf item, determine if we should
	 * release the buffer at the end of this routine.
	 */
	hold = bip->bli_flags & XFS_BLI_HOLD;
	xfs_buf_item_trace("UNLOCK", bip);

	/*
	 * If the buf item isn't tracking any data, free it.
	 * Otherwise, if XFS_BLI_HOLD is set clear it.
	 */
	if (xfs_count_bits(bip->bli_format.blf_data_map,
			   bip->bli_format.blf_map_size, 0) == 0) {
		xfs_buf_item_relse(bp);
	} else if (hold) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Release the buffer if XFS_BLI_HOLD was not set.
	 */
	if (!hold) {
		xfs_buf_relse(bp);
	}
}
/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
xfs_lsn_t
xfs_buf_item_committed(
	xfs_buf_log_item_t	*bip,
	xfs_lsn_t		lsn)
{
	xfs_buf_item_trace("COMMITTED", bip);
	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
	    (bip->bli_item.li_lsn != 0)) {
		return bip->bli_item.li_lsn;
	}
	return (lsn);
}

/*
 * This is called when the transaction holding the buffer is aborted.
 * Just behave as if the transaction had been cancelled.  If we're
 * shutting down and have aborted this transaction, we'll trap this
 * buffer when it tries to get written out.
 */
void
xfs_buf_item_abort(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_t	*bp;

	bp = bip->bli_buf;
	xfs_buftrace("XFS_ABORT", bp);
	XFS_BUF_SUPER_STALE(bp);
	xfs_buf_item_unlock(bip);
	return;
}

/*
 * This is called to asynchronously write the buffer associated with this
 * buf log item out to disk.  The buffer will already have been locked by
 * a successful call to xfs_buf_item_trylock().  If the buffer still has
 * B_DELWRI set, then get it going out to disk with a call to bawrite().
 * If not, then just release the buffer.
 */
void
xfs_buf_item_push(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_t	*bp;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	xfs_buf_item_trace("PUSH", bip);

	bp = bip->bli_buf;

	if (XFS_BUF_ISDELAYWRITE(bp)) {
		xfs_bawrite(bip->bli_item.li_mountp, bp);
	} else {
		xfs_buf_relse(bp);
	}
}

/* ARGSUSED */
void
xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
	.iop_format	= (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
					xfs_buf_item_format,
	.iop_pin	= (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
	.iop_unpin	= (void(*)(xfs_log_item_t*, int))xfs_buf_item_unpin,
	.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
					xfs_buf_item_unpin_remove,
	.iop_trylock	= (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
	.iop_unlock	= (void(*)(xfs_log_item_t*))xfs_buf_item_unlock,
	.iop_committed	= (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_buf_item_committed,
	.iop_push	= (void(*)(xfs_log_item_t*))xfs_buf_item_push,
	.iop_abort	= (void(*)(xfs_log_item_t*))xfs_buf_item_abort,
	.iop_pushbuf	= NULL,
	.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
					xfs_buf_item_committing
};
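
/*
 * Illustrative note (not from the original source): the generic
 * transaction and log code only ever sees these items as xfs_log_item_t
 * pointers and dispatches through the vector above, roughly as
 *
 *	nvecs = lip->li_ops->iop_size(lip);
 *	lip->li_ops->iop_format(lip, vecp);
 *
 * which is why each entry casts a buf-item-specific function to the
 * generic xfs_log_item_t signature.
 */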

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	if (XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *) != mp)
		XFS_BUF_SET_FSPRIVATE3(bp, mp);
	XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
		if (lip->li_type == XFS_LI_BUF) {
			return;
		}
	}

	/*
	 * chunks is the number of XFS_BLI_CHUNK size pieces
	 * the buffer can be divided into.  Make sure not to
	 * truncate any pieces.  map_size is the size of the
	 * bitmap needed to describe the chunks of the buffer.
	 */
	chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLI_CHUNK - 1)) >> XFS_BLI_SHIFT);
	map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);

	bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
						    KM_SLEEP);
	bip->bli_item.li_type = XFS_LI_BUF;
	bip->bli_item.li_ops = &xfs_buf_item_ops;
	bip->bli_item.li_mountp = mp;
	bip->bli_buf = bp;
	bip->bli_format.blf_type = XFS_LI_BUF;
	bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
	bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
	bip->bli_format.blf_map_size = map_size;
#ifdef XFS_BLI_TRACE
	bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_SLEEP);
#endif

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
	memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));
	bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
		bip->bli_item.li_bio_list =
				XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
	}
	XFS_BUF_SET_FSPRIVATE(bp, bip);
}
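
/*
 * Worked example (illustrative only, not from the original source):
 * assuming the usual 128-byte chunks (XFS_BLI_CHUNK == 128) and 32-bit
 * map words (NBWORD == 32), a 4096-byte buffer gives
 *
 *	chunks   = (4096 + 127) >> 7 = 32
 *	map_size = (32 + 32)    >> 5 = 2
 *
 * i.e. the bitmap describes 32 chunks, and the "+ NBWORD" rounds the
 * word count up, so two map words are reserved even though the 32 chunk
 * bits would fit in one.
 */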

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Mark the item as having some dirty data for
	 * quick reference in xfs_buf_item_dirty.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLI_SHIFT;
	last_bit = last >> XFS_BLI_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &(bip->bli_format.blf_data_map[word_num]);

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us.  The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}

	xfs_buf_item_log_debug(bip, first, last);
}
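
/*
 * Worked example (illustrative only, not in the original source):
 * assuming the usual 128-byte chunks and 32-bit map words, logging
 * bytes 500 through 1000 gives
 *
 *	first_bit   = 500  >> 7 = 3
 *	last_bit    = 1000 >> 7 = 7
 *	bits_to_set = 5
 *	bit         = 3,  end_bit = MIN(3 + 5, 32) = 8
 *	mask        = ((1 << 5) - 1) << 3 = 0xf8
 *
 * so bits 3-7 of blf_data_map[0] are set in the first (and only) word,
 * and the whole-word and trailing-word loops have nothing left to do.
 */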


/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	xfs_buftrace("XFS_RELSE", bp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
	XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
	if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
	    (XFS_BUF_IODONE_FUNC(bp) != NULL)) {
		ASSERT((XFS_BUF_ISUNINITIAL(bp)) == 0);
		XFS_BUF_CLR_IODONE_FUNC(bp);
	}

#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
	bip->bli_orig = NULL;
	kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
	bip->bli_logged = NULL;
#endif /* XFS_TRANS_DEBUG */

#ifdef XFS_BLI_TRACE
	ktrace_free(bip->bli_trace);
#endif
	kmem_zone_free(xfs_buf_item_zone, bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	lip->li_cb = cb;
	if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
		head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		XFS_BUF_SET_FSPRIVATE(bp, lip);
	}

	ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) ||
	       (XFS_BUF_IODONE_FUNC(bp) == NULL));
	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
}

STATIC void
xfs_buf_do_callbacks(
	xfs_buf_t	*bp,
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*nlip;

	while (lip != NULL) {
		nlip = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
		lip = nlip;
	}
}
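
/*
 * Illustrative sketch (not from the original source): after a buf log
 * item B has been set up by xfs_buf_item_init() and two other items I1
 * and I2 have been attached with xfs_buf_attach_iodone(), the list
 * rooted at b_fsprivate looks like
 *
 *	bp->b_fsprivate -> B -> I2 -> I1 -> NULL
 *
 * New items are always inserted just behind the head, so the buf log
 * item B stays first, which is what xfs_buf_item_relse() and the unpin
 * path rely on.  xfs_buf_do_callbacks() then walks this list in order.
 */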

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
	xfs_buf_t	*bp)
{
	xfs_log_item_t	*lip;
	static ulong	lasttime;
	static xfs_buftarg_t *lasttarg;
	xfs_mount_t	*mp;

	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);

	if (XFS_BUF_GETERROR(bp) != 0) {
		/*
		 * If we've already decided to shutdown the filesystem
		 * because of IO errors, there's no point in giving this
		 * a retry.
		 */
		mp = lip->li_mountp;
		if (XFS_FORCED_SHUTDOWN(mp)) {
			ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
			XFS_BUF_SUPER_STALE(bp);
			xfs_buftrace("BUF_IODONE_CB", bp);
			xfs_buf_do_callbacks(bp, lip);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);

			/*
			 * XFS_SHUT flag gets set when we go thru the
			 * entire buffer cache and deliberately start
			 * throwing away delayed write buffers.
			 * Since there's no biowait done on those,
			 * we should just brelse them.
			 */
			if (XFS_BUF_ISSHUT(bp)) {
				XFS_BUF_UNSHUT(bp);
				xfs_buf_relse(bp);
			} else {
				xfs_biodone(bp);
			}

			return;
		}

		if ((XFS_BUF_TARGET(bp) != lasttarg) ||
		    (time_after(jiffies, (lasttime + 5*HZ)))) {
			lasttime = jiffies;
			prdev("XFS write error in file system meta-data "
			      "block 0x%llx in %s",
			      XFS_BUF_TARGET(bp),
			      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
		}
		lasttarg = XFS_BUF_TARGET(bp);

		if (XFS_BUF_ISASYNC(bp)) {
			/*
			 * If the write was asynchronous then no one will be
			 * looking for the error.  Clear the error state
			 * and write the buffer out again as a delayed write.
			 *
			 * XXXsup This is OK, so long as we catch these
			 * before we start the umount; we don't want these
			 * DELWRI metadata bufs to be hanging around.
			 */
			XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */

			if (!(XFS_BUF_ISSTALE(bp))) {
				XFS_BUF_DELAYWRITE(bp);
				XFS_BUF_DONE(bp);
				XFS_BUF_SET_START(bp);
			}
			ASSERT(XFS_BUF_IODONE_FUNC(bp));
			xfs_buftrace("BUF_IODONE ASYNC", bp);
			xfs_buf_relse(bp);
		} else {
			/*
			 * If the write of the buffer was not asynchronous,
			 * then we want to make sure to return the error
			 * to the caller of bwrite().  Because of this we
			 * cannot clear the B_ERROR state at this point.
			 * Instead we install a callback function that
			 * will be called when the buffer is released, and
			 * that routine will clear the error state and
			 * set the buffer to be written out again after
			 * some delay.
			 */
			/*
			 * We actually overwrite the existing b_relse
			 * function at times, but we're going to be
			 * shutting down anyway.
			 */
			XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
			XFS_BUF_DONE(bp);
			XFS_BUF_V_IODONESEMA(bp);
		}
		return;
	}
#ifdef XFSERRORDEBUG
	xfs_buftrace("XFS BUFCB NOERR", bp);
#endif
	xfs_buf_do_callbacks(bp, lip);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	xfs_biodone(bp);
}

/*
 * This is a callback routine attached to a buffer which gets an error
 * when being written out synchronously.
 */
STATIC void
xfs_buf_error_relse(
	xfs_buf_t	*bp)
{
	xfs_log_item_t	*lip;
	xfs_mount_t	*mp;

	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
	mp = (xfs_mount_t *)lip->li_mountp;
	ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);

	XFS_BUF_STALE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_ERROR(bp,0);
	xfs_buftrace("BUF_ERROR_RELSE", bp);
	if (! XFS_FORCED_SHUTDOWN(mp))
		xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
	/*
	 * We have to unpin the pinned buffers so do the
	 * callbacks.
	 */
	xfs_buf_do_callbacks(bp, lip);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
	xfs_buf_relse(bp);
}


/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
/* ARGSUSED */
void
xfs_buf_iodone(
	xfs_buf_t		*bp,
	xfs_buf_log_item_t	*bip)
{
	struct xfs_mount	*mp;
	SPLDECL(s);

	ASSERT(bip->bli_buf == bp);

	mp = bip->bli_item.li_mountp;

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already.  That's because we simulate the
	 * log-committed callbacks to unpin these buffers.  Or we may never
	 * have put this item on the AIL because the transaction was
	 * aborted forcibly.  xfs_trans_delete_ail() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 */
	AIL_LOCK(mp,s);
	/*
	 * xfs_trans_delete_ail() drops the AIL lock.
	 */
	xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);

#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
	bip->bli_orig = NULL;
	kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
	bip->bli_logged = NULL;
#endif /* XFS_TRANS_DEBUG */

#ifdef XFS_BLI_TRACE
	ktrace_free(bip->bli_trace);
#endif
	kmem_zone_free(xfs_buf_item_zone, bip);
}

#if defined(XFS_BLI_TRACE)
void
xfs_buf_item_trace(
	char			*id,
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_t		*bp;
	ASSERT(bip->bli_trace != NULL);

	bp = bip->bli_buf;
	ktrace_enter(bip->bli_trace,
		     (void *)id,
		     (void *)bip->bli_buf,
		     (void *)((unsigned long)bip->bli_flags),
		     (void *)((unsigned long)bip->bli_recur),
		     (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
		     (void *)((unsigned long)
			      (0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
		     (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
		     (void *)((unsigned long)XFS_BUF_COUNT(bp)),
		     (void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
		     XFS_BUF_FSPRIVATE(bp, void *),
		     XFS_BUF_FSPRIVATE2(bp, void *),
		     (void *)(unsigned long)XFS_BUF_ISPINNED(bp),
		     (void *)XFS_BUF_IODONE_FUNC(bp),
		     (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
		     (void *)bip->bli_item.li_desc,
		     (void *)((unsigned long)bip->bli_item.li_flags));
}
#endif /* XFS_BLI_TRACE */