/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		blip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_log_item.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_log_item;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);

	/*
	 * Attach the transaction to the buffer so that
	 * xfs_trans_buf_item_match() can find it from
	 * xfs_trans_get_buf_map() and friends.
	 */
	bp->b_transp = tp;

}
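/*
 * Illustrative note (not part of the original source): the public wrapper
 * xfs_trans_bjoin() defined just below is how a caller hands a buffer it
 * already holds locked over to a transaction.  A hedged sketch, assuming the
 * buffer was obtained with a plain xfs_buf_read() outside of any transaction
 * and that "ops" is a suitable verifier:
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, ops);
 *	if (bp)
 *		xfs_trans_bjoin(tp, bp);
 *
 * Once joined, the buffer is released through xfs_trans_brelse() or by the
 * transaction commit, not by a bare xfs_buf_relse().
 */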
void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_log_item);
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;
	struct xfs_buf_log_item	*bip;

	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			bp->b_flags |= XBF_DONE;
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_log_item);
	return bp;
}

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need a lookup with xfs_trans_buf_item_match() here, because
 * the superblock buffer is a private buffer which we keep a pointer to
 * in the mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(
	xfs_trans_t		*tp,
	struct xfs_mount	*mp,
	int			flags)
{
	xfs_buf_t		*bp;
	struct xfs_buf_log_item	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL)
		return xfs_getsb(mp, flags);

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_log_item);
	return bp;
}
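/*
 * Illustration (not part of the original source): a hedged sketch of a
 * typical caller of the code above, using the single-map wrapper
 * xfs_trans_get_buf() declared in xfs_trans.h.  Error handling is
 * simplified and "blkno"/"numblks" are assumed to be computed elsewhere:
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	...initialise the new block's contents...
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 *
 * Calling xfs_trans_get_buf() a second time for the same extent within the
 * same transaction does not deadlock: xfs_trans_buf_item_match() finds the
 * already joined buffer and only bli_recur is bumped.
 */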
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either.  Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		bip = bp->b_log_item;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
	if (!bp) {
		if (!(flags & XBF_TRYLOCK))
			return -ENOMEM;
		return tp ? 0 : -EAGAIN;
	}

	/*
	 * If we've had a read error, then the contents of the buffer are
	 * invalid and should not be used.  To ensure that a followup read
	 * tries to pull the buffer from disk again, we clear the XBF_DONE
	 * flag and mark the buffer stale.  This ensures that anyone who has
	 * a current reference to the buffer will interpret its contents
	 * correctly and future cache lookups will also treat it as an empty,
	 * uninitialised buffer.
	 */
	if (bp->b_error) {
		error = bp->b_error;
		if (!XFS_FORCED_SHUTDOWN(mp))
			xfs_buf_ioerror_alert(bp, __func__);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);

		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_log_item);
	}
	*bpp = bp;
	return 0;

}
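/*
 * Illustration (not part of the original source): a hedged sketch of the
 * common calling pattern for the single-map wrapper xfs_trans_read_buf()
 * declared in xfs_trans.h.  The verifier shown (&xfs_agf_buf_ops) is only
 * an example of a typical ops argument:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
 *				   0, &bp, &xfs_agf_buf_ops);
 *	if (error)
 *		return error;
 *	...bp is now locked, verified and, when tp != NULL, joined to tp...
 *
 * Note the -EFSBADCRC to -EFSCORRUPTED promotion above: a CRC failure
 * reported by the verifier is returned to callers as corruption, not as a
 * plain I/O error.
 */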
/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines.  If the buffer has been
 * modified within this transaction, decrement the recursion count but
 * do not release the buffer even if the count goes to zero.  If the
 * buffer has not been modified within the transaction, decrement the
 * recursion count and release the buffer if the recursion count goes
 * to zero.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip;
	int			freed;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_log_item;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then we must free it
	 * before releasing the buffer back to the free pool.
	 *
	 * If the fs has shutdown and we dropped the last reference, it may fall
	 * on us to release a (possibly dirty) bli if it never made it to the
	 * AIL (e.g., the aborted unpin already happened and didn't release it
	 * due to our reference). Since we're already shutdown and need xa_lock,
	 * just force remove from the AIL and release the bli here.
	 */
	if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
		xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_buf_item_relse(bp);
	} else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}
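/*
 * Illustration (not part of the original source): xfs_trans_brelse() is the
 * drop side of the recursion counting above.  A hedged sketch of a read-only
 * lookup that examines a block and then lets go of it before the transaction
 * commits, assuming the buffer has not been dirtied:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
 *				   0, &bp, ops);
 *	if (error)
 *		return error;
 *	...inspect the buffer contents, decide nothing needs changing...
 *	xfs_trans_brelse(tp, bp);
 *
 * Had the buffer been dirtied or invalidated in this transaction, the call
 * above would return without unlocking it and the buffer would stay attached
 * to the transaction until commit.
 */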
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_unlock() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}

/*
 * Mark a buffer dirty in the transaction.
 */
void
xfs_trans_dirty_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	bp->b_flags |= XBF_DONE;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(bp->b_flags & XBF_STALE);
		bp->b_flags &= ~XBF_STALE;
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

	tp->t_flags |= XFS_TRANS_DIRTY;
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
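/*
 * Illustration (not part of the original source): XFS_BLI_HOLD is what lets a
 * buffer survive a transaction commit while staying locked, which is the
 * basis of the "roll the transaction but keep the buffer" pattern.  A hedged
 * sketch; the exact rolling helper varies between kernel versions:
 *
 *	xfs_trans_bhold(tp, bp);	keep bp locked across the commit
 *	error = xfs_trans_roll(&tp);	commit and start a new transaction
 *	if (error)
 *		return error;
 *	xfs_trans_bjoin(tp, bp);	attach the still-locked buffer again
 *
 * Without the bhold, committing the transaction would unlock the buffer and
 * another thread could grab it between the two transactions.
 */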
/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			first,
	uint			last)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));

	xfs_trans_dirty_buf(tp, bp);

	trace_xfs_trans_log_buf(bip);
	xfs_buf_item_log(bip, first, last);
}


/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	tp->t_flags |= XFS_TRANS_DIRTY;
}
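/*
 * Illustration (not part of the original source): xfs_trans_log_buf() takes
 * inclusive byte offsets relative to the start of the buffer, so callers
 * usually derive them from structure offsets.  A hedged sketch of logging a
 * single field of a hypothetical on-disk header (struct xfs_example_hdr and
 * its eh_count member are made up for illustration):
 *
 *	first = offsetof(struct xfs_example_hdr, eh_count);
 *	last  = first + sizeof(((struct xfs_example_hdr *)0)->eh_count) - 1;
 *	xfs_trans_log_buf(tp, bp, first, last);
 *
 * When the block is being freed rather than modified, the caller uses
 * xfs_trans_binval(tp, bp) above instead, which cancels any previously
 * logged ranges and prevents the stale contents from ever being written back.
 */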
/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can be
 * transferred to the buffer's log format structure so that we'll know what to
 * do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery:
 * any replay of the inodes in the buffer needs to be
 * prevented as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction.  This means that the
 * contents of the buffer are not recorded in the transaction but it is
 * tracked in the AIL as though it was.  This allows us to record logical
 * changes in transactions rather than the physical changes we make to the
 * buffer without changing writeback ordering constraints of metadata
 * buffers.
 */
bool
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (xfs_buf_item_dirty_format(bip))
		return false;

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);

	/*
	 * We don't log a dirty range of an ordered buffer but it still needs
	 * to be marked dirty and flagged as logged.
	 */
	xfs_trans_dirty_buf(tp, bp);
	return true;
}
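/*
 * Illustration (not part of the original source): a hedged sketch of how the
 * flag helpers above are typically combined when a freshly allocated inode
 * cluster buffer is initialised (compare xfs_ialloc_inode_init(); the length
 * handling here is simplified):
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	...write the new on-disk inode cores into the buffer...
 *	xfs_trans_inode_alloc_buf(tp, bp);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(numblks) - 1);
 *
 * The XFS_BLI_INODE_ALLOC_BUF flag ensures the whole buffer is replayed after
 * a crash even if it is later relogged as a plain inode buffer.
 */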
/*
 * Set the type of the buffer for log recovery so that it can correctly
 * identify and hence attach the correct buffer ops to the buffer after
 * replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}

void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_log_item;
	struct xfs_buf_log_item	*dbip = dst_bp->b_log_item;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t		*tp,
	xfs_buf_t		*bp,
	uint			type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	xfs_trans_buf_set_type(tp, bp, type);
}
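/*
 * Illustration (not part of the original source): a hedged sketch of the
 * usual pairing of xfs_trans_dquot_buf() with xfs_trans_log_buf() when a
 * dquot block is initialised (compare xfs_qm_init_dquot_blk()).  "len" here
 * stands for the dquot chunk length in bytes and is an assumption of the
 * sketch:
 *
 *	xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
 *	xfs_trans_log_buf(tp, bp, 0, len - 1);
 *
 * The XFS_BLF_*DQUOT_BUF flag ends up in the buf log format so that recovery
 * can skip replaying the buffer if a corresponding quotaoff was logged.
 */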