// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item	*lip;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		blip = (struct xfs_buf_log_item *)lip;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_log_item.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_log_item;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Attach the item to the transaction so we can find it in
	 * xfs_trans_get_buf() and friends.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);
	bp->b_transp = tp;
}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_log_item);
}
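
/*
 * For illustration, a caller that obtained and locked a buffer outside of
 * any transaction can hand ownership of it to a transaction like this.
 * A minimal sketch; "blkno", "numblks" and the error handling below are
 * assumed placeholders, not taken from a real call site:
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_get(mp->m_ddev_targp, blkno, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *	xfs_trans_bjoin(tp, bp);
 *
 * From here on bp belongs to tp; committing or cancelling the
 * transaction is what unlocks it.
 */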

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;

	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			bp->b_flags |= XBF_DONE;
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_log_item);
	return bp;
}
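
/*
 * For illustration, most callers go through the single-block
 * xfs_trans_get_buf() wrapper rather than the map variant.  A sketch,
 * with "blkno" and "numblks" as assumed placeholders; in this version
 * the wrapper returns NULL on failure:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0);
 *	if (!bp)
 *		return -ENOMEM;
 *
 * The returned buffer is locked and joined to tp, ready to be
 * initialised and logged.
 */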

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use xfs_trans_buf_item_match() here, because the
 * superblock buffer is a private buffer which we keep a pointer to in
 * the mount structure.
 */
struct xfs_buf *
xfs_trans_getsb(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL)
		return xfs_getsb(mp, flags);

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_log_item);
	return bp;
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either.  Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		bip = bp->b_log_item;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
	if (!bp) {
		if (!(flags & XBF_TRYLOCK))
			return -ENOMEM;
		return tp ? 0 : -EAGAIN;
	}

	/*
	 * If we've had a read error, then the contents of the buffer are
	 * invalid and should not be used.  To ensure that a followup read
	 * tries to pull the buffer from disk again, we clear the XBF_DONE
	 * flag and mark the buffer stale.  This ensures that anyone who has
	 * a current reference to the buffer will interpret its contents
	 * correctly and future cache lookups will also treat it as an empty,
	 * uninitialised buffer.
	 */
	if (bp->b_error) {
		error = bp->b_error;
		if (!XFS_FORCED_SHUTDOWN(mp))
			xfs_buf_ioerror_alert(bp, __func__);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);

		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_log_item);
	}
	*bpp = bp;
	return 0;
}
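
/*
 * For illustration, a typical read of verified metadata goes through the
 * single-map xfs_trans_read_buf() wrapper.  A sketch; the block number
 * placeholder and the choice of the AGF verifier here are assumed:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno,
 *			numblks, 0, &bp, &xfs_agf_buf_ops);
 *	if (error)
 *		return error;
 *
 * On success bp is locked, verified by the given ops, and joined to tp.
 */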

/*
 * Release a buffer previously joined to the transaction.  If the buffer is
 * modified within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and release
 * the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not already dirty before this
 * transaction began, then also free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
 */
void
xfs_trans_brelse(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);

	if (!tp) {
		xfs_buf_relse(bp);
		return;
	}

	trace_xfs_trans_brelse(bip);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * If the release is for a recursive lookup, then decrement the count
	 * and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is invalidated or dirty in this transaction, we can't
	 * release it until we commit.
	 */
	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
		return;
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	/*
	 * Unlink the log item from the transaction and clear the hold flag, if
	 * set.  We wouldn't want the next user of the buffer to get confused.
	 */
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_trans_del_item(&bip->bli_item);
	bip->bli_flags &= ~XFS_BLI_HOLD;

	/* drop the reference to the bli */
	xfs_buf_item_put(bip);

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_unlock() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}
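
/*
 * The hold/release pair is what lets a buffer survive a transaction roll:
 * the hold keeps the buffer locked when the first transaction commits, and
 * the caller then rejoins it to the new transaction.  A minimal sketch of
 * the usual pattern, with error handling abbreviated:
 *
 *	xfs_trans_bhold(tp, bp);	(keep bp locked across the commit)
 *	error = xfs_trans_roll(&tp);	(commit tp, start a new one)
 *	if (error)
 *		return error;
 *	xfs_trans_bjoin(tp, bp);	(rejoin bp to the new transaction)
 */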

/*
 * Mark a buffer dirty in the transaction.
 */
void
xfs_trans_dirty_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * at I/O submission time so that it won't get written to disk.
	 */
	bp->b_flags |= XBF_DONE;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(bp->b_flags & XBF_STALE);
		bp->b_flags &= ~XBF_STALE;
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			first,
	uint			last)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));

	xfs_trans_dirty_buf(tp, bp);

	trace_xfs_trans_log_buf(bip);
	xfs_buf_item_log(bip, first, last);
}
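
/*
 * For illustration, callers typically compute the inclusive byte range
 * with offsetof() against the on-disk structure held in the buffer.  A
 * sketch; logging a single __be64 field of the on-disk superblock is an
 * assumed example, not a real call site:
 *
 *	uint	first = offsetof(struct xfs_dsb, sb_icount);
 *
 *	xfs_trans_log_buf(tp, bp, first,
 *			  first + sizeof(__be64) - 1);
 */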

/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
	tp->t_flags |= XFS_TRANS_DIRTY;
}

/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can
 * be transferred to the buffer's log format structure so that we'll know
 * what to do at recovery time.
 */
void
xfs_trans_inode_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery:
 * any replay of the inodes in the buffer needs to be
 * prevented, as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}
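
/*
 * For illustration, freeing an inode cluster pairs the two calls above:
 * the buffer is flagged as a stale inode buffer and then invalidated so
 * that neither the inodes nor the buffer itself get replayed or written
 * back.  A sketch of the pattern, not a verbatim call site:
 *
 *	xfs_trans_stale_inode_buf(tp, bp);
 *	xfs_trans_binval(tp, bp);
 */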

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction.  This means that the
 * contents of the buffer are not recorded in the transaction, but the
 * buffer is still tracked in the AIL as though it had been logged.  This
 * allows us to record logical changes in transactions rather than the
 * physical changes we make to the buffer without changing writeback
 * ordering constraints of metadata buffers.
 */
bool
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (xfs_buf_item_dirty_format(bip))
		return false;

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);

	/*
	 * We don't log a dirty range of an ordered buffer, but it still
	 * needs to be marked dirty and that it has been logged.
	 */
	xfs_trans_dirty_buf(tp, bp);
	return true;
}

/*
 * Set the type of the buffer for log recovery so that it can correctly
 * identify the buffer and hence attach the correct buffer ops to it
 * after replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}

void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_log_item;
	struct xfs_buf_log_item	*dbip = dst_bp->b_log_item;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	xfs_trans_buf_set_type(tp, bp, type);
}
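
/*
 * For illustration, a caller initialising a brand-new metadata block
 * typically stamps the buffer type before logging it, so that log
 * recovery can attach the matching verifier on replay.  A sketch; the
 * btree type and the full-buffer logging range here are assumed:
 *
 *	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */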