// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

kmem_zone_t	*xfs_cui_zone;
kmem_zone_t	*xfs_cud_zone;

static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}

STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kmem_free(cuip);
	else
		kmem_cache_free(xfs_cui_zone, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (atomic_dec_and_test(&cuip->cui_refcount)) {
		xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_cui_item_free(cuip);
	}
}

STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the given cui log
 * item. We use only 1 iovec, and we point that at the cui_log_format
 * structure embedded in the cui item. It is at this point that we assert
 * that all of the extent slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);

	cuip->cui_format.cui_type = XFS_LI_CUI;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}
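/*
 * Presumably (the helper lives in xfs_refcount_item.h, which is not shown
 * here), xfs_cui_log_format_sizeof() accounts for the flexible array of
 * physical extents at the tail of the format structure, roughly:
 *
 *	sizeof(struct xfs_cui_log_format) +
 *		nextents * sizeof(struct xfs_phys_extent)
 *
 * which is why both ->iop_size and ->iop_format above size the single iovec
 * with the same expression.
 */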
/*
 * The unpin operation is the last place a CUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the CUI transaction has been successfully committed to
 * make it this far. Therefore, we expect whoever committed the CUI to either
 * construct and commit the CUD or drop the CUD's reference in the event of
 * error. Simply drop the log's CUI reference now that the log is done with
 * it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a cui item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 0);
	else
		cuip = kmem_zone_zalloc(xfs_cui_zone, 0);

	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}

STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the given cud log
 * item. We use only 1 iovec, and we point that at the cud_log_format
 * structure embedded in the cud item; unlike the CUI, the CUD carries no
 * extent array of its own.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	cudp->cud_format.cud_type = XFS_LI_CUD;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kmem_cache_free(xfs_cud_zone, cudp);
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
};

static struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_zone_zalloc(xfs_cud_zone, 0);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
			  &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}
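/*
 * Note that cud_cui_id above carries the address-derived cui_id assigned in
 * xfs_cui_init(); log recovery later uses that id (via ->iop_match and
 * xlog_recover_release_intent(), both further down in this file) to pair a
 * recovered CUD with the CUI it completes.
 */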
/*
 * Finish a refcount update and log it to the CUD. Note that the transaction
 * is marked dirty regardless of whether the refcount update succeeds or
 * fails to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, type, startblock,
			blockcount, new_fsb, new_len, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*refc,
	enum xfs_refcount_intent_type	type)
{
	refc->pe_flags = 0;
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		refc->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*refc)
{
	uint				next_extent;
	struct xfs_phys_extent		*ext;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	ext = &cuip->cui_format.cui_extents[next_extent];
	ext->pe_startblock = refc->ri_startblock;
	ext->pe_len = refc->ri_blockcount;
	xfs_trans_set_refcount_flags(ext, refc->ri_type);
}

static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*refc;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(refc, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, refc);
	return &cuip->cui_item;
}
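/*
 * For context, callers never build a CUI by hand; a refcount change is
 * queued through helpers like xfs_refcount_increase_extent() (used in the
 * recovery requeue path below), which roughly allocate an intent and hand
 * it to the deferred-ops machinery. A sketch, assuming the xfs_refcount.c
 * internals of this era:
 *
 *	ri->ri_type = XFS_REFCOUNT_INCREASE;
 *	ri->ri_startblock = irec->br_startblock;
 *	ri->ri_blockcount = irec->br_blockcount;
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
 *
 * The defer code later feeds the accumulated list to ->create_intent above.
 */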
/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*refc;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_aglen;
	int				error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
			refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
			&new_fsb, &new_aglen, state);

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && new_aglen > 0) {
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
		return -EAGAIN;
	}
	kmem_free(refc);
	return error;
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*refc;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_free(refc);
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup	= xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};
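/*
 * Roughly, xfs_defer_finish() drives the hooks above in order: it calls
 * ->create_intent to log the CUI, rolls the transaction, calls ->create_done
 * to log the CUD, then applies each update via ->finish_item. The -EAGAIN
 * return from xfs_refcount_update_finish_item() asks the defer code to relog
 * the remaining work as a fresh intent rather than overrun the transaction
 * reservation.
 */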
/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct xfs_trans		*parent_tp)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_phys_extent		*refc;
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = parent_tp->t_mountp;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_len;
	unsigned int			refc_type;
	bool				op_ok;
	bool				requeue_only = false;
	enum xfs_refcount_intent_type	type;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the CUI. If
	 * any are bad, then assume that all are bad and just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
		switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    refc->pe_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    refc->pe_len >= mp->m_sb.sb_agblocks ||
		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)) {
			/*
			 * This will pull the CUI from the AIL and free the
			 * memory associated with it.
			 */
			xfs_cui_release(cuip);
			return -EFSCORRUPTED;
		}
	}
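	/*
	 * (The daddr round-trip in the loop above is presumably there to
	 * normalize the AG-encoded startblock into a linear block number,
	 * so that the sb_dblocks comparison is a meaningful device bounds
	 * check.)
	 */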
	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction. All refcount
	 * updates manage reservation usage internally and dynamically by
	 * deferring work that won't fit in the transaction. Normally, any
	 * work that needs to be deferred gets attached to the same defer_ops
	 * that scheduled the refcount update. However, we're in log recovery
	 * here, so we use the passed-in defer_ops to finish up any work that
	 * doesn't fit. We need to reserve enough blocks to handle a full
	 * btree split on either end of the refcount range.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Recovery stashes all deferred ops during intent processing and
	 * finishes them on completion. Transfer current dfops state to this
	 * transaction and transfer the result back before we return.
	 */
	xfs_defer_move(tp, parent_tp);
	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			type = refc_type;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		if (requeue_only) {
			new_fsb = refc->pe_startblock;
			new_len = refc->pe_len;
		} else
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
					type, refc->pe_startblock, refc->pe_len,
					&new_fsb, &new_len, &rcur);
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (new_len > 0) {
			irec.br_startblock = new_fsb;
			irec.br_blockcount = new_len;
			switch (type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_defer_move(parent_tp, tp);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_defer_move(parent_tp, tp);
	xfs_trans_cancel(tp);
	return error;
}

STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
};

/*
 * Copy a CUI format buffer from the given buf, and into the destination CUI
 * format structure. The CUI/CUD items were designed not to need any special
 * alignment handling.
 */
static int
xfs_cui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_cui_log_format	*dst_cui_fmt)
{
	struct xfs_cui_log_format	*src_cui_fmt;
	uint				len;

	src_cui_fmt = buf->i_addr;
	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);

	if (buf->i_len == len) {
		memcpy(dst_cui_fmt, src_cui_fmt, len);
		return 0;
	}
	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
	return -EFSCORRUPTED;
}

/*
 * This routine is called to create an in-core extent refcount update item
 * from the cui format structure which was logged on disk. It allocates an
 * in-core cui, copies the extents from the format structure into it, and
 * adds the cui to the AIL with the given LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;

	cui_formatp = item->ri_buf[0].i_addr;

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
	if (error) {
		xfs_cui_item_free(cuip);
		return error;
	}
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};
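/*
 * For context: during pass 2 each CUI is inserted into the AIL (above) and
 * each CUD that made it into the log releases its CUI (below). Any CUI still
 * in the AIL once recovery finishes represents an update that never
 * completed, and is replayed through ->iop_recover.
 */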
/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if
 * it was still in the log. To do this it searches the AIL for the CUI with
 * an id equal to that in the CUD format structure. If we find it, we drop
 * the reference the CUD holds on it, which removes the CUI from the AIL and
 * frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};
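/*
 * For reference, the two zones used above are expected to be created during
 * mount setup (xfs_init_zones() in xfs_super.c; names and flags may differ
 * by kernel version), approximately:
 *
 *	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
 *			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
 *			0, 0, NULL);
 *	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
 *			sizeof(struct xfs_cud_log_item), 0, 0, NULL);
 */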