// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_allocbt_cur_cache;

STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp, &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
			bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}
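
/*
 * The by-size (cnt) btree keeps its records sorted by (length, startblock),
 * so the last record in the tree is always the largest free extent in the
 * AG.  The LASTREC handling below mirrors that record into agf_longest (and
 * the in-core pagf_longest) so that allocation can size requests without
 * walking the tree.
 */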

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur		*cur,
	const struct xfs_btree_block	*block,
	const union xfs_btree_rec	*rec,
	int				ptr,
	int				reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
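
/*
 * The comparison helpers below follow the usual three-way convention: a
 * negative, zero or positive result means the first operand sorts before,
 * equal to or after the second.  key_diff compares a search key against
 * the record cached in the cursor (bc_rec), while diff_two_keys compares
 * two keys directly.  The by-block (bno) tree orders on startblock alone;
 * the by-size (cnt) tree orders on blockcount and breaks ties with
 * startblock.
 */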

STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;
	int64_t				diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	int64_t				diff;

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}

static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can
	 * only check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're
	 * verifying against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_alloc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}
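
/*
 * Each buf_ops carries a pair of magic numbers: slot 0 is the pre-CRC (v4)
 * on-disk value and slot 1 the CRC-enabled (v5) value.  Note that the read
 * verifier checks the CRC before looking at structure, so a bad checksum
 * is reported as -EFSBADCRC rather than -EFSCORRUPTED.
 */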

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
};

/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
			xfs_allocbt_cur_cache);
	cur->bc_ag.abt.active = false;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}
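
/*
 * A minimal usage sketch for the constructor below (hypothetical caller,
 * error handling elided): the caller holds a locked AGF buffer, and every
 * cursor must eventually be released with xfs_btree_del_cursor(), e.g.:
 *
 *	cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
 *	error = xfs_alloc_lookup_eq(cur, bno, len, &stat);
 *	...
 *	xfs_btree_del_cursor(cur, error);
 */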

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	else
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

	cur->bc_ag.agbp = agbp;

	return cur;
}

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}

/* Calculate number of records in an alloc btree block. */
static inline unsigned int
xfs_allocbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
	return xfs_allocbt_block_maxrecs(blocklen, leaf);
}
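
/*
 * Worked example for the helpers above, assuming a 4096-byte block on a
 * V5 (CRC) filesystem where the short-form btree block header is 56 bytes:
 * blocklen = 4096 - 56 = 4040, so a leaf holds 4040 / 8 = 505 records and
 * an interior node holds 4040 / (8 + 4) = 336 key/pointer pairs, given
 * 8-byte records and keys and 4-byte block pointers.
 */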

/* Free space btrees are at their largest when every other block is free. */
#define XFS_MAX_FREESP_RECORDS	((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}

int __init
xfs_allocbt_init_cur_cache(void)
{
	xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
			xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_allocbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_allocbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_allocbt_cur_cache);
	xfs_allocbt_cur_cache = NULL;
}
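
/*
 * A minimal sketch of how the cache helpers above pair up in an init/exit
 * path (hypothetical caller; in the kernel they are driven by the generic
 * btree cache setup during XFS module initialization):
 *
 *	if (xfs_allocbt_init_cur_cache())
 *		return -ENOMEM;
 *	...
 *	xfs_allocbt_destroy_cur_cache();
 */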