10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
230f712c9SDave Chinner /*
330f712c9SDave Chinner * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
430f712c9SDave Chinner * All Rights Reserved.
530f712c9SDave Chinner */
630f712c9SDave Chinner #include "xfs.h"
730f712c9SDave Chinner #include "xfs_fs.h"
830f712c9SDave Chinner #include "xfs_shared.h"
930f712c9SDave Chinner #include "xfs_format.h"
1030f712c9SDave Chinner #include "xfs_log_format.h"
1130f712c9SDave Chinner #include "xfs_trans_resv.h"
1230f712c9SDave Chinner #include "xfs_mount.h"
1330f712c9SDave Chinner #include "xfs_btree.h"
14e6eb33d9SDarrick J. Wong #include "xfs_btree_staging.h"
1530f712c9SDave Chinner #include "xfs_alloc_btree.h"
1630f712c9SDave Chinner #include "xfs_alloc.h"
1730f712c9SDave Chinner #include "xfs_extent_busy.h"
1830f712c9SDave Chinner #include "xfs_error.h"
1930f712c9SDave Chinner #include "xfs_trace.h"
2030f712c9SDave Chinner #include "xfs_trans.h"
219bbafc71SDave Chinner #include "xfs_ag.h"
2230f712c9SDave Chinner
23e7720afaSDarrick J. Wong static struct kmem_cache *xfs_allocbt_cur_cache;
2430f712c9SDave Chinner
2530f712c9SDave Chinner STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(struct xfs_btree_cur * cur)2630f712c9SDave Chinner xfs_allocbt_dup_cursor(
2730f712c9SDave Chinner struct xfs_btree_cur *cur)
2830f712c9SDave Chinner {
2930f712c9SDave Chinner return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
30289d38d2SDave Chinner cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
3130f712c9SDave Chinner }
3230f712c9SDave Chinner
/*
 * Point the on-disk AGF at a new btree root block and log the change.
 *
 * @ptr holds the AG block number of the new root (short-form pointer) and
 * @inc is the adjustment (+1/-1) to the recorded tree height.  The in-core
 * perag copy of the level is kept in sync with the AGF.
 */
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	const union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	/* A zero root block number would mean an uninitialised/corrupt AGF. */
	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	/* Mirror the level change into the in-core perag. */
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
5130f712c9SDave Chinner
/*
 * Allocate a block for the free space btree from the AGFL.
 *
 * On success returns 0 with *stat == 1 and the new AG block number in
 * @new.  An empty freelist is not an error: we return 0 with *stat == 0
 * and the caller must give up.  @start is unused here - AGFL blocks are
 * taken in freelist order.
 */
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp, &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		/* Freelist exhausted - no block, but not a failure either. */
		*stat = 0;
		return 0;
	}

	/* Account the block to the allocbt block counter and clear busy state. */
	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}
8130f712c9SDave Chinner
8230f712c9SDave Chinner STATIC int
xfs_allocbt_free_block(struct xfs_btree_cur * cur,struct xfs_buf * bp)8330f712c9SDave Chinner xfs_allocbt_free_block(
8430f712c9SDave Chinner struct xfs_btree_cur *cur,
8530f712c9SDave Chinner struct xfs_buf *bp)
8630f712c9SDave Chinner {
87576af732SDave Chinner struct xfs_buf *agbp = cur->bc_ag.agbp;
8830f712c9SDave Chinner xfs_agblock_t bno;
8930f712c9SDave Chinner int error;
9030f712c9SDave Chinner
9104fcad80SDave Chinner bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
928c392eb2SDave Chinner error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
938c392eb2SDave Chinner bno, 1);
9430f712c9SDave Chinner if (error)
9530f712c9SDave Chinner return error;
9630f712c9SDave Chinner
9716eaab83SBrian Foster atomic64_dec(&cur->bc_mp->m_allocbt_blks);
9845d06621SDave Chinner xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
9930f712c9SDave Chinner XFS_EXTENT_BUSY_SKIP_DISCARD);
10030f712c9SDave Chinner return 0;
10130f712c9SDave Chinner }
10230f712c9SDave Chinner
10330f712c9SDave Chinner /*
10430f712c9SDave Chinner * Update the longest extent in the AGF
10530f712c9SDave Chinner */
10630f712c9SDave Chinner STATIC void
xfs_allocbt_update_lastrec(struct xfs_btree_cur * cur,const struct xfs_btree_block * block,const union xfs_btree_rec * rec,int ptr,int reason)10730f712c9SDave Chinner xfs_allocbt_update_lastrec(
10830f712c9SDave Chinner struct xfs_btree_cur *cur,
10960e265f7SDarrick J. Wong const struct xfs_btree_block *block,
11060e265f7SDarrick J. Wong const union xfs_btree_rec *rec,
11130f712c9SDave Chinner int ptr,
11230f712c9SDave Chinner int reason)
11330f712c9SDave Chinner {
114576af732SDave Chinner struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
11530f712c9SDave Chinner struct xfs_perag *pag;
11630f712c9SDave Chinner __be32 len;
11730f712c9SDave Chinner int numrecs;
11830f712c9SDave Chinner
11930f712c9SDave Chinner ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);
12030f712c9SDave Chinner
12130f712c9SDave Chinner switch (reason) {
12230f712c9SDave Chinner case LASTREC_UPDATE:
12330f712c9SDave Chinner /*
12430f712c9SDave Chinner * If this is the last leaf block and it's the last record,
12530f712c9SDave Chinner * then update the size of the longest extent in the AG.
12630f712c9SDave Chinner */
12730f712c9SDave Chinner if (ptr != xfs_btree_get_numrecs(block))
12830f712c9SDave Chinner return;
12930f712c9SDave Chinner len = rec->alloc.ar_blockcount;
13030f712c9SDave Chinner break;
13130f712c9SDave Chinner case LASTREC_INSREC:
13230f712c9SDave Chinner if (be32_to_cpu(rec->alloc.ar_blockcount) <=
13330f712c9SDave Chinner be32_to_cpu(agf->agf_longest))
13430f712c9SDave Chinner return;
13530f712c9SDave Chinner len = rec->alloc.ar_blockcount;
13630f712c9SDave Chinner break;
13730f712c9SDave Chinner case LASTREC_DELREC:
13830f712c9SDave Chinner numrecs = xfs_btree_get_numrecs(block);
13930f712c9SDave Chinner if (ptr <= numrecs)
14030f712c9SDave Chinner return;
14130f712c9SDave Chinner ASSERT(ptr == numrecs + 1);
14230f712c9SDave Chinner
14330f712c9SDave Chinner if (numrecs) {
14430f712c9SDave Chinner xfs_alloc_rec_t *rrp;
14530f712c9SDave Chinner
14630f712c9SDave Chinner rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
14730f712c9SDave Chinner len = rrp->ar_blockcount;
14830f712c9SDave Chinner } else {
14930f712c9SDave Chinner len = 0;
15030f712c9SDave Chinner }
15130f712c9SDave Chinner
15230f712c9SDave Chinner break;
15330f712c9SDave Chinner default:
15430f712c9SDave Chinner ASSERT(0);
15530f712c9SDave Chinner return;
15630f712c9SDave Chinner }
15730f712c9SDave Chinner
15830f712c9SDave Chinner agf->agf_longest = len;
15992a00544SGao Xiang pag = cur->bc_ag.agbp->b_pag;
16030f712c9SDave Chinner pag->pagf_longest = be32_to_cpu(len);
161576af732SDave Chinner xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
16230f712c9SDave Chinner }
16330f712c9SDave Chinner
16430f712c9SDave Chinner STATIC int
xfs_allocbt_get_minrecs(struct xfs_btree_cur * cur,int level)16530f712c9SDave Chinner xfs_allocbt_get_minrecs(
16630f712c9SDave Chinner struct xfs_btree_cur *cur,
16730f712c9SDave Chinner int level)
16830f712c9SDave Chinner {
16930f712c9SDave Chinner return cur->bc_mp->m_alloc_mnr[level != 0];
17030f712c9SDave Chinner }
17130f712c9SDave Chinner
17230f712c9SDave Chinner STATIC int
xfs_allocbt_get_maxrecs(struct xfs_btree_cur * cur,int level)17330f712c9SDave Chinner xfs_allocbt_get_maxrecs(
17430f712c9SDave Chinner struct xfs_btree_cur *cur,
17530f712c9SDave Chinner int level)
17630f712c9SDave Chinner {
17730f712c9SDave Chinner return cur->bc_mp->m_alloc_mxr[level != 0];
17830f712c9SDave Chinner }
17930f712c9SDave Chinner
18030f712c9SDave Chinner STATIC void
xfs_allocbt_init_key_from_rec(union xfs_btree_key * key,const union xfs_btree_rec * rec)18130f712c9SDave Chinner xfs_allocbt_init_key_from_rec(
18230f712c9SDave Chinner union xfs_btree_key *key,
18323825cd1SDarrick J. Wong const union xfs_btree_rec *rec)
18430f712c9SDave Chinner {
18530f712c9SDave Chinner key->alloc.ar_startblock = rec->alloc.ar_startblock;
18630f712c9SDave Chinner key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
18730f712c9SDave Chinner }
18830f712c9SDave Chinner
18930f712c9SDave Chinner STATIC void
xfs_bnobt_init_high_key_from_rec(union xfs_btree_key * key,const union xfs_btree_rec * rec)19008438b1eSDarrick J. Wong xfs_bnobt_init_high_key_from_rec(
19108438b1eSDarrick J. Wong union xfs_btree_key *key,
19223825cd1SDarrick J. Wong const union xfs_btree_rec *rec)
19308438b1eSDarrick J. Wong {
19408438b1eSDarrick J. Wong __u32 x;
19508438b1eSDarrick J. Wong
19608438b1eSDarrick J. Wong x = be32_to_cpu(rec->alloc.ar_startblock);
19708438b1eSDarrick J. Wong x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
19808438b1eSDarrick J. Wong key->alloc.ar_startblock = cpu_to_be32(x);
19908438b1eSDarrick J. Wong key->alloc.ar_blockcount = 0;
20008438b1eSDarrick J. Wong }
20108438b1eSDarrick J. Wong
20208438b1eSDarrick J. Wong STATIC void
xfs_cntbt_init_high_key_from_rec(union xfs_btree_key * key,const union xfs_btree_rec * rec)20308438b1eSDarrick J. Wong xfs_cntbt_init_high_key_from_rec(
20408438b1eSDarrick J. Wong union xfs_btree_key *key,
20523825cd1SDarrick J. Wong const union xfs_btree_rec *rec)
20608438b1eSDarrick J. Wong {
20708438b1eSDarrick J. Wong key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
20808438b1eSDarrick J. Wong key->alloc.ar_startblock = 0;
20908438b1eSDarrick J. Wong }
21008438b1eSDarrick J. Wong
21108438b1eSDarrick J. Wong STATIC void
xfs_allocbt_init_rec_from_cur(struct xfs_btree_cur * cur,union xfs_btree_rec * rec)21230f712c9SDave Chinner xfs_allocbt_init_rec_from_cur(
21330f712c9SDave Chinner struct xfs_btree_cur *cur,
21430f712c9SDave Chinner union xfs_btree_rec *rec)
21530f712c9SDave Chinner {
21630f712c9SDave Chinner rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
21730f712c9SDave Chinner rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
21830f712c9SDave Chinner }
21930f712c9SDave Chinner
/* Load the root pointer for this cursor's btree from the AGF. */
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	/* The AGF buffer must belong to the cursor's AG. */
	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
23130f712c9SDave Chinner
232c8ce540dSDarrick J. Wong STATIC int64_t
xfs_bnobt_key_diff(struct xfs_btree_cur * cur,const union xfs_btree_key * key)23308438b1eSDarrick J. Wong xfs_bnobt_key_diff(
23408438b1eSDarrick J. Wong struct xfs_btree_cur *cur,
235d29d5577SDarrick J. Wong const union xfs_btree_key *key)
23608438b1eSDarrick J. Wong {
237d29d5577SDarrick J. Wong struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
238d29d5577SDarrick J. Wong const struct xfs_alloc_rec *kp = &key->alloc;
23908438b1eSDarrick J. Wong
240c8ce540dSDarrick J. Wong return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
24108438b1eSDarrick J. Wong }
24208438b1eSDarrick J. Wong
243c8ce540dSDarrick J. Wong STATIC int64_t
xfs_cntbt_key_diff(struct xfs_btree_cur * cur,const union xfs_btree_key * key)24408438b1eSDarrick J. Wong xfs_cntbt_key_diff(
24530f712c9SDave Chinner struct xfs_btree_cur *cur,
246d29d5577SDarrick J. Wong const union xfs_btree_key *key)
24730f712c9SDave Chinner {
248d29d5577SDarrick J. Wong struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
249d29d5577SDarrick J. Wong const struct xfs_alloc_rec *kp = &key->alloc;
250c8ce540dSDarrick J. Wong int64_t diff;
25130f712c9SDave Chinner
252c8ce540dSDarrick J. Wong diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
25330f712c9SDave Chinner if (diff)
25430f712c9SDave Chinner return diff;
25530f712c9SDave Chinner
256c8ce540dSDarrick J. Wong return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
25730f712c9SDave Chinner }
25830f712c9SDave Chinner
259c8ce540dSDarrick J. Wong STATIC int64_t
xfs_bnobt_diff_two_keys(struct xfs_btree_cur * cur,const union xfs_btree_key * k1,const union xfs_btree_key * k2,const union xfs_btree_key * mask)26008438b1eSDarrick J. Wong xfs_bnobt_diff_two_keys(
26108438b1eSDarrick J. Wong struct xfs_btree_cur *cur,
262d29d5577SDarrick J. Wong const union xfs_btree_key *k1,
263*4a200a09SDarrick J. Wong const union xfs_btree_key *k2,
264*4a200a09SDarrick J. Wong const union xfs_btree_key *mask)
26508438b1eSDarrick J. Wong {
266*4a200a09SDarrick J. Wong ASSERT(!mask || mask->alloc.ar_startblock);
267*4a200a09SDarrick J. Wong
268c8ce540dSDarrick J. Wong return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
26908438b1eSDarrick J. Wong be32_to_cpu(k2->alloc.ar_startblock);
27008438b1eSDarrick J. Wong }
27108438b1eSDarrick J. Wong
272c8ce540dSDarrick J. Wong STATIC int64_t
xfs_cntbt_diff_two_keys(struct xfs_btree_cur * cur,const union xfs_btree_key * k1,const union xfs_btree_key * k2,const union xfs_btree_key * mask)27308438b1eSDarrick J. Wong xfs_cntbt_diff_two_keys(
27408438b1eSDarrick J. Wong struct xfs_btree_cur *cur,
275d29d5577SDarrick J. Wong const union xfs_btree_key *k1,
276*4a200a09SDarrick J. Wong const union xfs_btree_key *k2,
277*4a200a09SDarrick J. Wong const union xfs_btree_key *mask)
27808438b1eSDarrick J. Wong {
279c8ce540dSDarrick J. Wong int64_t diff;
28008438b1eSDarrick J. Wong
281*4a200a09SDarrick J. Wong ASSERT(!mask || (mask->alloc.ar_blockcount &&
282*4a200a09SDarrick J. Wong mask->alloc.ar_startblock));
283*4a200a09SDarrick J. Wong
28408438b1eSDarrick J. Wong diff = be32_to_cpu(k1->alloc.ar_blockcount) -
28508438b1eSDarrick J. Wong be32_to_cpu(k2->alloc.ar_blockcount);
28608438b1eSDarrick J. Wong if (diff)
28708438b1eSDarrick J. Wong return diff;
28808438b1eSDarrick J. Wong
28908438b1eSDarrick J. Wong return be32_to_cpu(k1->alloc.ar_startblock) -
29008438b1eSDarrick J. Wong be32_to_cpu(k2->alloc.ar_startblock);
29108438b1eSDarrick J. Wong }
29208438b1eSDarrick J. Wong
/*
 * Structural verifier for free space btree blocks (both bnobt and cntbt).
 * Checks the block magic, the v5 short-form header (on CRC filesystems)
 * and the recorded level against the appropriate tree-height limit, then
 * hands off to the generic short-block verifier for the record count.
 * Returns NULL on success or the failure address of the check that failed.
 */
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && xfs_perag_initialised_agf(pag)) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_alloc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}
33330f712c9SDave Chinner
33430f712c9SDave Chinner static void
xfs_allocbt_read_verify(struct xfs_buf * bp)33530f712c9SDave Chinner xfs_allocbt_read_verify(
33630f712c9SDave Chinner struct xfs_buf *bp)
33730f712c9SDave Chinner {
338bc1a09b8SDarrick J. Wong xfs_failaddr_t fa;
339bc1a09b8SDarrick J. Wong
34030f712c9SDave Chinner if (!xfs_btree_sblock_verify_crc(bp))
341bc1a09b8SDarrick J. Wong xfs_verifier_error(bp, -EFSBADCRC, __this_address);
342bc1a09b8SDarrick J. Wong else {
343bc1a09b8SDarrick J. Wong fa = xfs_allocbt_verify(bp);
344bc1a09b8SDarrick J. Wong if (fa)
345bc1a09b8SDarrick J. Wong xfs_verifier_error(bp, -EFSCORRUPTED, fa);
346bc1a09b8SDarrick J. Wong }
34730f712c9SDave Chinner
34831ca03c9SDarrick J. Wong if (bp->b_error)
34930f712c9SDave Chinner trace_xfs_btree_corrupt(bp, _RET_IP_);
35030f712c9SDave Chinner }
35130f712c9SDave Chinner
35230f712c9SDave Chinner static void
xfs_allocbt_write_verify(struct xfs_buf * bp)35330f712c9SDave Chinner xfs_allocbt_write_verify(
35430f712c9SDave Chinner struct xfs_buf *bp)
35530f712c9SDave Chinner {
356bc1a09b8SDarrick J. Wong xfs_failaddr_t fa;
357bc1a09b8SDarrick J. Wong
358bc1a09b8SDarrick J. Wong fa = xfs_allocbt_verify(bp);
359bc1a09b8SDarrick J. Wong if (fa) {
36030f712c9SDave Chinner trace_xfs_btree_corrupt(bp, _RET_IP_);
361bc1a09b8SDarrick J. Wong xfs_verifier_error(bp, -EFSCORRUPTED, fa);
36230f712c9SDave Chinner return;
36330f712c9SDave Chinner }
36430f712c9SDave Chinner xfs_btree_sblock_calc_crc(bp);
36530f712c9SDave Chinner
36630f712c9SDave Chinner }
36730f712c9SDave Chinner
/* Buffer verifier ops for by-block (bnobt) free space btree blocks. */
const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name = "xfs_bnobt",
	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};
37630f712c9SDave Chinner
/* Buffer verifier ops for by-size (cntbt) free space btree blocks. */
const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name = "xfs_cntbt",
	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};
38530f712c9SDave Chinner
38630f712c9SDave Chinner STATIC int
xfs_bnobt_keys_inorder(struct xfs_btree_cur * cur,const union xfs_btree_key * k1,const union xfs_btree_key * k2)38708438b1eSDarrick J. Wong xfs_bnobt_keys_inorder(
38830f712c9SDave Chinner struct xfs_btree_cur *cur,
3898e38dc88SDarrick J. Wong const union xfs_btree_key *k1,
3908e38dc88SDarrick J. Wong const union xfs_btree_key *k2)
39130f712c9SDave Chinner {
39230f712c9SDave Chinner return be32_to_cpu(k1->alloc.ar_startblock) <
39330f712c9SDave Chinner be32_to_cpu(k2->alloc.ar_startblock);
39408438b1eSDarrick J. Wong }
39508438b1eSDarrick J. Wong
39608438b1eSDarrick J. Wong STATIC int
xfs_bnobt_recs_inorder(struct xfs_btree_cur * cur,const union xfs_btree_rec * r1,const union xfs_btree_rec * r2)39708438b1eSDarrick J. Wong xfs_bnobt_recs_inorder(
39808438b1eSDarrick J. Wong struct xfs_btree_cur *cur,
3998e38dc88SDarrick J. Wong const union xfs_btree_rec *r1,
4008e38dc88SDarrick J. Wong const union xfs_btree_rec *r2)
40108438b1eSDarrick J. Wong {
40208438b1eSDarrick J. Wong return be32_to_cpu(r1->alloc.ar_startblock) +
40308438b1eSDarrick J. Wong be32_to_cpu(r1->alloc.ar_blockcount) <=
40408438b1eSDarrick J. Wong be32_to_cpu(r2->alloc.ar_startblock);
40508438b1eSDarrick J. Wong }
40608438b1eSDarrick J. Wong
40708438b1eSDarrick J. Wong STATIC int
xfs_cntbt_keys_inorder(struct xfs_btree_cur * cur,const union xfs_btree_key * k1,const union xfs_btree_key * k2)40808438b1eSDarrick J. Wong xfs_cntbt_keys_inorder(
40908438b1eSDarrick J. Wong struct xfs_btree_cur *cur,
4108e38dc88SDarrick J. Wong const union xfs_btree_key *k1,
4118e38dc88SDarrick J. Wong const union xfs_btree_key *k2)
41208438b1eSDarrick J. Wong {
41330f712c9SDave Chinner return be32_to_cpu(k1->alloc.ar_blockcount) <
41430f712c9SDave Chinner be32_to_cpu(k2->alloc.ar_blockcount) ||
41530f712c9SDave Chinner (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
41630f712c9SDave Chinner be32_to_cpu(k1->alloc.ar_startblock) <
41730f712c9SDave Chinner be32_to_cpu(k2->alloc.ar_startblock));
41830f712c9SDave Chinner }
41930f712c9SDave Chinner
42030f712c9SDave Chinner STATIC int
xfs_cntbt_recs_inorder(struct xfs_btree_cur * cur,const union xfs_btree_rec * r1,const union xfs_btree_rec * r2)42108438b1eSDarrick J. Wong xfs_cntbt_recs_inorder(
42230f712c9SDave Chinner struct xfs_btree_cur *cur,
4238e38dc88SDarrick J. Wong const union xfs_btree_rec *r1,
4248e38dc88SDarrick J. Wong const union xfs_btree_rec *r2)
42530f712c9SDave Chinner {
42630f712c9SDave Chinner return be32_to_cpu(r1->alloc.ar_blockcount) <
42730f712c9SDave Chinner be32_to_cpu(r2->alloc.ar_blockcount) ||
42830f712c9SDave Chinner (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
42930f712c9SDave Chinner be32_to_cpu(r1->alloc.ar_startblock) <
43030f712c9SDave Chinner be32_to_cpu(r2->alloc.ar_startblock));
43130f712c9SDave Chinner }
43230f712c9SDave Chinner
4336abc7aefSDarrick J. Wong STATIC enum xbtree_key_contig
xfs_allocbt_keys_contiguous(struct xfs_btree_cur * cur,const union xfs_btree_key * key1,const union xfs_btree_key * key2,const union xfs_btree_key * mask)4346abc7aefSDarrick J. Wong xfs_allocbt_keys_contiguous(
4356abc7aefSDarrick J. Wong struct xfs_btree_cur *cur,
4366abc7aefSDarrick J. Wong const union xfs_btree_key *key1,
437*4a200a09SDarrick J. Wong const union xfs_btree_key *key2,
438*4a200a09SDarrick J. Wong const union xfs_btree_key *mask)
4396abc7aefSDarrick J. Wong {
440*4a200a09SDarrick J. Wong ASSERT(!mask || mask->alloc.ar_startblock);
441*4a200a09SDarrick J. Wong
4426abc7aefSDarrick J. Wong return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
4436abc7aefSDarrick J. Wong be32_to_cpu(key2->alloc.ar_startblock));
4446abc7aefSDarrick J. Wong }
4456abc7aefSDarrick J. Wong
/* Btree operations vector for the by-block (bnobt) free space btree. */
static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
	.keys_contiguous	= xfs_allocbt_keys_contiguous,
};
46808438b1eSDarrick J. Wong
/* Btree operations vector for the by-size (cntbt) free space btree. */
static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
	.keys_contiguous	= NULL, /* not needed right now */
};
49130f712c9SDave Chinner
/*
 * Allocate most of a new allocation btree cursor.
 *
 * Shared between the regular and staging cursor constructors: allocates
 * the cursor from the allocbt cache, picks the ops vector and stats slot
 * for the requested tree type, and takes a reference on @pag.  The caller
 * fills in the AGF buffer (or fake root) afterwards.
 */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
			xfs_allocbt_cur_cache);
	cur->bc_ag.abt.active = false;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		/* Only the cntbt maintains the AGF's longest-extent field. */
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	/* Hold a perag reference for the lifetime of the cursor. */
	cur->bc_ag.pag = xfs_perag_hold(pag);

	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}
524e6eb33d9SDarrick J. Wong
52530f712c9SDave Chinner /*
52630f712c9SDave Chinner * Allocate a new allocation btree cursor.
52730f712c9SDave Chinner */
52830f712c9SDave Chinner struct xfs_btree_cur * /* new alloc btree cursor */
xfs_allocbt_init_cursor(struct xfs_mount * mp,struct xfs_trans * tp,struct xfs_buf * agbp,struct xfs_perag * pag,xfs_btnum_t btnum)52930f712c9SDave Chinner xfs_allocbt_init_cursor(
53030f712c9SDave Chinner struct xfs_mount *mp, /* file system mount point */
53130f712c9SDave Chinner struct xfs_trans *tp, /* transaction pointer */
53230f712c9SDave Chinner struct xfs_buf *agbp, /* buffer for agf structure */
533be9fb17dSDave Chinner struct xfs_perag *pag,
53430f712c9SDave Chinner xfs_btnum_t btnum) /* btree identifier */
53530f712c9SDave Chinner {
5369798f615SChristoph Hellwig struct xfs_agf *agf = agbp->b_addr;
53730f712c9SDave Chinner struct xfs_btree_cur *cur;
53830f712c9SDave Chinner
539289d38d2SDave Chinner cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
540e6eb33d9SDarrick J. Wong if (btnum == XFS_BTNUM_CNT)
54130f712c9SDave Chinner cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
542e6eb33d9SDarrick J. Wong else
54330f712c9SDave Chinner cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
54430f712c9SDave Chinner
545576af732SDave Chinner cur->bc_ag.agbp = agbp;
54630f712c9SDave Chinner
54730f712c9SDave Chinner return cur;
54830f712c9SDave Chinner }
54930f712c9SDave Chinner
550e6eb33d9SDarrick J. Wong /* Create a free space btree cursor with a fake root for staging. */
551e6eb33d9SDarrick J. Wong struct xfs_btree_cur *
xfs_allocbt_stage_cursor(struct xfs_mount * mp,struct xbtree_afakeroot * afake,struct xfs_perag * pag,xfs_btnum_t btnum)552e6eb33d9SDarrick J. Wong xfs_allocbt_stage_cursor(
553e6eb33d9SDarrick J. Wong struct xfs_mount *mp,
554e6eb33d9SDarrick J. Wong struct xbtree_afakeroot *afake,
555289d38d2SDave Chinner struct xfs_perag *pag,
556e6eb33d9SDarrick J. Wong xfs_btnum_t btnum)
557e6eb33d9SDarrick J. Wong {
558e6eb33d9SDarrick J. Wong struct xfs_btree_cur *cur;
559e6eb33d9SDarrick J. Wong
560289d38d2SDave Chinner cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
561e6eb33d9SDarrick J. Wong xfs_btree_stage_afakeroot(cur, afake);
562e6eb33d9SDarrick J. Wong return cur;
563e6eb33d9SDarrick J. Wong }
564e6eb33d9SDarrick J. Wong
/*
 * Install a new free space btree root. Caller is responsible for invalidating
 * and freeing the old btree blocks.
 *
 * The staged root and level are copied from the fake root into the AGF and
 * logged, then the cursor is converted from staging mode to a live cursor
 * with the real ops vector for its tree type.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		/* The live cntbt must keep the AGF longest field updated. */
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}
591e6eb33d9SDarrick J. Wong
5920ed5f735SDarrick J. Wong /* Calculate number of records in an alloc btree block. */
5930ed5f735SDarrick J. Wong static inline unsigned int
xfs_allocbt_block_maxrecs(unsigned int blocklen,bool leaf)5940ed5f735SDarrick J. Wong xfs_allocbt_block_maxrecs(
5950ed5f735SDarrick J. Wong unsigned int blocklen,
5960ed5f735SDarrick J. Wong bool leaf)
5970ed5f735SDarrick J. Wong {
5980ed5f735SDarrick J. Wong if (leaf)
5990ed5f735SDarrick J. Wong return blocklen / sizeof(xfs_alloc_rec_t);
6000ed5f735SDarrick J. Wong return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
6010ed5f735SDarrick J. Wong }
6020ed5f735SDarrick J. Wong
/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	/* Subtract the block header before computing record capacity. */
	return xfs_allocbt_block_maxrecs(blocklen - XFS_ALLOC_BLOCK_LEN(mp),
			leaf);
}
61530f712c9SDave Chinner
/* Free space btrees are at their largest when every other block is free. */
#define XFS_MAX_FREESP_RECORDS	((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	/*
	 * Use the smallest payload area any supported geometry can have
	 * (minimum block size minus the larger applicable short-block
	 * header), so the per-level record counts below are a worst case.
	 */
	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	/* A block is allowed to run half full, so minrecs is maxrecs / 2. */
	minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}
63414861c47SDarrick J. Wong
63514861c47SDarrick J. Wong /* Calculate the freespace btree size for some records. */
63614861c47SDarrick J. Wong xfs_extlen_t
xfs_allocbt_calc_size(struct xfs_mount * mp,unsigned long long len)63714861c47SDarrick J. Wong xfs_allocbt_calc_size(
63814861c47SDarrick J. Wong struct xfs_mount *mp,
63914861c47SDarrick J. Wong unsigned long long len)
64014861c47SDarrick J. Wong {
64114861c47SDarrick J. Wong return xfs_btree_calc_size(mp->m_alloc_mnr, len);
64214861c47SDarrick J. Wong }
6439fa47bdcSDarrick J. Wong
6449fa47bdcSDarrick J. Wong int __init
xfs_allocbt_init_cur_cache(void)6459fa47bdcSDarrick J. Wong xfs_allocbt_init_cur_cache(void)
6469fa47bdcSDarrick J. Wong {
6479fa47bdcSDarrick J. Wong xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
6489fa47bdcSDarrick J. Wong xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
6499fa47bdcSDarrick J. Wong 0, 0, NULL);
6509fa47bdcSDarrick J. Wong
6519fa47bdcSDarrick J. Wong if (!xfs_allocbt_cur_cache)
6529fa47bdcSDarrick J. Wong return -ENOMEM;
6539fa47bdcSDarrick J. Wong return 0;
6549fa47bdcSDarrick J. Wong }
6559fa47bdcSDarrick J. Wong
6569fa47bdcSDarrick J. Wong void
xfs_allocbt_destroy_cur_cache(void)6579fa47bdcSDarrick J. Wong xfs_allocbt_destroy_cur_cache(void)
6589fa47bdcSDarrick J. Wong {
6599fa47bdcSDarrick J. Wong kmem_cache_destroy(xfs_allocbt_cur_cache);
6609fa47bdcSDarrick J. Wong xfs_allocbt_cur_cache = NULL;
6619fa47bdcSDarrick J. Wong }
662