10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0 230f712c9SDave Chinner /* 330f712c9SDave Chinner * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. 430f712c9SDave Chinner * All Rights Reserved. 530f712c9SDave Chinner */ 630f712c9SDave Chinner #include "xfs.h" 730f712c9SDave Chinner #include "xfs_fs.h" 830f712c9SDave Chinner #include "xfs_shared.h" 930f712c9SDave Chinner #include "xfs_format.h" 1030f712c9SDave Chinner #include "xfs_log_format.h" 1130f712c9SDave Chinner #include "xfs_trans_resv.h" 1230f712c9SDave Chinner #include "xfs_bit.h" 1330f712c9SDave Chinner #include "xfs_mount.h" 1430f712c9SDave Chinner #include "xfs_btree.h" 15c29ce8f4SDarrick J. Wong #include "xfs_btree_staging.h" 1630f712c9SDave Chinner #include "xfs_ialloc.h" 1730f712c9SDave Chinner #include "xfs_ialloc_btree.h" 1830f712c9SDave Chinner #include "xfs_alloc.h" 1930f712c9SDave Chinner #include "xfs_error.h" 2030f712c9SDave Chinner #include "xfs_trace.h" 2130f712c9SDave Chinner #include "xfs_trans.h" 22340785ccSDarrick J. Wong #include "xfs_rmap.h" 2330933120SDave Chinner #include "xfs_ag.h" 2430f712c9SDave Chinner 2530f712c9SDave Chinner STATIC int 2630f712c9SDave Chinner xfs_inobt_get_minrecs( 2730f712c9SDave Chinner struct xfs_btree_cur *cur, 2830f712c9SDave Chinner int level) 2930f712c9SDave Chinner { 30ef325959SDarrick J. Wong return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0]; 3130f712c9SDave Chinner } 3230f712c9SDave Chinner 3330f712c9SDave Chinner STATIC struct xfs_btree_cur * 3430f712c9SDave Chinner xfs_inobt_dup_cursor( 3530f712c9SDave Chinner struct xfs_btree_cur *cur) 3630f712c9SDave Chinner { 3730f712c9SDave Chinner return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp, 387b13c515SDave Chinner cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum); 3930f712c9SDave Chinner } 4030f712c9SDave Chinner 4130f712c9SDave Chinner STATIC void 4230f712c9SDave Chinner xfs_inobt_set_root( 4330f712c9SDave Chinner struct xfs_btree_cur *cur, 44b5a6e5feSDarrick J. 
Wong const union xfs_btree_ptr *nptr, 4530f712c9SDave Chinner int inc) /* level change */ 4630f712c9SDave Chinner { 47576af732SDave Chinner struct xfs_buf *agbp = cur->bc_ag.agbp; 48370c782bSChristoph Hellwig struct xfs_agi *agi = agbp->b_addr; 4930f712c9SDave Chinner 5030f712c9SDave Chinner agi->agi_root = nptr->s; 5130f712c9SDave Chinner be32_add_cpu(&agi->agi_level, inc); 5230f712c9SDave Chinner xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL); 5330f712c9SDave Chinner } 5430f712c9SDave Chinner 5530f712c9SDave Chinner STATIC void 5630f712c9SDave Chinner xfs_finobt_set_root( 5730f712c9SDave Chinner struct xfs_btree_cur *cur, 58b5a6e5feSDarrick J. Wong const union xfs_btree_ptr *nptr, 5930f712c9SDave Chinner int inc) /* level change */ 6030f712c9SDave Chinner { 61576af732SDave Chinner struct xfs_buf *agbp = cur->bc_ag.agbp; 62370c782bSChristoph Hellwig struct xfs_agi *agi = agbp->b_addr; 6330f712c9SDave Chinner 6430f712c9SDave Chinner agi->agi_free_root = nptr->s; 6530f712c9SDave Chinner be32_add_cpu(&agi->agi_free_level, inc); 6630f712c9SDave Chinner xfs_ialloc_log_agi(cur->bc_tp, agbp, 6730f712c9SDave Chinner XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL); 6830f712c9SDave Chinner } 6930f712c9SDave Chinner 702a39946cSDarrick J. Wong /* Update the inode btree block counter for this btree. */ 712a39946cSDarrick J. Wong static inline void 722a39946cSDarrick J. Wong xfs_inobt_mod_blockcount( 732a39946cSDarrick J. Wong struct xfs_btree_cur *cur, 742a39946cSDarrick J. Wong int howmuch) 752a39946cSDarrick J. Wong { 762a39946cSDarrick J. Wong struct xfs_buf *agbp = cur->bc_ag.agbp; 772a39946cSDarrick J. Wong struct xfs_agi *agi = agbp->b_addr; 782a39946cSDarrick J. Wong 792a39946cSDarrick J. Wong if (!xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) 802a39946cSDarrick J. Wong return; 812a39946cSDarrick J. Wong 822a39946cSDarrick J. Wong if (cur->bc_btnum == XFS_BTNUM_FINO) 832a39946cSDarrick J. 
Wong be32_add_cpu(&agi->agi_fblocks, howmuch); 842a39946cSDarrick J. Wong else if (cur->bc_btnum == XFS_BTNUM_INO) 852a39946cSDarrick J. Wong be32_add_cpu(&agi->agi_iblocks, howmuch); 862a39946cSDarrick J. Wong xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS); 872a39946cSDarrick J. Wong } 882a39946cSDarrick J. Wong 8930f712c9SDave Chinner STATIC int 9076d771b4SChristoph Hellwig __xfs_inobt_alloc_block( 9130f712c9SDave Chinner struct xfs_btree_cur *cur, 92*deb06b9aSDarrick J. Wong const union xfs_btree_ptr *start, 9330f712c9SDave Chinner union xfs_btree_ptr *new, 9476d771b4SChristoph Hellwig int *stat, 9576d771b4SChristoph Hellwig enum xfs_ag_resv_type resv) 9630f712c9SDave Chinner { 9730f712c9SDave Chinner xfs_alloc_arg_t args; /* block allocation args */ 9830f712c9SDave Chinner int error; /* error return value */ 9930f712c9SDave Chinner xfs_agblock_t sbno = be32_to_cpu(start->s); 10030f712c9SDave Chinner 10130f712c9SDave Chinner memset(&args, 0, sizeof(args)); 10230f712c9SDave Chinner args.tp = cur->bc_tp; 10330f712c9SDave Chinner args.mp = cur->bc_mp; 1047280fedaSDarrick J. 
Wong args.oinfo = XFS_RMAP_OINFO_INOBT; 10550f02fe3SDave Chinner args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno); 10630f712c9SDave Chinner args.minlen = 1; 10730f712c9SDave Chinner args.maxlen = 1; 10830f712c9SDave Chinner args.prod = 1; 10930f712c9SDave Chinner args.type = XFS_ALLOCTYPE_NEAR_BNO; 11076d771b4SChristoph Hellwig args.resv = resv; 11130f712c9SDave Chinner 11230f712c9SDave Chinner error = xfs_alloc_vextent(&args); 113e157ebdcSCarlos Maiolino if (error) 11430f712c9SDave Chinner return error; 115e157ebdcSCarlos Maiolino 11630f712c9SDave Chinner if (args.fsbno == NULLFSBLOCK) { 11730f712c9SDave Chinner *stat = 0; 11830f712c9SDave Chinner return 0; 11930f712c9SDave Chinner } 12030f712c9SDave Chinner ASSERT(args.len == 1); 12130f712c9SDave Chinner 12230f712c9SDave Chinner new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno)); 12330f712c9SDave Chinner *stat = 1; 1242a39946cSDarrick J. Wong xfs_inobt_mod_blockcount(cur, 1); 12530f712c9SDave Chinner return 0; 12630f712c9SDave Chinner } 12730f712c9SDave Chinner 12830f712c9SDave Chinner STATIC int 12976d771b4SChristoph Hellwig xfs_inobt_alloc_block( 13076d771b4SChristoph Hellwig struct xfs_btree_cur *cur, 131*deb06b9aSDarrick J. Wong const union xfs_btree_ptr *start, 13276d771b4SChristoph Hellwig union xfs_btree_ptr *new, 13376d771b4SChristoph Hellwig int *stat) 13476d771b4SChristoph Hellwig { 13576d771b4SChristoph Hellwig return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE); 13676d771b4SChristoph Hellwig } 13776d771b4SChristoph Hellwig 13876d771b4SChristoph Hellwig STATIC int 13976d771b4SChristoph Hellwig xfs_finobt_alloc_block( 14076d771b4SChristoph Hellwig struct xfs_btree_cur *cur, 141*deb06b9aSDarrick J. Wong const union xfs_btree_ptr *start, 14276d771b4SChristoph Hellwig union xfs_btree_ptr *new, 14376d771b4SChristoph Hellwig int *stat) 14476d771b4SChristoph Hellwig { 145e1f6ca11SDarrick J. 
Wong if (cur->bc_mp->m_finobt_nores) 146ad90bb58SBrian Foster return xfs_inobt_alloc_block(cur, start, new, stat); 14776d771b4SChristoph Hellwig return __xfs_inobt_alloc_block(cur, start, new, stat, 14876d771b4SChristoph Hellwig XFS_AG_RESV_METADATA); 14976d771b4SChristoph Hellwig } 15076d771b4SChristoph Hellwig 15176d771b4SChristoph Hellwig STATIC int 152ad90bb58SBrian Foster __xfs_inobt_free_block( 153ad90bb58SBrian Foster struct xfs_btree_cur *cur, 154ad90bb58SBrian Foster struct xfs_buf *bp, 155ad90bb58SBrian Foster enum xfs_ag_resv_type resv) 156ad90bb58SBrian Foster { 1572a39946cSDarrick J. Wong xfs_inobt_mod_blockcount(cur, -1); 158ad90bb58SBrian Foster return xfs_free_extent(cur->bc_tp, 159ad90bb58SBrian Foster XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1, 1607280fedaSDarrick J. Wong &XFS_RMAP_OINFO_INOBT, resv); 161ad90bb58SBrian Foster } 162ad90bb58SBrian Foster 163ad90bb58SBrian Foster STATIC int 16430f712c9SDave Chinner xfs_inobt_free_block( 16530f712c9SDave Chinner struct xfs_btree_cur *cur, 16630f712c9SDave Chinner struct xfs_buf *bp) 16730f712c9SDave Chinner { 168ad90bb58SBrian Foster return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE); 169ad90bb58SBrian Foster } 170340785ccSDarrick J. Wong 171ad90bb58SBrian Foster STATIC int 172ad90bb58SBrian Foster xfs_finobt_free_block( 173ad90bb58SBrian Foster struct xfs_btree_cur *cur, 174ad90bb58SBrian Foster struct xfs_buf *bp) 175ad90bb58SBrian Foster { 176e1f6ca11SDarrick J. Wong if (cur->bc_mp->m_finobt_nores) 177ad90bb58SBrian Foster return xfs_inobt_free_block(cur, bp); 178ad90bb58SBrian Foster return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA); 17930f712c9SDave Chinner } 18030f712c9SDave Chinner 18130f712c9SDave Chinner STATIC int 18230f712c9SDave Chinner xfs_inobt_get_maxrecs( 18330f712c9SDave Chinner struct xfs_btree_cur *cur, 18430f712c9SDave Chinner int level) 18530f712c9SDave Chinner { 186ef325959SDarrick J. 
Wong return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0]; 18730f712c9SDave Chinner } 18830f712c9SDave Chinner 18930f712c9SDave Chinner STATIC void 19030f712c9SDave Chinner xfs_inobt_init_key_from_rec( 19130f712c9SDave Chinner union xfs_btree_key *key, 19223825cd1SDarrick J. Wong const union xfs_btree_rec *rec) 19330f712c9SDave Chinner { 19430f712c9SDave Chinner key->inobt.ir_startino = rec->inobt.ir_startino; 19530f712c9SDave Chinner } 19630f712c9SDave Chinner 19730f712c9SDave Chinner STATIC void 198118bb47eSDarrick J. Wong xfs_inobt_init_high_key_from_rec( 199118bb47eSDarrick J. Wong union xfs_btree_key *key, 20023825cd1SDarrick J. Wong const union xfs_btree_rec *rec) 201118bb47eSDarrick J. Wong { 202118bb47eSDarrick J. Wong __u32 x; 203118bb47eSDarrick J. Wong 204118bb47eSDarrick J. Wong x = be32_to_cpu(rec->inobt.ir_startino); 205118bb47eSDarrick J. Wong x += XFS_INODES_PER_CHUNK - 1; 206118bb47eSDarrick J. Wong key->inobt.ir_startino = cpu_to_be32(x); 207118bb47eSDarrick J. Wong } 208118bb47eSDarrick J. Wong 209118bb47eSDarrick J. 
Wong STATIC void 21030f712c9SDave Chinner xfs_inobt_init_rec_from_cur( 21130f712c9SDave Chinner struct xfs_btree_cur *cur, 21230f712c9SDave Chinner union xfs_btree_rec *rec) 21330f712c9SDave Chinner { 21430f712c9SDave Chinner rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino); 2155419040fSBrian Foster if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) { 2165419040fSBrian Foster rec->inobt.ir_u.sp.ir_holemask = 2175419040fSBrian Foster cpu_to_be16(cur->bc_rec.i.ir_holemask); 2185419040fSBrian Foster rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count; 2195419040fSBrian Foster rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount; 2205419040fSBrian Foster } else { 2215419040fSBrian Foster /* ir_holemask/ir_count not supported on-disk */ 2225419040fSBrian Foster rec->inobt.ir_u.f.ir_freecount = 2235419040fSBrian Foster cpu_to_be32(cur->bc_rec.i.ir_freecount); 2245419040fSBrian Foster } 22530f712c9SDave Chinner rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free); 22630f712c9SDave Chinner } 22730f712c9SDave Chinner 22830f712c9SDave Chinner /* 22930f712c9SDave Chinner * initial value of ptr for lookup 23030f712c9SDave Chinner */ 23130f712c9SDave Chinner STATIC void 23230f712c9SDave Chinner xfs_inobt_init_ptr_from_cur( 23330f712c9SDave Chinner struct xfs_btree_cur *cur, 23430f712c9SDave Chinner union xfs_btree_ptr *ptr) 23530f712c9SDave Chinner { 236576af732SDave Chinner struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; 23730f712c9SDave Chinner 23850f02fe3SDave Chinner ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno)); 23930f712c9SDave Chinner 24030f712c9SDave Chinner ptr->s = agi->agi_root; 24130f712c9SDave Chinner } 24230f712c9SDave Chinner 24330f712c9SDave Chinner STATIC void 24430f712c9SDave Chinner xfs_finobt_init_ptr_from_cur( 24530f712c9SDave Chinner struct xfs_btree_cur *cur, 24630f712c9SDave Chinner union xfs_btree_ptr *ptr) 24730f712c9SDave Chinner { 248576af732SDave Chinner struct xfs_agi *agi = cur->bc_ag.agbp->b_addr; 
24930f712c9SDave Chinner 25050f02fe3SDave Chinner ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno)); 25130f712c9SDave Chinner ptr->s = agi->agi_free_root; 25230f712c9SDave Chinner } 25330f712c9SDave Chinner 254c8ce540dSDarrick J. Wong STATIC int64_t 25530f712c9SDave Chinner xfs_inobt_key_diff( 25630f712c9SDave Chinner struct xfs_btree_cur *cur, 257d29d5577SDarrick J. Wong const union xfs_btree_key *key) 25830f712c9SDave Chinner { 259c8ce540dSDarrick J. Wong return (int64_t)be32_to_cpu(key->inobt.ir_startino) - 26030f712c9SDave Chinner cur->bc_rec.i.ir_startino; 26130f712c9SDave Chinner } 26230f712c9SDave Chinner 263118bb47eSDarrick J. Wong STATIC int64_t 264118bb47eSDarrick J. Wong xfs_inobt_diff_two_keys( 265118bb47eSDarrick J. Wong struct xfs_btree_cur *cur, 266d29d5577SDarrick J. Wong const union xfs_btree_key *k1, 267d29d5577SDarrick J. Wong const union xfs_btree_key *k2) 268118bb47eSDarrick J. Wong { 269118bb47eSDarrick J. Wong return (int64_t)be32_to_cpu(k1->inobt.ir_startino) - 270118bb47eSDarrick J. Wong be32_to_cpu(k2->inobt.ir_startino); 271118bb47eSDarrick J. Wong } 272118bb47eSDarrick J. Wong 273a6a781a5SDarrick J. Wong static xfs_failaddr_t 27430f712c9SDave Chinner xfs_inobt_verify( 27530f712c9SDave Chinner struct xfs_buf *bp) 27630f712c9SDave Chinner { 277dbd329f1SChristoph Hellwig struct xfs_mount *mp = bp->b_mount; 27830f712c9SDave Chinner struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); 279a6a781a5SDarrick J. Wong xfs_failaddr_t fa; 28030f712c9SDave Chinner unsigned int level; 28130f712c9SDave Chinner 2828473fee3SBrian Foster if (!xfs_verify_magic(bp, block->bb_magic)) 2838473fee3SBrian Foster return __this_address; 2848473fee3SBrian Foster 28530f712c9SDave Chinner /* 28630f712c9SDave Chinner * During growfs operations, we can't verify the exact owner as the 28730f712c9SDave Chinner * perag is not fully initialised and hence not attached to the buffer. 
28830f712c9SDave Chinner * 28930f712c9SDave Chinner * Similarly, during log recovery we will have a perag structure 29030f712c9SDave Chinner * attached, but the agi information will not yet have been initialised 29130f712c9SDave Chinner * from the on disk AGI. We don't currently use any of this information, 29230f712c9SDave Chinner * but beware of the landmine (i.e. need to check pag->pagi_init) if we 29330f712c9SDave Chinner * ever do. 29430f712c9SDave Chinner */ 2958473fee3SBrian Foster if (xfs_sb_version_hascrc(&mp->m_sb)) { 296a6a781a5SDarrick J. Wong fa = xfs_btree_sblock_v5hdr_verify(bp); 297a6a781a5SDarrick J. Wong if (fa) 298a6a781a5SDarrick J. Wong return fa; 29930f712c9SDave Chinner } 30030f712c9SDave Chinner 301c5ab131bSDarrick J. Wong /* level verification */ 30230f712c9SDave Chinner level = be16_to_cpu(block->bb_level); 303ef325959SDarrick J. Wong if (level >= M_IGEO(mp)->inobt_maxlevels) 304a6a781a5SDarrick J. Wong return __this_address; 30530f712c9SDave Chinner 306ef325959SDarrick J. Wong return xfs_btree_sblock_verify(bp, 307ef325959SDarrick J. Wong M_IGEO(mp)->inobt_mxr[level != 0]); 30830f712c9SDave Chinner } 30930f712c9SDave Chinner 31030f712c9SDave Chinner static void 31130f712c9SDave Chinner xfs_inobt_read_verify( 31230f712c9SDave Chinner struct xfs_buf *bp) 31330f712c9SDave Chinner { 314bc1a09b8SDarrick J. Wong xfs_failaddr_t fa; 315bc1a09b8SDarrick J. Wong 31630f712c9SDave Chinner if (!xfs_btree_sblock_verify_crc(bp)) 317bc1a09b8SDarrick J. Wong xfs_verifier_error(bp, -EFSBADCRC, __this_address); 318bc1a09b8SDarrick J. Wong else { 319bc1a09b8SDarrick J. Wong fa = xfs_inobt_verify(bp); 320bc1a09b8SDarrick J. Wong if (fa) 321bc1a09b8SDarrick J. Wong xfs_verifier_error(bp, -EFSCORRUPTED, fa); 322bc1a09b8SDarrick J. Wong } 32330f712c9SDave Chinner 32431ca03c9SDarrick J. 
Wong if (bp->b_error) 32530f712c9SDave Chinner trace_xfs_btree_corrupt(bp, _RET_IP_); 32630f712c9SDave Chinner } 32730f712c9SDave Chinner 32830f712c9SDave Chinner static void 32930f712c9SDave Chinner xfs_inobt_write_verify( 33030f712c9SDave Chinner struct xfs_buf *bp) 33130f712c9SDave Chinner { 332bc1a09b8SDarrick J. Wong xfs_failaddr_t fa; 333bc1a09b8SDarrick J. Wong 334bc1a09b8SDarrick J. Wong fa = xfs_inobt_verify(bp); 335bc1a09b8SDarrick J. Wong if (fa) { 33630f712c9SDave Chinner trace_xfs_btree_corrupt(bp, _RET_IP_); 337bc1a09b8SDarrick J. Wong xfs_verifier_error(bp, -EFSCORRUPTED, fa); 33830f712c9SDave Chinner return; 33930f712c9SDave Chinner } 34030f712c9SDave Chinner xfs_btree_sblock_calc_crc(bp); 34130f712c9SDave Chinner 34230f712c9SDave Chinner } 34330f712c9SDave Chinner 34430f712c9SDave Chinner const struct xfs_buf_ops xfs_inobt_buf_ops = { 345233135b7SEric Sandeen .name = "xfs_inobt", 3468473fee3SBrian Foster .magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) }, 34730f712c9SDave Chinner .verify_read = xfs_inobt_read_verify, 34830f712c9SDave Chinner .verify_write = xfs_inobt_write_verify, 349b5572597SDarrick J. Wong .verify_struct = xfs_inobt_verify, 35030f712c9SDave Chinner }; 35130f712c9SDave Chinner 35201e68f40SBrian Foster const struct xfs_buf_ops xfs_finobt_buf_ops = { 35301e68f40SBrian Foster .name = "xfs_finobt", 3548473fee3SBrian Foster .magic = { cpu_to_be32(XFS_FIBT_MAGIC), 3558473fee3SBrian Foster cpu_to_be32(XFS_FIBT_CRC_MAGIC) }, 35601e68f40SBrian Foster .verify_read = xfs_inobt_read_verify, 35701e68f40SBrian Foster .verify_write = xfs_inobt_write_verify, 35801e68f40SBrian Foster .verify_struct = xfs_inobt_verify, 35901e68f40SBrian Foster }; 36001e68f40SBrian Foster 36130f712c9SDave Chinner STATIC int 36230f712c9SDave Chinner xfs_inobt_keys_inorder( 36330f712c9SDave Chinner struct xfs_btree_cur *cur, 3648e38dc88SDarrick J. Wong const union xfs_btree_key *k1, 3658e38dc88SDarrick J. 
Wong const union xfs_btree_key *k2) 36630f712c9SDave Chinner { 36730f712c9SDave Chinner return be32_to_cpu(k1->inobt.ir_startino) < 36830f712c9SDave Chinner be32_to_cpu(k2->inobt.ir_startino); 36930f712c9SDave Chinner } 37030f712c9SDave Chinner 37130f712c9SDave Chinner STATIC int 37230f712c9SDave Chinner xfs_inobt_recs_inorder( 37330f712c9SDave Chinner struct xfs_btree_cur *cur, 3748e38dc88SDarrick J. Wong const union xfs_btree_rec *r1, 3758e38dc88SDarrick J. Wong const union xfs_btree_rec *r2) 37630f712c9SDave Chinner { 37730f712c9SDave Chinner return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <= 37830f712c9SDave Chinner be32_to_cpu(r2->inobt.ir_startino); 37930f712c9SDave Chinner } 38030f712c9SDave Chinner 38130f712c9SDave Chinner static const struct xfs_btree_ops xfs_inobt_ops = { 38230f712c9SDave Chinner .rec_len = sizeof(xfs_inobt_rec_t), 38330f712c9SDave Chinner .key_len = sizeof(xfs_inobt_key_t), 38430f712c9SDave Chinner 38530f712c9SDave Chinner .dup_cursor = xfs_inobt_dup_cursor, 38630f712c9SDave Chinner .set_root = xfs_inobt_set_root, 38730f712c9SDave Chinner .alloc_block = xfs_inobt_alloc_block, 38830f712c9SDave Chinner .free_block = xfs_inobt_free_block, 38930f712c9SDave Chinner .get_minrecs = xfs_inobt_get_minrecs, 39030f712c9SDave Chinner .get_maxrecs = xfs_inobt_get_maxrecs, 39130f712c9SDave Chinner .init_key_from_rec = xfs_inobt_init_key_from_rec, 392118bb47eSDarrick J. Wong .init_high_key_from_rec = xfs_inobt_init_high_key_from_rec, 39330f712c9SDave Chinner .init_rec_from_cur = xfs_inobt_init_rec_from_cur, 39430f712c9SDave Chinner .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, 39530f712c9SDave Chinner .key_diff = xfs_inobt_key_diff, 39630f712c9SDave Chinner .buf_ops = &xfs_inobt_buf_ops, 397118bb47eSDarrick J. 
Wong .diff_two_keys = xfs_inobt_diff_two_keys, 39830f712c9SDave Chinner .keys_inorder = xfs_inobt_keys_inorder, 39930f712c9SDave Chinner .recs_inorder = xfs_inobt_recs_inorder, 40030f712c9SDave Chinner }; 40130f712c9SDave Chinner 40230f712c9SDave Chinner static const struct xfs_btree_ops xfs_finobt_ops = { 40330f712c9SDave Chinner .rec_len = sizeof(xfs_inobt_rec_t), 40430f712c9SDave Chinner .key_len = sizeof(xfs_inobt_key_t), 40530f712c9SDave Chinner 40630f712c9SDave Chinner .dup_cursor = xfs_inobt_dup_cursor, 40730f712c9SDave Chinner .set_root = xfs_finobt_set_root, 40876d771b4SChristoph Hellwig .alloc_block = xfs_finobt_alloc_block, 409ad90bb58SBrian Foster .free_block = xfs_finobt_free_block, 41030f712c9SDave Chinner .get_minrecs = xfs_inobt_get_minrecs, 41130f712c9SDave Chinner .get_maxrecs = xfs_inobt_get_maxrecs, 41230f712c9SDave Chinner .init_key_from_rec = xfs_inobt_init_key_from_rec, 413118bb47eSDarrick J. Wong .init_high_key_from_rec = xfs_inobt_init_high_key_from_rec, 41430f712c9SDave Chinner .init_rec_from_cur = xfs_inobt_init_rec_from_cur, 41530f712c9SDave Chinner .init_ptr_from_cur = xfs_finobt_init_ptr_from_cur, 41630f712c9SDave Chinner .key_diff = xfs_inobt_key_diff, 41701e68f40SBrian Foster .buf_ops = &xfs_finobt_buf_ops, 418118bb47eSDarrick J. Wong .diff_two_keys = xfs_inobt_diff_two_keys, 41930f712c9SDave Chinner .keys_inorder = xfs_inobt_keys_inorder, 42030f712c9SDave Chinner .recs_inorder = xfs_inobt_recs_inorder, 42130f712c9SDave Chinner }; 42230f712c9SDave Chinner 42330f712c9SDave Chinner /* 424c29ce8f4SDarrick J. Wong * Initialize a new inode btree cursor. 42530f712c9SDave Chinner */ 426c29ce8f4SDarrick J. Wong static struct xfs_btree_cur * 427c29ce8f4SDarrick J. 
Wong xfs_inobt_init_common( 42830f712c9SDave Chinner struct xfs_mount *mp, /* file system mount point */ 42930f712c9SDave Chinner struct xfs_trans *tp, /* transaction pointer */ 430be9fb17dSDave Chinner struct xfs_perag *pag, 43130f712c9SDave Chinner xfs_btnum_t btnum) /* ialloc or free ino btree */ 43230f712c9SDave Chinner { 43330f712c9SDave Chinner struct xfs_btree_cur *cur; 43430f712c9SDave Chinner 43532a2b11fSCarlos Maiolino cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL); 43630f712c9SDave Chinner cur->bc_tp = tp; 43730f712c9SDave Chinner cur->bc_mp = mp; 43830f712c9SDave Chinner cur->bc_btnum = btnum; 43930f712c9SDave Chinner if (btnum == XFS_BTNUM_INO) { 44011ef38afSDave Chinner cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2); 441c29ce8f4SDarrick J. Wong cur->bc_ops = &xfs_inobt_ops; 44230f712c9SDave Chinner } else { 44311ef38afSDave Chinner cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2); 444c29ce8f4SDarrick J. Wong cur->bc_ops = &xfs_finobt_ops; 44530f712c9SDave Chinner } 44630f712c9SDave Chinner 44730f712c9SDave Chinner cur->bc_blocklog = mp->m_sb.sb_blocklog; 44830f712c9SDave Chinner 44930f712c9SDave Chinner if (xfs_sb_version_hascrc(&mp->m_sb)) 45030f712c9SDave Chinner cur->bc_flags |= XFS_BTREE_CRC_BLOCKS; 45130f712c9SDave Chinner 452be9fb17dSDave Chinner /* take a reference for the cursor */ 453be9fb17dSDave Chinner atomic_inc(&pag->pag_ref); 454be9fb17dSDave Chinner cur->bc_ag.pag = pag; 45530f712c9SDave Chinner return cur; 45630f712c9SDave Chinner } 45730f712c9SDave Chinner 458c29ce8f4SDarrick J. Wong /* Create an inode btree cursor. */ 459c29ce8f4SDarrick J. Wong struct xfs_btree_cur * 460c29ce8f4SDarrick J. Wong xfs_inobt_init_cursor( 461c29ce8f4SDarrick J. Wong struct xfs_mount *mp, 462c29ce8f4SDarrick J. Wong struct xfs_trans *tp, 463c29ce8f4SDarrick J. Wong struct xfs_buf *agbp, 464be9fb17dSDave Chinner struct xfs_perag *pag, 465c29ce8f4SDarrick J. Wong xfs_btnum_t btnum) 466c29ce8f4SDarrick J. 
Wong { 467c29ce8f4SDarrick J. Wong struct xfs_btree_cur *cur; 468c29ce8f4SDarrick J. Wong struct xfs_agi *agi = agbp->b_addr; 469c29ce8f4SDarrick J. Wong 4707b13c515SDave Chinner cur = xfs_inobt_init_common(mp, tp, pag, btnum); 471c29ce8f4SDarrick J. Wong if (btnum == XFS_BTNUM_INO) 472c29ce8f4SDarrick J. Wong cur->bc_nlevels = be32_to_cpu(agi->agi_level); 473c29ce8f4SDarrick J. Wong else 474c29ce8f4SDarrick J. Wong cur->bc_nlevels = be32_to_cpu(agi->agi_free_level); 475c29ce8f4SDarrick J. Wong cur->bc_ag.agbp = agbp; 476c29ce8f4SDarrick J. Wong return cur; 477c29ce8f4SDarrick J. Wong } 478c29ce8f4SDarrick J. Wong 479c29ce8f4SDarrick J. Wong /* Create an inode btree cursor with a fake root for staging. */ 480c29ce8f4SDarrick J. Wong struct xfs_btree_cur * 481c29ce8f4SDarrick J. Wong xfs_inobt_stage_cursor( 482c29ce8f4SDarrick J. Wong struct xfs_mount *mp, 483c29ce8f4SDarrick J. Wong struct xbtree_afakeroot *afake, 4847b13c515SDave Chinner struct xfs_perag *pag, 485c29ce8f4SDarrick J. Wong xfs_btnum_t btnum) 486c29ce8f4SDarrick J. Wong { 487c29ce8f4SDarrick J. Wong struct xfs_btree_cur *cur; 488c29ce8f4SDarrick J. Wong 4897b13c515SDave Chinner cur = xfs_inobt_init_common(mp, NULL, pag, btnum); 490c29ce8f4SDarrick J. Wong xfs_btree_stage_afakeroot(cur, afake); 491c29ce8f4SDarrick J. Wong return cur; 492c29ce8f4SDarrick J. Wong } 493c29ce8f4SDarrick J. Wong 494c29ce8f4SDarrick J. Wong /* 495c29ce8f4SDarrick J. Wong * Install a new inobt btree root. Caller is responsible for invalidating 496c29ce8f4SDarrick J. Wong * and freeing the old btree blocks. 497c29ce8f4SDarrick J. Wong */ 498c29ce8f4SDarrick J. Wong void 499c29ce8f4SDarrick J. Wong xfs_inobt_commit_staged_btree( 500c29ce8f4SDarrick J. Wong struct xfs_btree_cur *cur, 501c29ce8f4SDarrick J. Wong struct xfs_trans *tp, 502c29ce8f4SDarrick J. Wong struct xfs_buf *agbp) 503c29ce8f4SDarrick J. Wong { 504c29ce8f4SDarrick J. Wong struct xfs_agi *agi = agbp->b_addr; 505c29ce8f4SDarrick J. 
Wong struct xbtree_afakeroot *afake = cur->bc_ag.afake; 50611f74423SDarrick J. Wong int fields; 507c29ce8f4SDarrick J. Wong 508c29ce8f4SDarrick J. Wong ASSERT(cur->bc_flags & XFS_BTREE_STAGING); 509c29ce8f4SDarrick J. Wong 510c29ce8f4SDarrick J. Wong if (cur->bc_btnum == XFS_BTNUM_INO) { 51111f74423SDarrick J. Wong fields = XFS_AGI_ROOT | XFS_AGI_LEVEL; 512c29ce8f4SDarrick J. Wong agi->agi_root = cpu_to_be32(afake->af_root); 513c29ce8f4SDarrick J. Wong agi->agi_level = cpu_to_be32(afake->af_levels); 51411f74423SDarrick J. Wong if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) { 51511f74423SDarrick J. Wong agi->agi_iblocks = cpu_to_be32(afake->af_blocks); 51611f74423SDarrick J. Wong fields |= XFS_AGI_IBLOCKS; 51711f74423SDarrick J. Wong } 51811f74423SDarrick J. Wong xfs_ialloc_log_agi(tp, agbp, fields); 519c29ce8f4SDarrick J. Wong xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops); 520c29ce8f4SDarrick J. Wong } else { 52111f74423SDarrick J. Wong fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL; 522c29ce8f4SDarrick J. Wong agi->agi_free_root = cpu_to_be32(afake->af_root); 523c29ce8f4SDarrick J. Wong agi->agi_free_level = cpu_to_be32(afake->af_levels); 52411f74423SDarrick J. Wong if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) { 52511f74423SDarrick J. Wong agi->agi_fblocks = cpu_to_be32(afake->af_blocks); 52611f74423SDarrick J. Wong fields |= XFS_AGI_IBLOCKS; 52711f74423SDarrick J. Wong } 52811f74423SDarrick J. Wong xfs_ialloc_log_agi(tp, agbp, fields); 529c29ce8f4SDarrick J. Wong xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops); 530c29ce8f4SDarrick J. Wong } 531c29ce8f4SDarrick J. Wong } 532c29ce8f4SDarrick J. Wong 53330f712c9SDave Chinner /* 53430f712c9SDave Chinner * Calculate number of records in an inobt btree block. 
53530f712c9SDave Chinner */ 53630f712c9SDave Chinner int 53730f712c9SDave Chinner xfs_inobt_maxrecs( 53830f712c9SDave Chinner struct xfs_mount *mp, 53930f712c9SDave Chinner int blocklen, 54030f712c9SDave Chinner int leaf) 54130f712c9SDave Chinner { 54230f712c9SDave Chinner blocklen -= XFS_INOBT_BLOCK_LEN(mp); 54330f712c9SDave Chinner 54430f712c9SDave Chinner if (leaf) 54530f712c9SDave Chinner return blocklen / sizeof(xfs_inobt_rec_t); 54630f712c9SDave Chinner return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t)); 54730f712c9SDave Chinner } 5484148c347SBrian Foster 5494148c347SBrian Foster /* 5504148c347SBrian Foster * Convert the inode record holemask to an inode allocation bitmap. The inode 5514148c347SBrian Foster * allocation bitmap is inode granularity and specifies whether an inode is 5524148c347SBrian Foster * physically allocated on disk (not whether the inode is considered allocated 5534148c347SBrian Foster * or free by the fs). 5544148c347SBrian Foster * 5554148c347SBrian Foster * A bit value of 1 means the inode is allocated, a value of 0 means it is free. 5564148c347SBrian Foster */ 5574148c347SBrian Foster uint64_t 5584148c347SBrian Foster xfs_inobt_irec_to_allocmask( 5594148c347SBrian Foster struct xfs_inobt_rec_incore *rec) 5604148c347SBrian Foster { 5614148c347SBrian Foster uint64_t bitmap = 0; 5624148c347SBrian Foster uint64_t inodespbit; 5634148c347SBrian Foster int nextbit; 5644148c347SBrian Foster uint allocbitmap; 5654148c347SBrian Foster 5664148c347SBrian Foster /* 5674148c347SBrian Foster * The holemask has 16-bits for a 64 inode record. Therefore each 5684148c347SBrian Foster * holemask bit represents multiple inodes. Create a mask of bits to set 5694148c347SBrian Foster * in the allocmask for each holemask bit. 
 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		/* holemask is only 16 bits wide, so nextbit must stay in range */
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 *
 * Expand the record's holemask into a bit-per-inode allocation bitmap and
 * count the set bits; that population count must equal the record's
 * ir_count field.
 *
 * Returns 0 if the counts agree, or -EFSCORRUPTED on a mismatch.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	/* xfs_next_bit() scans uint-sized words, so compute how many we have. */
	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	/* Count one inode for every set bit in the allocation bitmap. */
	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG || XFS_WARN */

/*
 * Compute the maximum theoretical size of an inode btree in this AG, in
 * filesystem blocks, assuming every inode in the AG appears in the tree.
 * Used below to size the per-AG finobt block reservation.
 */
static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
		agblocks -= mp->m_sb.sb_logblocks;

	/* Worst case: every inode-capable block holds a full inode chunk. */
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}

/*
 * Read AGI and create inobt cursor.
 *
 * @which selects the btree (XFS_BTNUM_INO or XFS_BTNUM_FINO).  On success
 * the caller owns both the cursor and the AGI buffer and must release them
 * (see xfs_inobt_count_blocks() below for the teardown pattern).
 */
int
xfs_inobt_cur(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		which,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp)
{
	struct xfs_btree_cur	*cur;
	int			error;

	/* Out parameters must arrive empty; we overwrite them on success. */
	ASSERT(*agi_bpp == NULL);
	ASSERT(*curpp == NULL);

	error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, agi_bpp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, pag, which);
	*curpp = cur;
	return 0;
}

/*
 * Count the blocks of the given inode btree by walking the entire tree.
 * Sets *tree_blocks on success.
 */
static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp = NULL;
	struct xfs_btree_cur	*cur = NULL;
	int			error;

	error = xfs_inobt_cur(mp, tp, pag, btnum, &cur, &agbp);
	if (error)
		return error;

	error = xfs_btree_count_blocks(cur, tree_blocks);
	/* Pass error through so cursor teardown knows whether we failed. */
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/*
 * Read finobt block count from AGI header.  Only valid when the inode
 * btree block counters feature is enabled (see caller's gating below).
 */
static int
xfs_finobt_read_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_agi		*agi;
	int			error;

	error = xfs_ialloc_read_agi(mp, tp, pag->pag_agno, &agbp);
	if (error)
		return error;

	agi = agbp->b_addr;
	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
	xfs_trans_brelse(tp, agbp);
	return 0;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 *
 * Adds the worst-case finobt size for this AG to *ask and the current
 * finobt block count to *used; both are cumulative across callers.
 * Returns 0 (a no-op) if the filesystem has no finobt at all.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	/*
	 * Prefer the cached count in the AGI header when available; fall
	 * back to walking the whole finobt otherwise.
	 */
	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		error = xfs_finobt_read_blocks(mp, tp, pag, &tree_len);
	else
		error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
				&tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp, pag->pag_agno);
	*used += tree_len;
	return 0;
}

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}