xref: /openbmc/linux/fs/xfs/libxfs/xfs_ialloc_btree.c (revision 8473fee3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"


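/* Return the minimum number of records in an inobt block (leaf vs. node). */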
STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mnr[level != 0];
}

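/* Duplicate the cursor, pointing at the same AGI buffer and btree type. */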
STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

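/*
 * Point the AGI at a new inobt root block, adjust the recorded tree height,
 * and log the updated fields.
 */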
STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

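/* As above, but for the free inode btree root and level in the AGI. */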
STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

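/*
 * Allocate a single block for the btree, as close as possible to the hinted
 * starting block, charging it against the given AG reservation type.
 */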
STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

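/*
 * finobt blocks normally come out of the per-AG metadata reservation; fall
 * back to an unreserved allocation if that reservation could not be
 * established at mount time.
 */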
STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

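/* Free a single btree block, crediting the given AG reservation type. */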
STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&XFS_RMAP_OINFO_INOBT, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

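/* Return finobt blocks to the metadata reservation unless it is unavailable. */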
STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_inotbt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

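/* Return the maximum number of records in an inobt block (leaf vs. node). */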
STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

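/*
 * The high key of an inobt record is the last inode number covered by the
 * chunk, i.e. ir_startino + XFS_INODES_PER_CHUNK - 1.
 */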
STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

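/* Construct the on-disk record from the in-core record in the cursor. */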
STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * initial value of ptr for lookup
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

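/*
 * initial value of ptr for a free inode btree lookup
 */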
STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

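/*
 * Key comparison for lookups: the key's startino minus the startino of the
 * cursor's in-core record.
 */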
STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			  cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			  be32_to_cpu(k2->inobt.ir_startino);
}

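/*
 * Structural checks for an inode btree block: magic number, v5 header fields
 * on CRC-enabled filesystems, btree level, and the generic short-form block
 * checks (record count, sibling pointers).
 */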
static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= mp->m_in_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

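/* Keys are in order if the first key's starting inode is strictly lower. */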
STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

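/*
 * Records are in order if the chunks do not overlap: each record covers
 * XFS_INODES_PER_CHUNK inodes, so the next record must start at or beyond the
 * end of the previous one.
 */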
STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_finobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Allocate a new inode btree cursor.
 */
struct xfs_btree_cur *				/* new inode btree cursor */
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agi structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
	struct xfs_btree_cur	*cur;

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
		cur->bc_ops = &xfs_inobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
	} else {
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
		cur->bc_ops = &xfs_finobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
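/*
 * For example, with the usual 64-inode chunk and 16-bit holemask (four inodes
 * per holemask bit), ir_holemask = 0xff00 marks inodes 32-63 of the chunk as
 * sparse, so the returned bitmap is 0x00000000ffffffff (inodes 0-31 are
 * physically allocated).
 */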
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16 bits for a 64-inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_inobt_mxr[0] == 0)
		return 0;

	return xfs_btree_calc_size(mp->m_inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO, &tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp, agno);
	*used += tree_len;
	return 0;
}

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_inobt_mnr, len);
}