xref: /openbmc/linux/fs/xfs/libxfs/xfs_rmap_btree.c (revision 6abc7aef)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"

static struct kmem_cache	*xfs_rmapbt_cur_cache;

/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped.  Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */

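/*
 * Duplicate an rmapbt cursor.  The new cursor points at the same AGF buffer
 * and perag as the original.
 */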
static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
				cur->bc_ag.agbp, cur->bc_ag.pag);
}

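/*
 * Record a new btree root in the AGF and in the cached perag copy, adjust the
 * stored tree level by @inc, and log the change.
 */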
STATIC void
xfs_rmapbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

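/*
 * Allocate a new rmapbt block from the AG free list and account for it in the
 * AGF and the per-AG reservation.  @stat is set to zero if the free list
 * could not supply a block.
 */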
STATIC int
xfs_rmapbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(pag, cur->bc_tp, cur->bc_ag.agbp,
				       &bno, 1);
	if (error)
		return error;

	trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);

	new->s = cpu_to_be32(bno);
	be32_add_cpu(&agf->agf_rmap_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);

	*stat = 1;
	return 0;
}

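/*
 * Return an rmapbt block to the AG free list, mark the extent busy so it is
 * not reused before the transaction commits, and give the block back to the
 * per-AG reservation.
 */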
STATIC int
xfs_rmapbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
			bno, 1);
	be32_add_cpu(&agf->agf_rmap_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
	error = xfs_alloc_put_freelist(pag, cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);

	xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
	return 0;
}

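/* Minimum number of records in an rmapbt block at the given level. */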
STATIC int
xfs_rmapbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mnr[level != 0];
}

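/* Maximum number of records in an rmapbt block at the given level. */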
STATIC int
xfs_rmapbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_rmap_mxr[level != 0];
}

/*
 * Convert the ondisk record's offset field into the ondisk key's offset field.
 * Fork and bmbt are significant parts of the rmap record key, but written
 * status is merely a record attribute.
 */
static inline __be64 ondisk_rec_offset_to_key(const union xfs_btree_rec *rec)
{
	return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
}

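/*
 * Initialize an on-disk key from the given record, masking the unwritten
 * flag out of the offset.
 */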
STATIC void
xfs_rmapbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
}

/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record.  In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
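/*
 * Illustrative example (made-up values): a data fork mapping with
 * rm_startblock 100, rm_blockcount 8 and rm_offset 20 yields a high key of
 * startblock 107 and offset 27.  A bmbt or non-inode owner record only has
 * its startblock adjusted; the offset is left alone.
 */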
STATIC void
xfs_rmapbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	uint64_t			off;
	int				adj;

	adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

	key->rmap.rm_startblock = rec->rmap.rm_startblock;
	be32_add_cpu(&key->rmap.rm_startblock, adj);
	key->rmap.rm_owner = rec->rmap.rm_owner;
	key->rmap.rm_offset = ondisk_rec_offset_to_key(rec);
	if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
	    XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
		return;
	off = be64_to_cpu(key->rmap.rm_offset);
	off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
	key->rmap.rm_offset = cpu_to_be64(off);
}

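/* Initialize an on-disk record from the in-core record in the cursor. */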
STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}

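/* Fetch the rmapbt root pointer recorded in the AGF. */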
STATIC void
xfs_rmapbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

/*
 * Mask the appropriate parts of the ondisk key field for a key comparison.
 * Fork and bmbt are significant parts of the rmap record key, but written
 * status is merely a record attribute.
 */
static inline uint64_t offset_keymask(uint64_t offset)
{
	return offset & ~XFS_RMAP_OFF_UNWRITTEN;
}

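/*
 * Compare an on-disk key against the in-core record in the cursor: first by
 * startblock, then owner, then the masked offset.
 */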
STATIC int64_t
xfs_rmapbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_rmap_irec		*rec = &cur->bc_rec.r;
	const struct xfs_rmap_key	*kp = &key->rmap;
	__u64				x, y;
	int64_t				d;

	d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
	if (d)
		return d;

	x = be64_to_cpu(kp->rm_owner);
	y = rec->rm_owner;
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = offset_keymask(be64_to_cpu(kp->rm_offset));
	y = offset_keymask(xfs_rmap_irec_offset_pack(rec));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

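/*
 * Compare two on-disk keys: first by startblock, then owner, then the masked
 * offset.
 */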
STATIC int64_t
xfs_rmapbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	const struct xfs_rmap_key	*kp1 = &k1->rmap;
	const struct xfs_rmap_key	*kp2 = &k2->rmap;
	int64_t				d;
	__u64				x, y;

	d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
		       be32_to_cpu(kp2->rm_startblock);
	if (d)
		return d;

	x = be64_to_cpu(kp1->rm_owner);
	y = be64_to_cpu(kp2->rm_owner);
	if (x > y)
		return 1;
	else if (y > x)
		return -1;

	x = offset_keymask(be64_to_cpu(kp1->rm_offset));
	y = offset_keymask(be64_to_cpu(kp2->rm_offset));
	if (x > y)
		return 1;
	else if (y > x)
		return -1;
	return 0;
}

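/* Structure verifier for rmapbt blocks. */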
static xfs_failaddr_t
xfs_rmapbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer.  In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (!xfs_has_rmapbt(mp))
		return __this_address;
	fa = xfs_btree_sblock_v5hdr_verify(bp);
	if (fa)
		return fa;

	level = be16_to_cpu(block->bb_level);
	if (pag && xfs_perag_initialised_agf(pag)) {
		if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
			return __this_address;
	} else if (level >= mp->m_rmap_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}

static void
xfs_rmapbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_rmapbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_rmapbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_rmapbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);

}

const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
	.name			= "xfs_rmapbt",
	.magic			= { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
	.verify_read		= xfs_rmapbt_read_verify,
	.verify_write		= xfs_rmapbt_write_verify,
	.verify_struct		= xfs_rmapbt_verify,
};

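/* Return 1 if key k1 sorts at or before key k2, 0 otherwise. */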
STATIC int
xfs_rmapbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(k1->rmap.rm_startblock);
	y = be32_to_cpu(k2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(k1->rmap.rm_owner);
	b = be64_to_cpu(k2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset));
	b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

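/* Return 1 if record r1 sorts at or before record r2, 0 otherwise. */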
STATIC int
xfs_rmapbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	uint32_t		x;
	uint32_t		y;
	uint64_t		a;
	uint64_t		b;

	x = be32_to_cpu(r1->rmap.rm_startblock);
	y = be32_to_cpu(r2->rmap.rm_startblock);
	if (x < y)
		return 1;
	else if (x > y)
		return 0;
	a = be64_to_cpu(r1->rmap.rm_owner);
	b = be64_to_cpu(r2->rmap.rm_owner);
	if (a < b)
		return 1;
	else if (a > b)
		return 0;
	a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset));
	b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset));
	if (a <= b)
		return 1;
	return 0;
}

STATIC enum xbtree_key_contig
xfs_rmapbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	/*
	 * We only support checking contiguity of the physical space component.
	 * If any callers ever need more specificity than that, they'll have to
	 * implement it here.
	 */
	return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
				 be32_to_cpu(key2->rmap.rm_startblock));
}

static const struct xfs_btree_ops xfs_rmapbt_ops = {
	.rec_len		= sizeof(struct xfs_rmap_rec),
	.key_len		= 2 * sizeof(struct xfs_rmap_key),

	.dup_cursor		= xfs_rmapbt_dup_cursor,
	.set_root		= xfs_rmapbt_set_root,
	.alloc_block		= xfs_rmapbt_alloc_block,
	.free_block		= xfs_rmapbt_free_block,
	.get_minrecs		= xfs_rmapbt_get_minrecs,
	.get_maxrecs		= xfs_rmapbt_get_maxrecs,
	.init_key_from_rec	= xfs_rmapbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_rmapbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_rmapbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_rmapbt_init_ptr_from_cur,
	.key_diff		= xfs_rmapbt_key_diff,
	.buf_ops		= &xfs_rmapbt_buf_ops,
	.diff_two_keys		= xfs_rmapbt_diff_two_keys,
	.keys_inorder		= xfs_rmapbt_keys_inorder,
	.recs_inorder		= xfs_rmapbt_recs_inorder,
	.keys_contiguous	= xfs_rmapbt_keys_contiguous,
};

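/*
 * Set up the parts of an rmapbt cursor that are common to regular and
 * staging cursors.
 */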
static struct xfs_btree_cur *
xfs_rmapbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	/* Overlapping btree; 2 keys per pointer. */
	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
			mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
	cur->bc_ops = &xfs_rmapbt_ops;

	cur->bc_ag.pag = xfs_perag_hold(pag);
	return cur;
}

/* Create a new reverse mapping btree cursor. */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, tp, pag);
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_rmapbt_init_common(mp, NULL, pag);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new reverse mapping btree root.  Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rmapbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
				    XFS_AGF_RMAP_BLOCKS);
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
}

/* Calculate number of records in a reverse mapping btree block. */
static inline unsigned int
xfs_rmapbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(struct xfs_rmap_rec);
	return blocklen /
		(2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}

/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_RMAP_BLOCK_LEN;
	return xfs_rmapbt_block_maxrecs(blocklen, leaf);
}
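
/*
 * Illustrative sizing, assuming the usual on-disk sizes (24-byte records,
 * 20-byte keys, 4-byte block pointers, 56-byte block header): a 4096-byte
 * block holds (4096 - 56) / 24 = 168 records as a leaf and
 * (4096 - 56) / 44 = 91 entries (two 20-byte keys plus a pointer each) as a
 * node.
 */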

/* Compute the max possible height for reverse mapping btrees. */
unsigned int
xfs_rmapbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

	minrecs[0] = xfs_rmapbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_rmapbt_block_maxrecs(blocklen, false) / 2;

	/*
	 * Compute the asymptotic maxlevels for an rmapbt on any reflink fs.
	 *
	 * On a reflink filesystem, each AG block can have up to 2^32 (per the
	 * refcount record format) owners, which means that theoretically we
	 * could face up to 2^64 rmap records.  However, we're likely to run
	 * out of blocks in the AG long before that happens, which means that
	 * we must compute the max height based on what the btree will look
	 * like if it consumes almost all the blocks in the AG due to maximal
	 * sharing factor.
	 */
	return xfs_btree_space_to_height(minrecs, XFS_MAX_CRC_AG_BLOCKS);
}

/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	if (!xfs_has_rmapbt(mp)) {
		mp->m_rmap_maxlevels = 0;
		return;
	}

	if (xfs_has_reflink(mp)) {
		/*
		 * Compute the asymptotic maxlevels for an rmap btree on a
		 * filesystem that supports reflink.
		 *
		 * On a reflink filesystem, each AG block can have up to 2^32
		 * (per the refcount record format) owners, which means that
		 * theoretically we could face up to 2^64 rmap records.
		 * However, we're likely to run out of blocks in the AG long
		 * before that happens, which means that we must compute the
		 * max height based on what the btree will look like if it
		 * consumes almost all the blocks in the AG due to maximal
		 * sharing factor.
		 */
		mp->m_rmap_maxlevels = xfs_btree_space_to_height(mp->m_rmap_mnr,
				mp->m_sb.sb_agblocks);
	} else {
		/*
		 * If there's no block sharing, compute the maximum rmapbt
		 * height assuming one rmap record per AG block.
		 */
		mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
				mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
	}
	ASSERT(mp->m_rmap_maxlevels <= xfs_rmapbt_maxlevels_ondisk());
}

/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}

/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_rmap_mxr[0] == 0)
		return 0;

	return xfs_rmapbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_has_rmapbt(mp))
		return 0;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		return error;

	agf = agbp->b_addr;
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
	xfs_trans_brelse(tp, agbp);

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (xfs_ag_contains_log(mp, pag->pag_agno))
		agblocks -= mp->m_sb.sb_logblocks;

	/* Reserve 1% of the AG or enough for 1 block per record. */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
	*used += tree_len;

	return error;
}

int __init
xfs_rmapbt_init_cur_cache(void)
{
	xfs_rmapbt_cur_cache = kmem_cache_create("xfs_rmapbt_cur",
			xfs_btree_cur_sizeof(xfs_rmapbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_rmapbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_rmapbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_rmapbt_cur_cache);
	xfs_rmapbt_cur_cache = NULL;
}