xref: /openbmc/linux/fs/xfs/libxfs/xfs_ag.c (revision b16817b66b6c97d2a812d663d26faed40079892a)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

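/*
 * Allocate and zero an uncached buffer for a new AG header, set its disk
 * address and attach the supplied verifier ops so it is ready to be stamped
 * with the new header contents.
 */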
static struct xfs_buf *
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno, 0);
}

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno, 0);
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno, 0);
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno, 0);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so its
	 * location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = XFS_BUF_TO_SBP(bp);

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

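/*
 * Initialise a new AGF with the AG geometry, the locations and levels of the
 * free space btree roots and the initial free space counters. The free list
 * starts out empty; the rmap and refcount btree roots are only filled in when
 * those features are enabled.
 */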
static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}
}

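/*
 * Initialise an empty AGFL: stamp the header fields on CRC-enabled
 * filesystems and mark every free list slot as unused (NULLAGBLOCK).
 */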
static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

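/*
 * Initialise a new AGI with the AG geometry, the inode btree root (and free
 * inode btree root, if enabled), zeroed inode counters and empty unlinked
 * list buckets.
 */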
static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(bp);
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
}

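/* Per-header initialisation callback used by xfs_ag_init_hdr(). */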
typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);

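/*
 * Grab an uncached buffer for the given AG header, run the supplied
 * initialisation callback on it and queue the result on the caller's delayed
 * write list for later submission.
 */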
static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
	if (!bp)
		return -ENOMEM;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

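/*
 * Describes a single AG header or btree root block that needs to be written
 * out during a grow operation: its disk address and length, the verifier ops
 * and initialisation callback to use, the btree type where relevant, and
 * whether the block is needed at all for this filesystem's feature set.
 */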
struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so it can submit them to
 * disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_allocbt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_allocbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_sb_version_hasreflink(&mp->m_sb)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
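
/*
 * A rough caller sketch (illustrative only, not part of this file): a grow
 * operation would fill in an aghdr_init_data per new AG (agno and agsize are
 * assumed to come from the caller), let xfs_ag_init_headers() queue the
 * header buffers, then submit the delayed write list once:
 *
 *	struct aghdr_init_data	id = {};
 *	int			error;
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	id.agno = agno;
 *	id.agsize = agsize;
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (error) {
 *		xfs_buf_delwri_cancel(&id.buffer_list);
 *		return error;
 *	}
 *	error = xfs_buf_delwri_submit(&id.buffer_list);
 */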