/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"

static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
{
	return mp->m_sb.sb_logstart > 0 &&
	       id->agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}

/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

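	/* The first record starts just past the preallocated AG header blocks. */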
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (is_log_ag(mp, id)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log
			 */
			arec->ar_blockcount = cpu_to_be32(start -
						mp->m_ag_prealloc_blocks);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
			be16_add_cpu(&block->bb_numrecs, 1);
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the record block count and check for the case where
	 * the log might have consumed all available space in the AG. If
	 * so, reset the record count to 0 to avoid exposure of an invalid
	 * record start block.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					be32_to_cpu(arec->ar_startblock));
	if (!arec->ar_blockcount)
		block->bb_numrecs = 0;
}

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
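	/* The cntbt root gets the same initial free space records as the bnobt. */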
	xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so its
	 * location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (is_log_ag(mp, id)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
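	/* Version 5 superblocks stamp the AGF with the metadata UUID. */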
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (is_log_ag(mp, id)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
}

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);
static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to a
 * delayed write buffer list supplied by the caller so they can submit them to
 * disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_sb_version_hasreflink(&mp->m_sb)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data	*dp;
	int				error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}

/*
 * Extend the AG indicated by @id by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	/*
	 * Change the agi length.
	 */
	error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
	       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, id->agno,
				be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
					be32_to_cpu(agf->agf_length) - len),
					len, &XFS_RMAP_OINFO_SKIP_UPDATE,
					XFS_AG_RESV_NONE);
}

/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	unsigned int		freeblks;
	int			error;

	if (agno >= mp->m_sb.sb_agcount)
		return -EINVAL;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(mp, NULL, agno, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp);
	if (error)
		goto out_agi;
	pag = xfs_perag_get(mp, agno);

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_perag_put(pag);
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}