/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"

/*
 * Passive reference counting access wrappers to the perag structures. If the
 * per-ag structure is to be freed, the freeing code is responsible for cleaning
 * up objects with passive references before freeing the structure. This
 * includes things like cached buffers.
 */
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_get(pag, _RET_IP_);
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		atomic_inc(&pag->pag_ref);
	}
	rcu_read_unlock();
	return pag;
}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	unsigned int		tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_get_tag(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	rcu_read_unlock();
	return pag;
}

/* Get a passive reference to the given perag. */
struct xfs_perag *
xfs_perag_hold(
	struct xfs_perag	*pag)
{
	ASSERT(atomic_read(&pag->pag_ref) > 0 ||
	       atomic_read(&pag->pag_active_ref) > 0);

	trace_xfs_perag_hold(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	return pag;
}

void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_put(pag, _RET_IP_);
	ASSERT(atomic_read(&pag->pag_ref) > 0);
	atomic_dec(&pag->pag_ref);
}
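
/*
 * Illustrative sketch (not part of this file): a typical passive-reference
 * walk pairs each xfs_perag_get() with an xfs_perag_put() once the cached
 * per-ag state has been sampled; the loop body is a made-up example, only
 * the get/put pattern matters:
 *
 *	xfs_agnumber_t		agno;
 *	struct xfs_perag	*pag;
 *
 *	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *		pag = xfs_perag_get(mp, agno);
 *		total_free += pag->pagf_freeblks;
 *		xfs_perag_put(pag);
 *	}
 */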

/*
 * Active references for perag structures. This is for short term access to the
 * per ag structures for walking trees or accessing state. If an AG is being
 * shrunk or is offline, then this will fail to find that AG and return NULL
 * instead.
 */
struct xfs_perag *
xfs_perag_grab(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_grab(pag, _RET_IP_);
		if (!atomic_inc_not_zero(&pag->pag_active_ref))
			pag = NULL;
	}
	rcu_read_unlock();
	return pag;
}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_grab_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_grab_tag(pag, _RET_IP_);
	if (!atomic_inc_not_zero(&pag->pag_active_ref))
		pag = NULL;
	rcu_read_unlock();
	return pag;
}

void
xfs_perag_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_rele(pag, _RET_IP_);
	if (atomic_dec_and_test(&pag->pag_active_ref))
		wake_up(&pag->pag_active_wq);
}
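
/*
 * Illustrative sketch (not part of this file): active references are the
 * right tool when the loop body can block, because grabs fail (return NULL)
 * for AGs that are offline or being shrunk. A tagged walk in the style of
 * the inode cache iterators might look like:
 *
 *	struct xfs_perag	*pag;
 *	xfs_agnumber_t		agno = 0;
 *
 *	while ((pag = xfs_perag_grab_tag(mp, agno, XFS_ICI_RECLAIM_TAG))) {
 *		agno = pag->pag_agno + 1;
 *		... blocking work against the AG ...
 *		xfs_perag_rele(pag);
 *	}
 */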

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
		 */
		pag = xfs_perag_get(mp, index);
		error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}

		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
		xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out;
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = fdblocks;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	return error;
}

STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
void
xfs_free_perag(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
		xfs_defer_drain_free(&pag->pag_intents_drain);

		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_buf_hash_destroy(pag);

		/* drop the mount's active reference */
		xfs_perag_rele(pag);
		XFS_IS_CORRUPT(pag->pag_mount,
				atomic_read(&pag->pag_active_ref) != 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/* Find the size of the AG, in blocks. */
static xfs_agblock_t
__xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks)
{
	ASSERT(agno < agcount);

	if (agno < agcount - 1)
		return mp->m_sb.sb_agblocks;
	return dblocks - (agno * mp->m_sb.sb_agblocks);
}

xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
			mp->m_sb.sb_dblocks);
}

/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agblock_t		eoag,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	xfs_agblock_t		bno;

	/*
	 * Calculate the first inode, which will be in the first
	 * cluster-aligned block after the AGFL.
	 */
	bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
	*first = XFS_AGB_TO_AGINO(mp, bno);

	/*
	 * Calculate the last inode, which will be at the end of the
	 * last (aligned) cluster that can be allocated in the AG.
	 */
	bno = round_down(eoag, M_IGEO(mp)->cluster_align);
	*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}

void
xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}
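
/*
 * Worked example (illustrative, made-up geometry) for the two helpers above:
 * with sb_agblocks = 16384, sb_dblocks = 50000 and sb_agcount = 4, AGs 0-2
 * are full sized (16384 blocks each) while the runt AG 3 gets
 * 50000 - 3 * 16384 = 848 blocks. The inode range then follows from that
 * block count: assuming XFS_AGFL_BLOCK() evaluates to 3 and cluster_align
 * to 4 here, the first possible inode lives in block round_up(3 + 1, 4) = 4,
 * and the last possible inode is one below
 * XFS_AGB_TO_AGINO(mp, round_down(848, 4)).
 */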

/*
 * Free perags within the specified AG range; this is only used to free
 * unused perags on the error handling path.
 */
void
xfs_free_unused_perag_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agstart,
	xfs_agnumber_t		agend)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;

	for (index = agstart; index < agend; index++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		spin_unlock(&mp->m_perag_lock);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_defer_drain_free(&pag->pag_intents_drain);
		kmem_free(pag);
	}
}

int
xfs_initialize_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks,
	xfs_agnumber_t		*maxagi)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;
	xfs_agnumber_t		first_initialised = NULLAGNUMBER;
	int			error;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;

		error = radix_tree_preload(GFP_NOFS);
		if (error)
			goto out_free_pag;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_free_pag;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();

#ifdef __KERNEL__
		/* Place kernel structure only init below this point. */
		spin_lock_init(&pag->pag_ici_lock);
		spin_lock_init(&pag->pagb_lock);
		spin_lock_init(&pag->pag_state_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		xfs_defer_drain_init(&pag->pag_intents_drain);
		init_waitqueue_head(&pag->pagb_wait);
		init_waitqueue_head(&pag->pag_active_wq);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
#endif /* __KERNEL__ */

		error = xfs_buf_hash_init(pag);
		if (error)
			goto out_remove_pag;

		/* Active ref owned by mount indicates AG is online. */
		atomic_set(&pag->pag_active_ref, 1);

		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;

		/*
		 * Pre-calculated geometry
		 */
		pag->block_count = __xfs_ag_block_count(mp, index, agcount,
				dblocks);
		pag->min_block = XFS_AGFL_BLOCK(mp);
		__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_remove_pag:
	xfs_defer_drain_free(&pag->pag_intents_drain);
	spin_lock(&mp->m_perag_lock);
	radix_tree_delete(&mp->m_perag_tree, index);
	spin_unlock(&mp->m_perag_lock);
out_free_pag:
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	xfs_free_unused_perag_range(mp, first_initialised, agcount);
	return error;
}
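
/*
 * Illustrative sketch (loosely based on the growfs path, not a verbatim
 * caller): after computing the post-grow AG count from the new data device
 * size, growfs relies on the walk above to initialise only the perags that
 * don't exist yet; nagcount, nb and nagimax are assumed locals:
 *
 *	error = xfs_initialize_perag(mp, nagcount, nb, &nagimax);
 *	if (error)
 *		return error;
 *	...
 *	mp->m_maxagi = nagimax;	// once the new geometry is committed
 */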

static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}

/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (xfs_ag_contains_log(mp, id->agno)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log and
			 * bump the record count.
			 */
			arec->ar_blockcount = cpu_to_be32(start -
					mp->m_ag_prealloc_blocks);
			be16_add_cpu(&block->bb_numrecs, 1);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the block count of this record; if it is nonzero,
	 * increment the record count.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
			be32_to_cpu(arec->ar_startblock));
	if (arec->ar_blockcount)
		be16_add_cpu(&block->bb_numrecs, 1);
}
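
/*
 * Worked example (illustrative, made-up numbers) tracing the function above
 * for an AG that contains the internal log: with m_ag_prealloc_blocks = 64,
 * the log starting at AG block 128 and sb_logblocks = 512 in an 8192-block
 * AG, the root block ends up with two records:
 *
 *	record 1: startblock 64,  blockcount 128 - 64 = 64    (pre-log space)
 *	record 2: startblock 128 + 512 = 640,
 *		  blockcount 8192 - 640 = 7552                (post-log space)
 *
 * If the log started exactly at m_ag_prealloc_blocks, only the post-log
 * record would be emitted, and any record whose computed blockcount is zero
 * is dropped entirely.
 */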

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 0, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 0, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

	/*
	 * mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_has_reflink(mp)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (xfs_ag_contains_log(mp, id->agno)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_has_rmapbt(mp)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (xfs_ag_contains_log(mp, id->agno)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_has_crc(mp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_finobt(mp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_has_inobtcounts(mp)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_has_finobt(mp))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);
static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to a
 * delayed write buffer list supplied by the caller so they can submit them to
 * disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_has_finobt(mp)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_has_rmapbt(mp)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_has_reflink(mp)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
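
/*
 * Illustrative sketch (loosely based on the growfs path, not a verbatim
 * caller): the aghdr_init_data carries the delayed write list, so a caller
 * stages every header of a new AG with this function and then submits the
 * whole batch at once:
 *
 *	struct aghdr_init_data	id = {};
 *	int			error;
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	id.agno = agno;			// assumed locals
 *	id.agsize = agsize;
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (!error)
 *		error = xfs_buf_delwri_submit(&id.buffer_list);
 */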

int
xfs_ag_shrink_space(
	struct xfs_perag	*pag,
	struct xfs_trans	**tpp,
	xfs_extlen_t		delta)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_alloc_arg	args = {
		.tp	= *tpp,
		.mp	= mp,
		.pag	= pag,
		.minlen = delta,
		.maxlen = delta,
		.oinfo	= XFS_RMAP_OINFO_SKIP_UPDATE,
		.resv	= XFS_AG_RESV_NONE,
		.prod	= 1
	};
	struct xfs_buf		*agibp, *agfbp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	xfs_agblock_t		aglen;
	int			error, err2;

	ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
	if (error)
		return error;

	agi = agibp->b_addr;

	error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);
	/* some extra paranoid checks before we shrink the ag */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
		return -EFSCORRUPTED;
	if (delta >= aglen)
		return -EINVAL;

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
	if (error)
		return error;

	/*
	 * Disable perag reservations so they don't cause the allocation
	 * request to fail. We'll reestablish the reservation before we return.
	 */
	error = xfs_ag_resv_free(pag);
	if (error)
		return error;

	/* internal log shouldn't also show up in the free space btrees */
	error = xfs_alloc_vextent_exact_bno(&args,
			XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta));
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;

	if (error) {
		/*
		 * If extent allocation fails, need to roll the transaction to
		 * ensure that the AGFL fixup has been committed anyway.
		 *
		 * We need to hold the AGF across the roll to ensure nothing can
		 * access the AG for allocation until the shrink is fully
		 * cleaned up. And due to the resetting of the AG block
		 * reservation space needing to lock the AGI, we also have to
		 * hold that so we don't get AGI/AGF lock order inversions in
		 * the error handling path.
		 */
		xfs_trans_bhold(*tpp, agfbp);
		xfs_trans_bhold(*tpp, agibp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
		xfs_trans_bjoin(*tpp, agibp);
		goto resv_init_out;
	}

	/*
	 * if successfully deleted from freespace btrees, need to confirm
	 * per-AG reservation works as expected.
	 */
	be32_add_cpu(&agi->agi_length, -delta);
	be32_add_cpu(&agf->agf_length, -delta);

	err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		err2 = __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
				XFS_AG_RESV_NONE, true);
		if (err2)
			goto resv_err;

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

		error = -ENOSPC;
		goto resv_init_out;
	}

	/* Update perag geometry */
	pag->block_count -= delta;
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);

	xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
	xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
	return 0;

resv_init_out:
	err2 = xfs_ag_resv_init(pag, *tpp);
	if (!err2)
		return error;
resv_err:
	xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return err2;
}

/*
 * Extend the AG indicated by @pag by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);

	error = xfs_ialloc_read_agi(pag, tp, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(pag, tp, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len,
			len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE);
	if (error)
		return error;

	/* Update perag geometry */
	pag->block_count = be32_to_cpu(agf->agf_length);
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	return 0;
}

/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_perag	*pag,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	unsigned int		freeblks;
	int			error;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
	if (error)
		goto out_agi;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = pag->pag_agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}
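
/*
 * Illustrative sketch (not part of this file): in the kernel,
 * xfs_ag_get_geometry() is driven by the XFS_IOC_AG_GEOMETRY ioctl, so
 * userspace can read the same information roughly like this (error handling
 * elided):
 *
 *	struct xfs_ag_geometry	ageo = { .ag_number = 0 };
 *
 *	ioctl(fd, XFS_IOC_AG_GEOMETRY, &ageo);
 *	printf("AG %u: %u blocks, %u free\n",
 *			ageo.ag_number, ageo.ag_length, ageo.ag_freeblks);
 */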