10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0 230f712c9SDave Chinner /* 330f712c9SDave Chinner * Copyright (c) 2000-2006 Silicon Graphics, Inc. 430f712c9SDave Chinner * All Rights Reserved. 530f712c9SDave Chinner */ 630f712c9SDave Chinner #include "xfs.h" 730f712c9SDave Chinner #include "xfs_fs.h" 830f712c9SDave Chinner #include "xfs_shared.h" 930f712c9SDave Chinner #include "xfs_format.h" 1030f712c9SDave Chinner #include "xfs_log_format.h" 1130f712c9SDave Chinner #include "xfs_trans_resv.h" 1230f712c9SDave Chinner #include "xfs_bit.h" 1330f712c9SDave Chinner #include "xfs_sb.h" 1430f712c9SDave Chinner #include "xfs_mount.h" 153ab78df2SDarrick J. Wong #include "xfs_defer.h" 1630f712c9SDave Chinner #include "xfs_dir2.h" 1730f712c9SDave Chinner #include "xfs_inode.h" 1830f712c9SDave Chinner #include "xfs_btree.h" 1930f712c9SDave Chinner #include "xfs_trans.h" 2030f712c9SDave Chinner #include "xfs_alloc.h" 2130f712c9SDave Chinner #include "xfs_bmap.h" 2230f712c9SDave Chinner #include "xfs_bmap_util.h" 2330f712c9SDave Chinner #include "xfs_bmap_btree.h" 2430f712c9SDave Chinner #include "xfs_rtalloc.h" 25e9e899a2SDarrick J. Wong #include "xfs_errortag.h" 2630f712c9SDave Chinner #include "xfs_error.h" 2730f712c9SDave Chinner #include "xfs_quota.h" 2830f712c9SDave Chinner #include "xfs_trans_space.h" 2930f712c9SDave Chinner #include "xfs_buf_item.h" 3030f712c9SDave Chinner #include "xfs_trace.h" 3130f712c9SDave Chinner #include "xfs_attr_leaf.h" 3230f712c9SDave Chinner #include "xfs_filestream.h" 33340785ccSDarrick J. Wong #include "xfs_rmap.h" 349bbafc71SDave Chinner #include "xfs_ag.h" 353fd129b6SDarrick J. Wong #include "xfs_ag_resv.h" 3662aab20fSDarrick J. Wong #include "xfs_refcount.h" 37974ae922SBrian Foster #include "xfs_icache.h" 384e087a3bSChristoph Hellwig #include "xfs_iomap.h" 3930f712c9SDave Chinner 40f3c799c2SDarrick J. 
Wong struct kmem_cache *xfs_bmap_intent_cache; 4130f712c9SDave Chinner 4230f712c9SDave Chinner /* 4330f712c9SDave Chinner * Miscellaneous helper functions 4430f712c9SDave Chinner */ 4530f712c9SDave Chinner 4630f712c9SDave Chinner /* 4730f712c9SDave Chinner * Compute and fill in the value of the maximum depth of a bmap btree 4830f712c9SDave Chinner * in this filesystem. Done once, during mount. 4930f712c9SDave Chinner */ 5030f712c9SDave Chinner void 5130f712c9SDave Chinner xfs_bmap_compute_maxlevels( 5230f712c9SDave Chinner xfs_mount_t *mp, /* file system mount structure */ 5330f712c9SDave Chinner int whichfork) /* data or attr fork */ 5430f712c9SDave Chinner { 550c35e7baSChandan Babu R uint64_t maxblocks; /* max blocks at this level */ 56bb1d5049SChandan Babu R xfs_extnum_t maxleafents; /* max leaf entries possible */ 5730f712c9SDave Chinner int level; /* btree level */ 5830f712c9SDave Chinner int maxrootrecs; /* max records in root block */ 5930f712c9SDave Chinner int minleafrecs; /* min records in leaf block */ 6030f712c9SDave Chinner int minnoderecs; /* min records in node block */ 6130f712c9SDave Chinner int sz; /* root block size */ 6230f712c9SDave Chinner 6330f712c9SDave Chinner /* 64df9ad5ccSChandan Babu R * The maximum number of extents in a fork, hence the maximum number of 65df9ad5ccSChandan Babu R * leaf entries, is controlled by the size of the on-disk extent count. 6630f712c9SDave Chinner * 677821ea30SChristoph Hellwig * Note that we can no longer assume that if we are in ATTR1 that the 687821ea30SChristoph Hellwig * fork offset of all the inodes will be 697821ea30SChristoph Hellwig * (xfs_default_attroffset(ip) >> 3) because we could have mounted with 707821ea30SChristoph Hellwig * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed 717821ea30SChristoph Hellwig * but probably at various positions. 
Therefore, for both ATTR1 and 727821ea30SChristoph Hellwig * ATTR2 we have to assume the worst case scenario of a minimum size 737821ea30SChristoph Hellwig * available. 7430f712c9SDave Chinner */ 75df9ad5ccSChandan Babu R maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp), 76df9ad5ccSChandan Babu R whichfork); 779feb8f19SChandan Babu R if (whichfork == XFS_DATA_FORK) 7830f712c9SDave Chinner sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS); 799feb8f19SChandan Babu R else 8030f712c9SDave Chinner sz = XFS_BMDR_SPACE_CALC(MINABTPTRS); 819feb8f19SChandan Babu R 8230f712c9SDave Chinner maxrootrecs = xfs_bmdr_maxrecs(sz, 0); 8330f712c9SDave Chinner minleafrecs = mp->m_bmap_dmnr[0]; 8430f712c9SDave Chinner minnoderecs = mp->m_bmap_dmnr[1]; 85755c38ffSChandan Babu R maxblocks = howmany_64(maxleafents, minleafrecs); 8630f712c9SDave Chinner for (level = 1; maxblocks > 1; level++) { 8730f712c9SDave Chinner if (maxblocks <= maxrootrecs) 8830f712c9SDave Chinner maxblocks = 1; 8930f712c9SDave Chinner else 900c35e7baSChandan Babu R maxblocks = howmany_64(maxblocks, minnoderecs); 9130f712c9SDave Chinner } 9230f712c9SDave Chinner mp->m_bm_maxlevels[whichfork] = level; 930ed5f735SDarrick J. 
Wong ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk()); 9430f712c9SDave Chinner } 9530f712c9SDave Chinner 96b2941046SDave Chinner unsigned int 97b2941046SDave Chinner xfs_bmap_compute_attr_offset( 98b2941046SDave Chinner struct xfs_mount *mp) 99b2941046SDave Chinner { 100b2941046SDave Chinner if (mp->m_sb.sb_inodesize == 256) 101b2941046SDave Chinner return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); 102b2941046SDave Chinner return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS); 103b2941046SDave Chinner } 104b2941046SDave Chinner 10530f712c9SDave Chinner STATIC int /* error */ 10630f712c9SDave Chinner xfs_bmbt_lookup_eq( 10730f712c9SDave Chinner struct xfs_btree_cur *cur, 108e16cf9b0SChristoph Hellwig struct xfs_bmbt_irec *irec, 10930f712c9SDave Chinner int *stat) /* success/failure */ 11030f712c9SDave Chinner { 111e16cf9b0SChristoph Hellwig cur->bc_rec.b = *irec; 11230f712c9SDave Chinner return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); 11330f712c9SDave Chinner } 11430f712c9SDave Chinner 11530f712c9SDave Chinner STATIC int /* error */ 116b5cfbc22SChristoph Hellwig xfs_bmbt_lookup_first( 11730f712c9SDave Chinner struct xfs_btree_cur *cur, 11830f712c9SDave Chinner int *stat) /* success/failure */ 11930f712c9SDave Chinner { 120b5cfbc22SChristoph Hellwig cur->bc_rec.b.br_startoff = 0; 121b5cfbc22SChristoph Hellwig cur->bc_rec.b.br_startblock = 0; 122b5cfbc22SChristoph Hellwig cur->bc_rec.b.br_blockcount = 0; 12330f712c9SDave Chinner return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); 12430f712c9SDave Chinner } 12530f712c9SDave Chinner 12630f712c9SDave Chinner /* 12730f712c9SDave Chinner * Check if the inode needs to be converted to btree format. 12830f712c9SDave Chinner */ 12930f712c9SDave Chinner static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) 13030f712c9SDave Chinner { 131732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 132daf83964SChristoph Hellwig 13360b4984fSDarrick J. 
Wong return whichfork != XFS_COW_FORK && 134f7e67b20SChristoph Hellwig ifp->if_format == XFS_DINODE_FMT_EXTENTS && 135daf83964SChristoph Hellwig ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork); 13630f712c9SDave Chinner } 13730f712c9SDave Chinner 13830f712c9SDave Chinner /* 13930f712c9SDave Chinner * Check if the inode should be converted to extent format. 14030f712c9SDave Chinner */ 14130f712c9SDave Chinner static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) 14230f712c9SDave Chinner { 143732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 144daf83964SChristoph Hellwig 14560b4984fSDarrick J. Wong return whichfork != XFS_COW_FORK && 146f7e67b20SChristoph Hellwig ifp->if_format == XFS_DINODE_FMT_BTREE && 147daf83964SChristoph Hellwig ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork); 14830f712c9SDave Chinner } 14930f712c9SDave Chinner 15030f712c9SDave Chinner /* 151a67d00a5SChristoph Hellwig * Update the record referred to by cur to the value given by irec 15230f712c9SDave Chinner * This either works (return 0) or gets an EFSCORRUPTED error. 15330f712c9SDave Chinner */ 15430f712c9SDave Chinner STATIC int 15530f712c9SDave Chinner xfs_bmbt_update( 15630f712c9SDave Chinner struct xfs_btree_cur *cur, 157a67d00a5SChristoph Hellwig struct xfs_bmbt_irec *irec) 15830f712c9SDave Chinner { 15930f712c9SDave Chinner union xfs_btree_rec rec; 16030f712c9SDave Chinner 161a67d00a5SChristoph Hellwig xfs_bmbt_disk_set_all(&rec.bmbt, irec); 16230f712c9SDave Chinner return xfs_btree_update(cur, &rec); 16330f712c9SDave Chinner } 16430f712c9SDave Chinner 16530f712c9SDave Chinner /* 16630f712c9SDave Chinner * Compute the worst-case number of indirect blocks that will be used 16730f712c9SDave Chinner * for ip's delayed extent of length "len". 
16830f712c9SDave Chinner */ 16930f712c9SDave Chinner STATIC xfs_filblks_t 17030f712c9SDave Chinner xfs_bmap_worst_indlen( 17130f712c9SDave Chinner xfs_inode_t *ip, /* incore inode pointer */ 17230f712c9SDave Chinner xfs_filblks_t len) /* delayed extent length */ 17330f712c9SDave Chinner { 17430f712c9SDave Chinner int level; /* btree level number */ 17530f712c9SDave Chinner int maxrecs; /* maximum record count at this level */ 17630f712c9SDave Chinner xfs_mount_t *mp; /* mount structure */ 17730f712c9SDave Chinner xfs_filblks_t rval; /* return value */ 17830f712c9SDave Chinner 17930f712c9SDave Chinner mp = ip->i_mount; 18030f712c9SDave Chinner maxrecs = mp->m_bmap_dmxr[0]; 18130f712c9SDave Chinner for (level = 0, rval = 0; 18230f712c9SDave Chinner level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); 18330f712c9SDave Chinner level++) { 18430f712c9SDave Chinner len += maxrecs - 1; 18530f712c9SDave Chinner do_div(len, maxrecs); 18630f712c9SDave Chinner rval += len; 1875e5c943cSDarrick J. Wong if (len == 1) 1885e5c943cSDarrick J. Wong return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - 18930f712c9SDave Chinner level - 1; 19030f712c9SDave Chinner if (level == 0) 19130f712c9SDave Chinner maxrecs = mp->m_bmap_dmxr[1]; 19230f712c9SDave Chinner } 19330f712c9SDave Chinner return rval; 19430f712c9SDave Chinner } 19530f712c9SDave Chinner 19630f712c9SDave Chinner /* 19730f712c9SDave Chinner * Calculate the default attribute fork offset for newly created inodes. 
19830f712c9SDave Chinner */ 19930f712c9SDave Chinner uint 20030f712c9SDave Chinner xfs_default_attroffset( 20130f712c9SDave Chinner struct xfs_inode *ip) 20230f712c9SDave Chinner { 203683ec9baSDave Chinner if (ip->i_df.if_format == XFS_DINODE_FMT_DEV) 204683ec9baSDave Chinner return roundup(sizeof(xfs_dev_t), 8); 205b2941046SDave Chinner return M_IGEO(ip->i_mount)->attr_fork_offset; 20630f712c9SDave Chinner } 20730f712c9SDave Chinner 20830f712c9SDave Chinner /* 2097821ea30SChristoph Hellwig * Helper routine to reset inode i_forkoff field when switching attribute fork 2107821ea30SChristoph Hellwig * from local to extent format - we reset it where possible to make space 2117821ea30SChristoph Hellwig * available for inline data fork extents. 21230f712c9SDave Chinner */ 21330f712c9SDave Chinner STATIC void 21430f712c9SDave Chinner xfs_bmap_forkoff_reset( 21530f712c9SDave Chinner xfs_inode_t *ip, 21630f712c9SDave Chinner int whichfork) 21730f712c9SDave Chinner { 21830f712c9SDave Chinner if (whichfork == XFS_ATTR_FORK && 219f7e67b20SChristoph Hellwig ip->i_df.if_format != XFS_DINODE_FMT_DEV && 220f7e67b20SChristoph Hellwig ip->i_df.if_format != XFS_DINODE_FMT_BTREE) { 22130f712c9SDave Chinner uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; 22230f712c9SDave Chinner 2237821ea30SChristoph Hellwig if (dfl_forkoff > ip->i_forkoff) 2247821ea30SChristoph Hellwig ip->i_forkoff = dfl_forkoff; 22530f712c9SDave Chinner } 22630f712c9SDave Chinner } 22730f712c9SDave Chinner 22830f712c9SDave Chinner #ifdef DEBUG 22930f712c9SDave Chinner STATIC struct xfs_buf * 23030f712c9SDave Chinner xfs_bmap_get_bp( 23130f712c9SDave Chinner struct xfs_btree_cur *cur, 23230f712c9SDave Chinner xfs_fsblock_t bno) 23330f712c9SDave Chinner { 234e6631f85SDave Chinner struct xfs_log_item *lip; 23530f712c9SDave Chinner int i; 23630f712c9SDave Chinner 23730f712c9SDave Chinner if (!cur) 23830f712c9SDave Chinner return NULL; 23930f712c9SDave Chinner 240c0643f6fSDarrick J. 
Wong for (i = 0; i < cur->bc_maxlevels; i++) { 2416ca444cfSDarrick J. Wong if (!cur->bc_levels[i].bp) 24230f712c9SDave Chinner break; 2436ca444cfSDarrick J. Wong if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno) 2446ca444cfSDarrick J. Wong return cur->bc_levels[i].bp; 24530f712c9SDave Chinner } 24630f712c9SDave Chinner 24730f712c9SDave Chinner /* Chase down all the log items to see if the bp is there */ 248e6631f85SDave Chinner list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) { 249e6631f85SDave Chinner struct xfs_buf_log_item *bip = (struct xfs_buf_log_item *)lip; 250e6631f85SDave Chinner 25130f712c9SDave Chinner if (bip->bli_item.li_type == XFS_LI_BUF && 25204fcad80SDave Chinner xfs_buf_daddr(bip->bli_buf) == bno) 25330f712c9SDave Chinner return bip->bli_buf; 25430f712c9SDave Chinner } 25530f712c9SDave Chinner 25630f712c9SDave Chinner return NULL; 25730f712c9SDave Chinner } 25830f712c9SDave Chinner 25930f712c9SDave Chinner STATIC void 26030f712c9SDave Chinner xfs_check_block( 26130f712c9SDave Chinner struct xfs_btree_block *block, 26230f712c9SDave Chinner xfs_mount_t *mp, 26330f712c9SDave Chinner int root, 26430f712c9SDave Chinner short sz) 26530f712c9SDave Chinner { 26630f712c9SDave Chinner int i, j, dmxr; 26730f712c9SDave Chinner __be64 *pp, *thispa; /* pointer to block address */ 26830f712c9SDave Chinner xfs_bmbt_key_t *prevp, *keyp; 26930f712c9SDave Chinner 27030f712c9SDave Chinner ASSERT(be16_to_cpu(block->bb_level) > 0); 27130f712c9SDave Chinner 27230f712c9SDave Chinner prevp = NULL; 27330f712c9SDave Chinner for( i = 1; i <= xfs_btree_get_numrecs(block); i++) { 27430f712c9SDave Chinner dmxr = mp->m_bmap_dmxr[0]; 27530f712c9SDave Chinner keyp = XFS_BMBT_KEY_ADDR(mp, block, i); 27630f712c9SDave Chinner 27730f712c9SDave Chinner if (prevp) { 27830f712c9SDave Chinner ASSERT(be64_to_cpu(prevp->br_startoff) < 27930f712c9SDave Chinner be64_to_cpu(keyp->br_startoff)); 28030f712c9SDave Chinner } 28130f712c9SDave Chinner prevp = keyp; 28230f712c9SDave Chinner 
28330f712c9SDave Chinner /* 28430f712c9SDave Chinner * Compare the block numbers to see if there are dups. 28530f712c9SDave Chinner */ 28630f712c9SDave Chinner if (root) 28730f712c9SDave Chinner pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz); 28830f712c9SDave Chinner else 28930f712c9SDave Chinner pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr); 29030f712c9SDave Chinner 29130f712c9SDave Chinner for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) { 29230f712c9SDave Chinner if (root) 29330f712c9SDave Chinner thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz); 29430f712c9SDave Chinner else 29530f712c9SDave Chinner thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr); 29630f712c9SDave Chinner if (*thispa == *pp) { 29778b0f58bSZeng Heng xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld", 29830f712c9SDave Chinner __func__, j, i, 29930f712c9SDave Chinner (unsigned long long)be64_to_cpu(*thispa)); 300cec57256SDarrick J. Wong xfs_err(mp, "%s: ptrs are equal in node\n", 30130f712c9SDave Chinner __func__); 302cec57256SDarrick J. Wong xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 30330f712c9SDave Chinner } 30430f712c9SDave Chinner } 30530f712c9SDave Chinner } 30630f712c9SDave Chinner } 30730f712c9SDave Chinner 30830f712c9SDave Chinner /* 30930f712c9SDave Chinner * Check that the extents for the inode ip are in the right order in all 310e3543819SDave Chinner * btree leaves. THis becomes prohibitively expensive for large extent count 311e3543819SDave Chinner * files, so don't bother with inodes that have more than 10,000 extents in 312e3543819SDave Chinner * them. The btree record ordering checks will still be done, so for such large 313e3543819SDave Chinner * bmapbt constructs that is going to catch most corruptions. 31430f712c9SDave Chinner */ 31530f712c9SDave Chinner STATIC void 31630f712c9SDave Chinner xfs_bmap_check_leaf_extents( 317ae127f08SDarrick J. 
Wong struct xfs_btree_cur *cur, /* btree cursor or null */ 31830f712c9SDave Chinner xfs_inode_t *ip, /* incore inode pointer */ 31930f712c9SDave Chinner int whichfork) /* data or attr fork */ 32030f712c9SDave Chinner { 321f7e67b20SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 322732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 32330f712c9SDave Chinner struct xfs_btree_block *block; /* current btree block */ 32430f712c9SDave Chinner xfs_fsblock_t bno; /* block # of "block" */ 325e8222613SDave Chinner struct xfs_buf *bp; /* buffer for "block" */ 32630f712c9SDave Chinner int error; /* error return value */ 32730f712c9SDave Chinner xfs_extnum_t i=0, j; /* index into the extents list */ 32830f712c9SDave Chinner int level; /* btree level, for checking */ 32930f712c9SDave Chinner __be64 *pp; /* pointer to block address */ 33030f712c9SDave Chinner xfs_bmbt_rec_t *ep; /* pointer to current extent */ 33130f712c9SDave Chinner xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */ 33230f712c9SDave Chinner xfs_bmbt_rec_t *nextp; /* pointer to next extent */ 33330f712c9SDave Chinner int bp_release = 0; 33430f712c9SDave Chinner 335f7e67b20SChristoph Hellwig if (ifp->if_format != XFS_DINODE_FMT_BTREE) 33630f712c9SDave Chinner return; 33730f712c9SDave Chinner 338e3543819SDave Chinner /* skip large extent count inodes */ 339daf83964SChristoph Hellwig if (ip->i_df.if_nextents > 10000) 340e3543819SDave Chinner return; 341e3543819SDave Chinner 34230f712c9SDave Chinner bno = NULLFSBLOCK; 34330f712c9SDave Chinner block = ifp->if_broot; 34430f712c9SDave Chinner /* 34530f712c9SDave Chinner * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 
34630f712c9SDave Chinner */ 34730f712c9SDave Chinner level = be16_to_cpu(block->bb_level); 34830f712c9SDave Chinner ASSERT(level > 0); 34930f712c9SDave Chinner xfs_check_block(block, mp, 1, ifp->if_broot_bytes); 35030f712c9SDave Chinner pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 35130f712c9SDave Chinner bno = be64_to_cpu(*pp); 35230f712c9SDave Chinner 353d5cf09baSChristoph Hellwig ASSERT(bno != NULLFSBLOCK); 35430f712c9SDave Chinner ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); 35530f712c9SDave Chinner ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); 35630f712c9SDave Chinner 35730f712c9SDave Chinner /* 35830f712c9SDave Chinner * Go down the tree until leaf level is reached, following the first 35930f712c9SDave Chinner * pointer (leftmost) at each level. 36030f712c9SDave Chinner */ 36130f712c9SDave Chinner while (level-- > 0) { 36230f712c9SDave Chinner /* See if buf is in cur first */ 36330f712c9SDave Chinner bp_release = 0; 36430f712c9SDave Chinner bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); 36530f712c9SDave Chinner if (!bp) { 36630f712c9SDave Chinner bp_release = 1; 367f5b999c0SEric Sandeen error = xfs_btree_read_bufl(mp, NULL, bno, &bp, 36830f712c9SDave Chinner XFS_BMAP_BTREE_REF, 36930f712c9SDave Chinner &xfs_bmbt_buf_ops); 37030f712c9SDave Chinner if (error) 37130f712c9SDave Chinner goto error_norelse; 37230f712c9SDave Chinner } 37330f712c9SDave Chinner block = XFS_BUF_TO_BLOCK(bp); 37430f712c9SDave Chinner if (level == 0) 37530f712c9SDave Chinner break; 37630f712c9SDave Chinner 37730f712c9SDave Chinner /* 37830f712c9SDave Chinner * Check this block for basic sanity (increasing keys and 37930f712c9SDave Chinner * no duplicate blocks). 38030f712c9SDave Chinner */ 38130f712c9SDave Chinner 38230f712c9SDave Chinner xfs_check_block(block, mp, 0, 0); 38330f712c9SDave Chinner pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); 38430f712c9SDave Chinner bno = be64_to_cpu(*pp); 385f9e03706SDarrick J. 
Wong if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) { 386f9e03706SDarrick J. Wong error = -EFSCORRUPTED; 387f9e03706SDarrick J. Wong goto error0; 388f9e03706SDarrick J. Wong } 38930f712c9SDave Chinner if (bp_release) { 39030f712c9SDave Chinner bp_release = 0; 39130f712c9SDave Chinner xfs_trans_brelse(NULL, bp); 39230f712c9SDave Chinner } 39330f712c9SDave Chinner } 39430f712c9SDave Chinner 39530f712c9SDave Chinner /* 39630f712c9SDave Chinner * Here with bp and block set to the leftmost leaf node in the tree. 39730f712c9SDave Chinner */ 39830f712c9SDave Chinner i = 0; 39930f712c9SDave Chinner 40030f712c9SDave Chinner /* 40130f712c9SDave Chinner * Loop over all leaf nodes checking that all extents are in the right order. 40230f712c9SDave Chinner */ 40330f712c9SDave Chinner for (;;) { 40430f712c9SDave Chinner xfs_fsblock_t nextbno; 40530f712c9SDave Chinner xfs_extnum_t num_recs; 40630f712c9SDave Chinner 40730f712c9SDave Chinner 40830f712c9SDave Chinner num_recs = xfs_btree_get_numrecs(block); 40930f712c9SDave Chinner 41030f712c9SDave Chinner /* 41130f712c9SDave Chinner * Read-ahead the next leaf block, if any. 41230f712c9SDave Chinner */ 41330f712c9SDave Chinner 41430f712c9SDave Chinner nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); 41530f712c9SDave Chinner 41630f712c9SDave Chinner /* 41730f712c9SDave Chinner * Check all the extents to make sure they are OK. 41830f712c9SDave Chinner * If we had a previous block, the last entry should 41930f712c9SDave Chinner * conform with the first entry in this one. 
42030f712c9SDave Chinner */ 42130f712c9SDave Chinner 42230f712c9SDave Chinner ep = XFS_BMBT_REC_ADDR(mp, block, 1); 42330f712c9SDave Chinner if (i) { 42430f712c9SDave Chinner ASSERT(xfs_bmbt_disk_get_startoff(&last) + 42530f712c9SDave Chinner xfs_bmbt_disk_get_blockcount(&last) <= 42630f712c9SDave Chinner xfs_bmbt_disk_get_startoff(ep)); 42730f712c9SDave Chinner } 42830f712c9SDave Chinner for (j = 1; j < num_recs; j++) { 42930f712c9SDave Chinner nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1); 43030f712c9SDave Chinner ASSERT(xfs_bmbt_disk_get_startoff(ep) + 43130f712c9SDave Chinner xfs_bmbt_disk_get_blockcount(ep) <= 43230f712c9SDave Chinner xfs_bmbt_disk_get_startoff(nextp)); 43330f712c9SDave Chinner ep = nextp; 43430f712c9SDave Chinner } 43530f712c9SDave Chinner 43630f712c9SDave Chinner last = *ep; 43730f712c9SDave Chinner i += num_recs; 43830f712c9SDave Chinner if (bp_release) { 43930f712c9SDave Chinner bp_release = 0; 44030f712c9SDave Chinner xfs_trans_brelse(NULL, bp); 44130f712c9SDave Chinner } 44230f712c9SDave Chinner bno = nextbno; 44330f712c9SDave Chinner /* 44430f712c9SDave Chinner * If we've reached the end, stop. 
44530f712c9SDave Chinner */ 44630f712c9SDave Chinner if (bno == NULLFSBLOCK) 44730f712c9SDave Chinner break; 44830f712c9SDave Chinner 44930f712c9SDave Chinner bp_release = 0; 45030f712c9SDave Chinner bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno)); 45130f712c9SDave Chinner if (!bp) { 45230f712c9SDave Chinner bp_release = 1; 453f5b999c0SEric Sandeen error = xfs_btree_read_bufl(mp, NULL, bno, &bp, 45430f712c9SDave Chinner XFS_BMAP_BTREE_REF, 45530f712c9SDave Chinner &xfs_bmbt_buf_ops); 45630f712c9SDave Chinner if (error) 45730f712c9SDave Chinner goto error_norelse; 45830f712c9SDave Chinner } 45930f712c9SDave Chinner block = XFS_BUF_TO_BLOCK(bp); 46030f712c9SDave Chinner } 461a5fd276bSLuis de Bethencourt 46230f712c9SDave Chinner return; 46330f712c9SDave Chinner 46430f712c9SDave Chinner error0: 46530f712c9SDave Chinner xfs_warn(mp, "%s: at error0", __func__); 46630f712c9SDave Chinner if (bp_release) 46730f712c9SDave Chinner xfs_trans_brelse(NULL, bp); 46830f712c9SDave Chinner error_norelse: 469755c38ffSChandan Babu R xfs_warn(mp, "%s: BAD after btree leaves for %llu extents", 47030f712c9SDave Chinner __func__, i); 471cec57256SDarrick J. Wong xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__); 472cec57256SDarrick J. Wong xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 47330f712c9SDave Chinner return; 47430f712c9SDave Chinner } 47530f712c9SDave Chinner 47630f712c9SDave Chinner /* 47730f712c9SDave Chinner * Validate that the bmbt_irecs being returned from bmapi are valid 47830f712c9SDave Chinner * given the caller's original parameters. Specifically check the 47930f712c9SDave Chinner * ranges of the returned irecs to ensure that they only extend beyond 48030f712c9SDave Chinner * the given parameters if the XFS_BMAPI_ENTIRE flag was set. 
48130f712c9SDave Chinner */ 48230f712c9SDave Chinner STATIC void 48330f712c9SDave Chinner xfs_bmap_validate_ret( 48430f712c9SDave Chinner xfs_fileoff_t bno, 48530f712c9SDave Chinner xfs_filblks_t len, 486e7d410acSDave Chinner uint32_t flags, 48730f712c9SDave Chinner xfs_bmbt_irec_t *mval, 48830f712c9SDave Chinner int nmap, 48930f712c9SDave Chinner int ret_nmap) 49030f712c9SDave Chinner { 49130f712c9SDave Chinner int i; /* index to map values */ 49230f712c9SDave Chinner 49330f712c9SDave Chinner ASSERT(ret_nmap <= nmap); 49430f712c9SDave Chinner 49530f712c9SDave Chinner for (i = 0; i < ret_nmap; i++) { 49630f712c9SDave Chinner ASSERT(mval[i].br_blockcount > 0); 49730f712c9SDave Chinner if (!(flags & XFS_BMAPI_ENTIRE)) { 49830f712c9SDave Chinner ASSERT(mval[i].br_startoff >= bno); 49930f712c9SDave Chinner ASSERT(mval[i].br_blockcount <= len); 50030f712c9SDave Chinner ASSERT(mval[i].br_startoff + mval[i].br_blockcount <= 50130f712c9SDave Chinner bno + len); 50230f712c9SDave Chinner } else { 50330f712c9SDave Chinner ASSERT(mval[i].br_startoff < bno + len); 50430f712c9SDave Chinner ASSERT(mval[i].br_startoff + mval[i].br_blockcount > 50530f712c9SDave Chinner bno); 50630f712c9SDave Chinner } 50730f712c9SDave Chinner ASSERT(i == 0 || 50830f712c9SDave Chinner mval[i - 1].br_startoff + mval[i - 1].br_blockcount == 50930f712c9SDave Chinner mval[i].br_startoff); 51030f712c9SDave Chinner ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK && 51130f712c9SDave Chinner mval[i].br_startblock != HOLESTARTBLOCK); 51230f712c9SDave Chinner ASSERT(mval[i].br_state == XFS_EXT_NORM || 51330f712c9SDave Chinner mval[i].br_state == XFS_EXT_UNWRITTEN); 51430f712c9SDave Chinner } 51530f712c9SDave Chinner } 51630f712c9SDave Chinner 51730f712c9SDave Chinner #else 51830f712c9SDave Chinner #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0) 5197bf7a193SDarrick J. 
Wong #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0) 52030f712c9SDave Chinner #endif /* DEBUG */ 52130f712c9SDave Chinner 52230f712c9SDave Chinner /* 52330f712c9SDave Chinner * Inode fork format manipulation functions 52430f712c9SDave Chinner */ 52530f712c9SDave Chinner 52630f712c9SDave Chinner /* 527b101e334SChristoph Hellwig * Convert the inode format to extent format if it currently is in btree format, 528b101e334SChristoph Hellwig * but the extent list is small enough that it fits into the extent format. 529b101e334SChristoph Hellwig * 530b101e334SChristoph Hellwig * Since the extents are already in-core, all we have to do is give up the space 531b101e334SChristoph Hellwig * for the btree root and pitch the leaf block. 53230f712c9SDave Chinner */ 53330f712c9SDave Chinner STATIC int /* error */ 53430f712c9SDave Chinner xfs_bmap_btree_to_extents( 535b101e334SChristoph Hellwig struct xfs_trans *tp, /* transaction pointer */ 536b101e334SChristoph Hellwig struct xfs_inode *ip, /* incore inode pointer */ 537b101e334SChristoph Hellwig struct xfs_btree_cur *cur, /* btree cursor */ 53830f712c9SDave Chinner int *logflagsp, /* inode logging flags */ 53930f712c9SDave Chinner int whichfork) /* data or attr fork */ 54030f712c9SDave Chinner { 541732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 542b101e334SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 543b101e334SChristoph Hellwig struct xfs_btree_block *rblock = ifp->if_broot; 54430f712c9SDave Chinner struct xfs_btree_block *cblock;/* child btree block */ 54530f712c9SDave Chinner xfs_fsblock_t cbno; /* child block number */ 546e8222613SDave Chinner struct xfs_buf *cbp; /* child block's buffer */ 54730f712c9SDave Chinner int error; /* error return value */ 54830f712c9SDave Chinner __be64 *pp; /* ptr to block address */ 549340785ccSDarrick J. 
Wong struct xfs_owner_info oinfo; 55030f712c9SDave Chinner 551b101e334SChristoph Hellwig /* check if we actually need the extent format first: */ 552b101e334SChristoph Hellwig if (!xfs_bmap_wants_extents(ip, whichfork)) 553b101e334SChristoph Hellwig return 0; 554b101e334SChristoph Hellwig 555b101e334SChristoph Hellwig ASSERT(cur); 55660b4984fSDarrick J. Wong ASSERT(whichfork != XFS_COW_FORK); 557f7e67b20SChristoph Hellwig ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); 55830f712c9SDave Chinner ASSERT(be16_to_cpu(rblock->bb_level) == 1); 55930f712c9SDave Chinner ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); 56030f712c9SDave Chinner ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1); 561b101e334SChristoph Hellwig 56230f712c9SDave Chinner pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes); 56330f712c9SDave Chinner cbno = be64_to_cpu(*pp); 56430f712c9SDave Chinner #ifdef DEBUG 565f9e03706SDarrick J. Wong if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1))) 566f9e03706SDarrick J. Wong return -EFSCORRUPTED; 56730f712c9SDave Chinner #endif 568f5b999c0SEric Sandeen error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF, 56930f712c9SDave Chinner &xfs_bmbt_buf_ops); 57030f712c9SDave Chinner if (error) 57130f712c9SDave Chinner return error; 57230f712c9SDave Chinner cblock = XFS_BUF_TO_BLOCK(cbp); 57330f712c9SDave Chinner if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) 57430f712c9SDave Chinner return error; 575340785ccSDarrick J. Wong xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); 576c201d9caSDarrick J. Wong xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo); 5776e73a545SChristoph Hellwig ip->i_nblocks--; 57830f712c9SDave Chinner xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 57930f712c9SDave Chinner xfs_trans_binval(tp, cbp); 5806ca444cfSDarrick J. Wong if (cur->bc_levels[0].bp == cbp) 5816ca444cfSDarrick J. 
	/*
	 * Tail of the btree-to-extents conversion: drop the incore btree
	 * root, mark the fork as extents format, and report what must be
	 * logged.  (Function head lies above this chunk.)
	 */
	cur->bc_levels[0].bp = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;	/* real (non-delalloc) extents copied */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.  The root is a single-record level-1 block whose
	 * one child will hold all current extent records.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	/* Carry the delayed-alloc flag so block accounting is done right. */
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	/* Allocate the single child block near the inode. */
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.  Delayed-allocation extents are skipped;
	 * only real extents are written into the on-disk leaf.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
				be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

	/*
	 * Unwind in reverse order of setup: undo the dquot block count first
	 * (only reached after the allocation succeeded), then shrink the
	 * incore root back and restore extents format.
	 */
out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
Wong ASSERT(whichfork != XFS_COW_FORK); 740f7e67b20SChristoph Hellwig ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); 74130f712c9SDave Chinner ASSERT(ifp->if_bytes == 0); 742daf83964SChristoph Hellwig ASSERT(ifp->if_nextents == 0); 74330f712c9SDave Chinner 74430f712c9SDave Chinner xfs_bmap_forkoff_reset(ip, whichfork); 7456bdcf26aSChristoph Hellwig ifp->if_u1.if_root = NULL; 7466bdcf26aSChristoph Hellwig ifp->if_height = 0; 747f7e67b20SChristoph Hellwig ifp->if_format = XFS_DINODE_FMT_EXTENTS; 748aeea4b75SBrian Foster xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 74930f712c9SDave Chinner } 75030f712c9SDave Chinner 75130f712c9SDave Chinner 75230f712c9SDave Chinner STATIC int /* error */ 75330f712c9SDave Chinner xfs_bmap_local_to_extents( 75430f712c9SDave Chinner xfs_trans_t *tp, /* transaction pointer */ 75530f712c9SDave Chinner xfs_inode_t *ip, /* incore inode pointer */ 75630f712c9SDave Chinner xfs_extlen_t total, /* total blocks needed by transaction */ 75730f712c9SDave Chinner int *logflagsp, /* inode logging flags */ 75830f712c9SDave Chinner int whichfork, 75930f712c9SDave Chinner void (*init_fn)(struct xfs_trans *tp, 76030f712c9SDave Chinner struct xfs_buf *bp, 76130f712c9SDave Chinner struct xfs_inode *ip, 76230f712c9SDave Chinner struct xfs_ifork *ifp)) 76330f712c9SDave Chinner { 76430f712c9SDave Chinner int error = 0; 76530f712c9SDave Chinner int flags; /* logging flags returned */ 7663ba738dfSChristoph Hellwig struct xfs_ifork *ifp; /* inode fork pointer */ 76730f712c9SDave Chinner xfs_alloc_arg_t args; /* allocation arguments */ 768e8222613SDave Chinner struct xfs_buf *bp; /* buffer for extent block */ 76950bb44c2SChristoph Hellwig struct xfs_bmbt_irec rec; 770b2b1712aSChristoph Hellwig struct xfs_iext_cursor icur; 77130f712c9SDave Chinner 77230f712c9SDave Chinner /* 77330f712c9SDave Chinner * We don't want to deal with the case of keeping inode data inline yet. 77430f712c9SDave Chinner * So sending the data fork of a regular inode is invalid. 
77530f712c9SDave Chinner */ 776c19b3b05SDave Chinner ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK)); 777732436efSDarrick J. Wong ifp = xfs_ifork_ptr(ip, whichfork); 778f7e67b20SChristoph Hellwig ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); 77930f712c9SDave Chinner 78030f712c9SDave Chinner if (!ifp->if_bytes) { 781aeea4b75SBrian Foster xfs_bmap_local_to_extents_empty(tp, ip, whichfork); 78230f712c9SDave Chinner flags = XFS_ILOG_CORE; 78330f712c9SDave Chinner goto done; 78430f712c9SDave Chinner } 78530f712c9SDave Chinner 78630f712c9SDave Chinner flags = 0; 78730f712c9SDave Chinner error = 0; 78830f712c9SDave Chinner memset(&args, 0, sizeof(args)); 78930f712c9SDave Chinner args.tp = tp; 79030f712c9SDave Chinner args.mp = ip->i_mount; 79174c36a86SDave Chinner args.total = total; 79274c36a86SDave Chinner args.minlen = args.maxlen = args.prod = 1; 793340785ccSDarrick J. Wong xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0); 7942a7f6d41SDave Chinner 79530f712c9SDave Chinner /* 79630f712c9SDave Chinner * Allocate a block. We know we need only one, since the 79730f712c9SDave Chinner * file currently fits in an inode. 79830f712c9SDave Chinner */ 79930f712c9SDave Chinner args.total = total; 80030f712c9SDave Chinner args.minlen = args.maxlen = args.prod = 1; 8012a7f6d41SDave Chinner error = xfs_alloc_vextent_start_ag(&args, 8022a7f6d41SDave Chinner XFS_INO_TO_FSB(args.mp, ip->i_ino)); 80330f712c9SDave Chinner if (error) 80430f712c9SDave Chinner goto done; 80530f712c9SDave Chinner 80630f712c9SDave Chinner /* Can't fail, the space was reserved. */ 80730f712c9SDave Chinner ASSERT(args.fsbno != NULLFSBLOCK); 80830f712c9SDave Chinner ASSERT(args.len == 1); 809ee647f85SDarrick J. Wong error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, 810ee647f85SDarrick J. Wong XFS_FSB_TO_DADDR(args.mp, args.fsbno), 811ee647f85SDarrick J. Wong args.mp->m_bsize, 0, &bp); 812ee647f85SDarrick J. Wong if (error) 813ee647f85SDarrick J. 
Wong goto done; 81430f712c9SDave Chinner 815fe22d552SDave Chinner /* 816b7cdc66bSBrian Foster * Initialize the block, copy the data and log the remote buffer. 817fe22d552SDave Chinner * 818b7cdc66bSBrian Foster * The callout is responsible for logging because the remote format 819b7cdc66bSBrian Foster * might differ from the local format and thus we don't know how much to 820b7cdc66bSBrian Foster * log here. Note that init_fn must also set the buffer log item type 821b7cdc66bSBrian Foster * correctly. 822fe22d552SDave Chinner */ 82330f712c9SDave Chinner init_fn(tp, bp, ip, ifp); 82430f712c9SDave Chinner 825b7cdc66bSBrian Foster /* account for the change in fork size */ 82630f712c9SDave Chinner xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); 827aeea4b75SBrian Foster xfs_bmap_local_to_extents_empty(tp, ip, whichfork); 82830f712c9SDave Chinner flags |= XFS_ILOG_CORE; 82930f712c9SDave Chinner 8306bdcf26aSChristoph Hellwig ifp->if_u1.if_root = NULL; 8316bdcf26aSChristoph Hellwig ifp->if_height = 0; 8326bdcf26aSChristoph Hellwig 83350bb44c2SChristoph Hellwig rec.br_startoff = 0; 83450bb44c2SChristoph Hellwig rec.br_startblock = args.fsbno; 83550bb44c2SChristoph Hellwig rec.br_blockcount = 1; 83650bb44c2SChristoph Hellwig rec.br_state = XFS_EXT_NORM; 837b2b1712aSChristoph Hellwig xfs_iext_first(ifp, &icur); 8380254c2f2SChristoph Hellwig xfs_iext_insert(ip, &icur, &rec, 0); 83950bb44c2SChristoph Hellwig 840daf83964SChristoph Hellwig ifp->if_nextents = 1; 8416e73a545SChristoph Hellwig ip->i_nblocks = 1; 84236b6ad2dSDave Chinner xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); 84330f712c9SDave Chinner flags |= xfs_ilog_fext(whichfork); 84430f712c9SDave Chinner 84530f712c9SDave Chinner done: 84630f712c9SDave Chinner *logflagsp = flags; 84730f712c9SDave Chinner return error; 84830f712c9SDave Chinner } 84930f712c9SDave Chinner 85030f712c9SDave Chinner /* 85130f712c9SDave Chinner * Called from xfs_bmap_add_attrfork to handle btree format files. 
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	/*
	 * If the shrunken data fork still has room for the current btree
	 * root, just relog it; otherwise push the root down a level via
	 * xfs_btree_new_iroot() to make it fit.
	 */
	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		/* stat == 0 means no new root could be allocated */
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	/* Nothing to do if the extent list still fits in the inode. */
	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formating, others (directories) are so specialised they
 * handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	/* Still fits inline after the fork offset moved — nothing to do. */
	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	/* Directories: the dir code handles the whole conversion itself. */
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	/* Symlinks: generic conversion with a symlink-specific block callout. */
	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
963e6a688c3SDave Chinner */ 9645a981e4eSChristoph Hellwig static int 9652f3cd809SAllison Henderson xfs_bmap_set_attrforkoff( 9662f3cd809SAllison Henderson struct xfs_inode *ip, 9672f3cd809SAllison Henderson int size, 9682f3cd809SAllison Henderson int *version) 9692f3cd809SAllison Henderson { 970683ec9baSDave Chinner int default_size = xfs_default_attroffset(ip) >> 3; 971683ec9baSDave Chinner 972f7e67b20SChristoph Hellwig switch (ip->i_df.if_format) { 9732f3cd809SAllison Henderson case XFS_DINODE_FMT_DEV: 974683ec9baSDave Chinner ip->i_forkoff = default_size; 9752f3cd809SAllison Henderson break; 9762f3cd809SAllison Henderson case XFS_DINODE_FMT_LOCAL: 9772f3cd809SAllison Henderson case XFS_DINODE_FMT_EXTENTS: 9782f3cd809SAllison Henderson case XFS_DINODE_FMT_BTREE: 9797821ea30SChristoph Hellwig ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size); 9807821ea30SChristoph Hellwig if (!ip->i_forkoff) 981683ec9baSDave Chinner ip->i_forkoff = default_size; 9820560f31aSDave Chinner else if (xfs_has_attr2(ip->i_mount) && version) 9832f3cd809SAllison Henderson *version = 2; 9842f3cd809SAllison Henderson break; 9852f3cd809SAllison Henderson default: 9862f3cd809SAllison Henderson ASSERT(0); 9872f3cd809SAllison Henderson return -EINVAL; 9882f3cd809SAllison Henderson } 9892f3cd809SAllison Henderson 9902f3cd809SAllison Henderson return 0; 9912f3cd809SAllison Henderson } 9922f3cd809SAllison Henderson 99330f712c9SDave Chinner /* 99430f712c9SDave Chinner * Convert inode from non-attributed to attributed. 99530f712c9SDave Chinner * Must not be in a transaction, ip must not be locked. 
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(xfs_inode_has_attr_fork(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	/* Re-check under the lock: another thread may have added the fork. */
	if (xfs_inode_has_attr_fork(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	/*
	 * The data fork may have to change format to make room for the
	 * attr fork; dispatch on its current format.
	 */
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	/*
	 * Turn on the superblock attr feature bits (and attr2 if we chose
	 * version 2 above) under the sb lock, logging the sb if anything
	 * changed.
	 */
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Internal and external extent tree search functions.
 */

/* Walk state for loading a bmbt into the incore extent tree. */
struct xfs_iread_state {
	struct xfs_iext_cursor	icur;		/* insertion point */
	xfs_extnum_t		loaded;		/* records loaded so far */
};

/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
			 (unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		/* Reject any on-disk record that fails validation. */
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return -EFSCORRUPTED;
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	/* Nothing to do if the extents are already in memory. */
	if (!xfs_need_iread_extents(ifp))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	/* The walk must have loaded exactly if_nextents records. */
	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	return 0;
out:
	/* Tear down whatever was partially loaded. */
	xfs_iext_destroy(ifp);
	return error;
}

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 119529b3e94aSChristoph Hellwig struct xfs_bmbt_irec got; 1196b2b1712aSChristoph Hellwig struct xfs_iext_cursor icur; 119729b3e94aSChristoph Hellwig xfs_fileoff_t lastaddr = 0; 119829b3e94aSChristoph Hellwig xfs_fileoff_t lowest, max; 119929b3e94aSChristoph Hellwig int error; 120030f712c9SDave Chinner 1201f7e67b20SChristoph Hellwig if (ifp->if_format == XFS_DINODE_FMT_LOCAL) { 120230f712c9SDave Chinner *first_unused = 0; 120330f712c9SDave Chinner return 0; 120430f712c9SDave Chinner } 120529b3e94aSChristoph Hellwig 1206f7e67b20SChristoph Hellwig ASSERT(xfs_ifork_has_extents(ifp)); 1207f7e67b20SChristoph Hellwig 120829b3e94aSChristoph Hellwig error = xfs_iread_extents(tp, ip, whichfork); 120929b3e94aSChristoph Hellwig if (error) 121030f712c9SDave Chinner return error; 1211f2285c14SChristoph Hellwig 121229b3e94aSChristoph Hellwig lowest = max = *first_unused; 1213b2b1712aSChristoph Hellwig for_each_xfs_iext(ifp, &icur, &got) { 121430f712c9SDave Chinner /* 121530f712c9SDave Chinner * See if the hole before this extent will work. 121630f712c9SDave Chinner */ 1217f2285c14SChristoph Hellwig if (got.br_startoff >= lowest + len && 121829b3e94aSChristoph Hellwig got.br_startoff - max >= len) 121929b3e94aSChristoph Hellwig break; 1220f2285c14SChristoph Hellwig lastaddr = got.br_startoff + got.br_blockcount; 122130f712c9SDave Chinner max = XFS_FILEOFF_MAX(lastaddr, lowest); 122230f712c9SDave Chinner } 122329b3e94aSChristoph Hellwig 122430f712c9SDave Chinner *first_unused = max; 122530f712c9SDave Chinner return 0; 122630f712c9SDave Chinner } 122730f712c9SDave Chinner 122830f712c9SDave Chinner /* 122930f712c9SDave Chinner * Returns the file-relative block number of the last block - 1 before 123030f712c9SDave Chinner * last_block (input value) in the file. 123130f712c9SDave Chinner * This is not based on i_size, it is based on the extent records. 
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		/* Unknown fork format: flag on-disk corruption. */
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/* No extent starts before *last_block — report 0. */
	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

/*
 * Fetch the fork's last extent record; set *is_empty when the fork has
 * no extent records at all.
 */
int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file.
When we allocate new data 129430f712c9SDave Chinner * blocks at the end of the file which do not start at the previous data block, 129530f712c9SDave Chinner * we will try to align the new blocks at stripe unit boundaries. 129630f712c9SDave Chinner * 129730f712c9SDave Chinner * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be 129830f712c9SDave Chinner * at, or past the EOF. 129930f712c9SDave Chinner */ 130030f712c9SDave Chinner STATIC int 130130f712c9SDave Chinner xfs_bmap_isaeof( 130230f712c9SDave Chinner struct xfs_bmalloca *bma, 130330f712c9SDave Chinner int whichfork) 130430f712c9SDave Chinner { 130530f712c9SDave Chinner struct xfs_bmbt_irec rec; 130630f712c9SDave Chinner int is_empty; 130730f712c9SDave Chinner int error; 130830f712c9SDave Chinner 1309749f24f3SThomas Meyer bma->aeof = false; 131030f712c9SDave Chinner error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, 131130f712c9SDave Chinner &is_empty); 131230f712c9SDave Chinner if (error) 131330f712c9SDave Chinner return error; 131430f712c9SDave Chinner 131530f712c9SDave Chinner if (is_empty) { 1316749f24f3SThomas Meyer bma->aeof = true; 131730f712c9SDave Chinner return 0; 131830f712c9SDave Chinner } 131930f712c9SDave Chinner 132030f712c9SDave Chinner /* 132130f712c9SDave Chinner * Check if we are allocation or past the last extent, or at least into 132230f712c9SDave Chinner * the last delayed allocated extent. 132330f712c9SDave Chinner */ 132430f712c9SDave Chinner bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount || 132530f712c9SDave Chinner (bma->offset >= rec.br_startoff && 132630f712c9SDave Chinner isnullstartblock(rec.br_startblock)); 132730f712c9SDave Chinner return 0; 132830f712c9SDave Chinner } 132930f712c9SDave Chinner 133030f712c9SDave Chinner /* 133130f712c9SDave Chinner * Returns the file-relative block number of the first block past eof in 133230f712c9SDave Chinner * the file. This is not based on i_size, it is based on the extent records. 
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	/* Local-format forks have no extent records; EOF block is zero. */
	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	/* Anything but extents/btree format at this point is corruption. */
	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	/* First block past EOF is the end of the last extent record. */
	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 *
 * The delalloc record at bma->icur (PREV below) fully covers the new real
 * allocation (see the ASSERTs on PREV's range); depending on which part of
 * PREV is being filled and whether the neighbors are contiguous, the record
 * is merged, replaced, trimmed, or split, and the delayed-allocation
 * reservation accounting is adjusted to match.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/* Reservation currently encoded in PREV's null startblock. */
	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 * Three records collapse into one: LEFT absorbs PREV and
		 * RIGHT, so two in-core records and one btree record go away.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.  The delalloc record becomes real in place,
		 * which adds one real (on-disk countable) extent.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		/* Re-reserve for the shrunken delalloc remainder of PREV. */
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		/*
		 * New reservation for the trailing delalloc remainder; blocks
		 * already consumed by the btree conversion are subtracted.
		 */
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		/* Re-reserve for the shrunken delalloc remainder of PREV. */
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		/*
		 * New reservation for the leading delalloc remainder; blocks
		 * already consumed by the btree conversion are subtracted.
		 */
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		/* Both delalloc remainders now carry worst-case reservations. */
		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_ino.allocated;
		bma->cur->bc_ino.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}

/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 *
 * The new extent (@new) must lie entirely within the existing extent under
 * the iext cursor (PREV below) and must have the opposite br_state; the
 * FILLING/CONTIG state bits computed here describe which part of PREV is
 * being replaced and whether the result can be merged with a neighbor.
 */
int					/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	struct xfs_btree_cur	*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= XFS_MAX_BMBT_EXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 *
	 * Each case updates the incore extent list first, then mirrors the
	 * same change in the bmap btree (if a cursor was supplied).  Every
	 * btree lookup/delete/decrement is expected to land on an existing
	 * record, hence the i != 1 corruption checks after each call.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 * Three records collapse into one: delete RIGHT and PREV in
		 * the btree, then rewrite LEFT with the merged length.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);
		ifp->if_nextents--;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.  Only the state flips; record count is
		 * unchanged, so no XFS_ILOG_CORE is needed.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			/* look up the pre-shrink record, not PREV */
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &RIGHT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &RIGHT);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			/* the new record must not exist yet (i == 0) */
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		old = PREV;
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			old.br_startoff + old.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = PREV.br_state;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &r[1], state);
		xfs_iext_insert(ip, icur, &r[0], state);
		ifp->if_nextents += 2;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new right extent - oldext */
			error = xfs_bmbt_update(cur, &r[1]);
			if (error)
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			/* new middle extent - newext */
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* update reverse mappings */
	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_ino.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}

/*
 * Convert a hole to a delayed allocation.
 *
 * Inserts the delalloc extent @new (nullstartblock encoded) at the iext
 * cursor position, merging with delalloc neighbors where possible.  On a
 * merge, the combined worst-case indirect-block reservation can shrink;
 * the excess is returned to the free-block counter below.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		temp;	/* temp for indirect calculations */

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_startblock = nullstartblock(newlen);
		left.br_blockcount = temp;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		temp = left.br_blockcount + new->br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_blockcount = temp;
		left.br_startblock = nullstartblock(newlen);

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		right.br_startoff = new->br_startoff;
		right.br_startblock = nullstartblock(newlen);
		right.br_blockcount = temp;
		xfs_iext_update_extent(ip, state, icur, &right);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, icur, new, state);
		break;
	}
	if (oldlen != newlen) {
		/* merging can only shrink the indlen reservation */
		ASSERT(oldlen > newlen);
		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
				 false);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
		xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
	}
}

/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,
	struct xfs_bmbt_irec	*new,
	int			*logflagsp,
	uint32_t		flags)
{
Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 26036d04558fSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 26046d04558fSChristoph Hellwig struct xfs_btree_cur *cur = *curp; 260530f712c9SDave Chinner int error; /* error return value */ 260630f712c9SDave Chinner int i; /* temp state */ 260730f712c9SDave Chinner xfs_bmbt_irec_t left; /* left neighbor extent entry */ 260830f712c9SDave Chinner xfs_bmbt_irec_t right; /* right neighbor extent entry */ 260930f712c9SDave Chinner int rval=0; /* return value (logging flags) */ 26100e5b8e45SDave Chinner uint32_t state = xfs_bmap_fork_to_state(whichfork); 26111abb9e55SChristoph Hellwig struct xfs_bmbt_irec old; 261230f712c9SDave Chinner 261330f712c9SDave Chinner ASSERT(!isnullstartblock(new->br_startblock)); 26148ef54797SDave Chinner ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL)); 261530f712c9SDave Chinner 2616ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_add_exlist); 261730f712c9SDave Chinner 261830f712c9SDave Chinner /* 261930f712c9SDave Chinner * Check and set flags if this segment has a left neighbor. 262030f712c9SDave Chinner */ 2621b2b1712aSChristoph Hellwig if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { 262230f712c9SDave Chinner state |= BMAP_LEFT_VALID; 262330f712c9SDave Chinner if (isnullstartblock(left.br_startblock)) 262430f712c9SDave Chinner state |= BMAP_LEFT_DELAY; 262530f712c9SDave Chinner } 262630f712c9SDave Chinner 262730f712c9SDave Chinner /* 262830f712c9SDave Chinner * Check and set flags if this segment has a current value. 262930f712c9SDave Chinner * Not true if we're inserting into the "hole" at eof. 
263030f712c9SDave Chinner */ 2631b2b1712aSChristoph Hellwig if (xfs_iext_get_extent(ifp, icur, &right)) { 263230f712c9SDave Chinner state |= BMAP_RIGHT_VALID; 263330f712c9SDave Chinner if (isnullstartblock(right.br_startblock)) 263430f712c9SDave Chinner state |= BMAP_RIGHT_DELAY; 263530f712c9SDave Chinner } 263630f712c9SDave Chinner 263730f712c9SDave Chinner /* 263830f712c9SDave Chinner * We're inserting a real allocation between "left" and "right". 263930f712c9SDave Chinner * Set the contiguity flags. Don't let extents get too large. 264030f712c9SDave Chinner */ 264130f712c9SDave Chinner if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && 264230f712c9SDave Chinner left.br_startoff + left.br_blockcount == new->br_startoff && 264330f712c9SDave Chinner left.br_startblock + left.br_blockcount == new->br_startblock && 264430f712c9SDave Chinner left.br_state == new->br_state && 264595f0b95eSChandan Babu R left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) 264630f712c9SDave Chinner state |= BMAP_LEFT_CONTIG; 264730f712c9SDave Chinner 264830f712c9SDave Chinner if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) && 264930f712c9SDave Chinner new->br_startoff + new->br_blockcount == right.br_startoff && 265030f712c9SDave Chinner new->br_startblock + new->br_blockcount == right.br_startblock && 265130f712c9SDave Chinner new->br_state == right.br_state && 265295f0b95eSChandan Babu R new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && 265330f712c9SDave Chinner (!(state & BMAP_LEFT_CONTIG) || 265430f712c9SDave Chinner left.br_blockcount + new->br_blockcount + 265595f0b95eSChandan Babu R right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)) 265630f712c9SDave Chinner state |= BMAP_RIGHT_CONTIG; 265730f712c9SDave Chinner 265830f712c9SDave Chinner error = 0; 265930f712c9SDave Chinner /* 266030f712c9SDave Chinner * Select which case we're in here, and implement it. 
266130f712c9SDave Chinner */ 266230f712c9SDave Chinner switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) { 266330f712c9SDave Chinner case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: 266430f712c9SDave Chinner /* 266530f712c9SDave Chinner * New allocation is contiguous with real allocations on the 266630f712c9SDave Chinner * left and on the right. 266730f712c9SDave Chinner * Merge all three into a single extent record. 266830f712c9SDave Chinner */ 26691abb9e55SChristoph Hellwig left.br_blockcount += new->br_blockcount + right.br_blockcount; 267030f712c9SDave Chinner 2671c38ccf59SChristoph Hellwig xfs_iext_remove(ip, icur, state); 2672b2b1712aSChristoph Hellwig xfs_iext_prev(ifp, icur); 2673b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, &left); 2674daf83964SChristoph Hellwig ifp->if_nextents--; 267530f712c9SDave Chinner 26766d04558fSChristoph Hellwig if (cur == NULL) { 267730f712c9SDave Chinner rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 267830f712c9SDave Chinner } else { 267930f712c9SDave Chinner rval = XFS_ILOG_CORE; 2680e16cf9b0SChristoph Hellwig error = xfs_bmbt_lookup_eq(cur, &right, &i); 268130f712c9SDave Chinner if (error) 268230f712c9SDave Chinner goto done; 2683f9e03706SDarrick J. Wong if (XFS_IS_CORRUPT(mp, i != 1)) { 2684f9e03706SDarrick J. Wong error = -EFSCORRUPTED; 2685f9e03706SDarrick J. Wong goto done; 2686f9e03706SDarrick J. Wong } 26876d04558fSChristoph Hellwig error = xfs_btree_delete(cur, &i); 268830f712c9SDave Chinner if (error) 268930f712c9SDave Chinner goto done; 2690f9e03706SDarrick J. Wong if (XFS_IS_CORRUPT(mp, i != 1)) { 2691f9e03706SDarrick J. Wong error = -EFSCORRUPTED; 2692f9e03706SDarrick J. Wong goto done; 2693f9e03706SDarrick J. Wong } 26946d04558fSChristoph Hellwig error = xfs_btree_decrement(cur, 0, &i); 269530f712c9SDave Chinner if (error) 269630f712c9SDave Chinner goto done; 2697f9e03706SDarrick J. Wong if (XFS_IS_CORRUPT(mp, i != 1)) { 2698f9e03706SDarrick J. 
Wong error = -EFSCORRUPTED; 2699f9e03706SDarrick J. Wong goto done; 2700f9e03706SDarrick J. Wong } 2701a67d00a5SChristoph Hellwig error = xfs_bmbt_update(cur, &left); 270230f712c9SDave Chinner if (error) 270330f712c9SDave Chinner goto done; 270430f712c9SDave Chinner } 270530f712c9SDave Chinner break; 270630f712c9SDave Chinner 270730f712c9SDave Chinner case BMAP_LEFT_CONTIG: 270830f712c9SDave Chinner /* 270930f712c9SDave Chinner * New allocation is contiguous with a real allocation 271030f712c9SDave Chinner * on the left. 271130f712c9SDave Chinner * Merge the new allocation with the left neighbor. 271230f712c9SDave Chinner */ 27131abb9e55SChristoph Hellwig old = left; 27141abb9e55SChristoph Hellwig left.br_blockcount += new->br_blockcount; 27151d2e0089SChristoph Hellwig 2716b2b1712aSChristoph Hellwig xfs_iext_prev(ifp, icur); 2717b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, &left); 271830f712c9SDave Chinner 27196d04558fSChristoph Hellwig if (cur == NULL) { 272030f712c9SDave Chinner rval = xfs_ilog_fext(whichfork); 272130f712c9SDave Chinner } else { 272230f712c9SDave Chinner rval = 0; 2723e16cf9b0SChristoph Hellwig error = xfs_bmbt_lookup_eq(cur, &old, &i); 272430f712c9SDave Chinner if (error) 272530f712c9SDave Chinner goto done; 2726f9e03706SDarrick J. Wong if (XFS_IS_CORRUPT(mp, i != 1)) { 2727f9e03706SDarrick J. Wong error = -EFSCORRUPTED; 2728f9e03706SDarrick J. Wong goto done; 2729f9e03706SDarrick J. Wong } 2730a67d00a5SChristoph Hellwig error = xfs_bmbt_update(cur, &left); 273130f712c9SDave Chinner if (error) 273230f712c9SDave Chinner goto done; 273330f712c9SDave Chinner } 273430f712c9SDave Chinner break; 273530f712c9SDave Chinner 273630f712c9SDave Chinner case BMAP_RIGHT_CONTIG: 273730f712c9SDave Chinner /* 273830f712c9SDave Chinner * New allocation is contiguous with a real allocation 273930f712c9SDave Chinner * on the right. 274030f712c9SDave Chinner * Merge the new allocation with the right neighbor. 
274130f712c9SDave Chinner */ 27421abb9e55SChristoph Hellwig old = right; 2743ca5d8e5bSChristoph Hellwig 27441abb9e55SChristoph Hellwig right.br_startoff = new->br_startoff; 27451abb9e55SChristoph Hellwig right.br_startblock = new->br_startblock; 27461abb9e55SChristoph Hellwig right.br_blockcount += new->br_blockcount; 2747b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, &right); 274830f712c9SDave Chinner 27496d04558fSChristoph Hellwig if (cur == NULL) { 275030f712c9SDave Chinner rval = xfs_ilog_fext(whichfork); 275130f712c9SDave Chinner } else { 275230f712c9SDave Chinner rval = 0; 2753e16cf9b0SChristoph Hellwig error = xfs_bmbt_lookup_eq(cur, &old, &i); 275430f712c9SDave Chinner if (error) 275530f712c9SDave Chinner goto done; 2756f9e03706SDarrick J. Wong if (XFS_IS_CORRUPT(mp, i != 1)) { 2757f9e03706SDarrick J. Wong error = -EFSCORRUPTED; 2758f9e03706SDarrick J. Wong goto done; 2759f9e03706SDarrick J. Wong } 2760a67d00a5SChristoph Hellwig error = xfs_bmbt_update(cur, &right); 276130f712c9SDave Chinner if (error) 276230f712c9SDave Chinner goto done; 276330f712c9SDave Chinner } 276430f712c9SDave Chinner break; 276530f712c9SDave Chinner 276630f712c9SDave Chinner case 0: 276730f712c9SDave Chinner /* 276830f712c9SDave Chinner * New allocation is not contiguous with another 276930f712c9SDave Chinner * real allocation. 277030f712c9SDave Chinner * Insert a new entry. 277130f712c9SDave Chinner */ 27720254c2f2SChristoph Hellwig xfs_iext_insert(ip, icur, new, state); 2773daf83964SChristoph Hellwig ifp->if_nextents++; 2774daf83964SChristoph Hellwig 27756d04558fSChristoph Hellwig if (cur == NULL) { 277630f712c9SDave Chinner rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 277730f712c9SDave Chinner } else { 277830f712c9SDave Chinner rval = XFS_ILOG_CORE; 2779e16cf9b0SChristoph Hellwig error = xfs_bmbt_lookup_eq(cur, new, &i); 278030f712c9SDave Chinner if (error) 278130f712c9SDave Chinner goto done; 2782f9e03706SDarrick J. 
Wong if (XFS_IS_CORRUPT(mp, i != 0)) { 2783f9e03706SDarrick J. Wong error = -EFSCORRUPTED; 2784f9e03706SDarrick J. Wong goto done; 2785f9e03706SDarrick J. Wong } 27866d04558fSChristoph Hellwig error = xfs_btree_insert(cur, &i); 278730f712c9SDave Chinner if (error) 278830f712c9SDave Chinner goto done; 2789f9e03706SDarrick J. Wong if (XFS_IS_CORRUPT(mp, i != 1)) { 2790f9e03706SDarrick J. Wong error = -EFSCORRUPTED; 2791f9e03706SDarrick J. Wong goto done; 2792f9e03706SDarrick J. Wong } 279330f712c9SDave Chinner } 279430f712c9SDave Chinner break; 279530f712c9SDave Chinner } 279630f712c9SDave Chinner 279795eb308cSDarrick J. Wong /* add reverse mapping unless caller opted out */ 2798bc46ac64SDarrick J. Wong if (!(flags & XFS_BMAPI_NORMAP)) 2799bc46ac64SDarrick J. Wong xfs_rmap_map_extent(tp, ip, whichfork, new); 28009c194644SDarrick J. Wong 280130f712c9SDave Chinner /* convert to a btree if necessary */ 28026d04558fSChristoph Hellwig if (xfs_bmap_needs_btree(ip, whichfork)) { 280330f712c9SDave Chinner int tmp_logflags; /* partial log flag return val */ 280430f712c9SDave Chinner 28056d04558fSChristoph Hellwig ASSERT(cur == NULL); 2806280253d2SBrian Foster error = xfs_bmap_extents_to_btree(tp, ip, curp, 0, 2807280253d2SBrian Foster &tmp_logflags, whichfork); 28086d04558fSChristoph Hellwig *logflagsp |= tmp_logflags; 28096d04558fSChristoph Hellwig cur = *curp; 281030f712c9SDave Chinner if (error) 281130f712c9SDave Chinner goto done; 281230f712c9SDave Chinner } 281330f712c9SDave Chinner 281430f712c9SDave Chinner /* clear out the allocated field, done with it now in any case. 
*/ 28156d04558fSChristoph Hellwig if (cur) 281692219c29SDave Chinner cur->bc_ino.allocated = 0; 281730f712c9SDave Chinner 28186d04558fSChristoph Hellwig xfs_bmap_check_leaf_extents(cur, ip, whichfork); 281930f712c9SDave Chinner done: 28206d04558fSChristoph Hellwig *logflagsp |= rval; 282130f712c9SDave Chinner return error; 282230f712c9SDave Chinner } 282330f712c9SDave Chinner 282430f712c9SDave Chinner /* 282530f712c9SDave Chinner * Functions used in the extent read, allocate and remove paths 282630f712c9SDave Chinner */ 282730f712c9SDave Chinner 282830f712c9SDave Chinner /* 2829031474c2SChristoph Hellwig * Adjust the size of the new extent based on i_extsize and rt extsize. 283030f712c9SDave Chinner */ 283130f712c9SDave Chinner int 283230f712c9SDave Chinner xfs_bmap_extsize_align( 283330f712c9SDave Chinner xfs_mount_t *mp, 283430f712c9SDave Chinner xfs_bmbt_irec_t *gotp, /* next extent pointer */ 283530f712c9SDave Chinner xfs_bmbt_irec_t *prevp, /* previous extent pointer */ 283630f712c9SDave Chinner xfs_extlen_t extsz, /* align to this extent size */ 283730f712c9SDave Chinner int rt, /* is this a realtime inode? */ 283830f712c9SDave Chinner int eof, /* is extent at end-of-file? */ 283930f712c9SDave Chinner int delay, /* creating delalloc extent? */ 284030f712c9SDave Chinner int convert, /* overwriting unwritten extent? 
*/ 284130f712c9SDave Chinner xfs_fileoff_t *offp, /* in/out: aligned offset */ 284230f712c9SDave Chinner xfs_extlen_t *lenp) /* in/out: aligned length */ 284330f712c9SDave Chinner { 284430f712c9SDave Chinner xfs_fileoff_t orig_off; /* original offset */ 284530f712c9SDave Chinner xfs_extlen_t orig_alen; /* original length */ 284630f712c9SDave Chinner xfs_fileoff_t orig_end; /* original off+len */ 284730f712c9SDave Chinner xfs_fileoff_t nexto; /* next file offset */ 284830f712c9SDave Chinner xfs_fileoff_t prevo; /* previous file offset */ 284930f712c9SDave Chinner xfs_fileoff_t align_off; /* temp for offset */ 285030f712c9SDave Chinner xfs_extlen_t align_alen; /* temp for length */ 285130f712c9SDave Chinner xfs_extlen_t temp; /* temp for calculations */ 285230f712c9SDave Chinner 285330f712c9SDave Chinner if (convert) 285430f712c9SDave Chinner return 0; 285530f712c9SDave Chinner 285630f712c9SDave Chinner orig_off = align_off = *offp; 285730f712c9SDave Chinner orig_alen = align_alen = *lenp; 285830f712c9SDave Chinner orig_end = orig_off + orig_alen; 285930f712c9SDave Chinner 286030f712c9SDave Chinner /* 286130f712c9SDave Chinner * If this request overlaps an existing extent, then don't 286230f712c9SDave Chinner * attempt to perform any additional alignment. 286330f712c9SDave Chinner */ 286430f712c9SDave Chinner if (!delay && !eof && 286530f712c9SDave Chinner (orig_off >= gotp->br_startoff) && 286630f712c9SDave Chinner (orig_end <= gotp->br_startoff + gotp->br_blockcount)) { 286730f712c9SDave Chinner return 0; 286830f712c9SDave Chinner } 286930f712c9SDave Chinner 287030f712c9SDave Chinner /* 287130f712c9SDave Chinner * If the file offset is unaligned vs. the extent size 287230f712c9SDave Chinner * we need to align it. This will be possible unless 287330f712c9SDave Chinner * the file was previously written with a kernel that didn't 287430f712c9SDave Chinner * perform this alignment, or if a truncate shot us in the 287530f712c9SDave Chinner * foot. 
287630f712c9SDave Chinner */ 28770703a8e1SDave Chinner div_u64_rem(orig_off, extsz, &temp); 287830f712c9SDave Chinner if (temp) { 287930f712c9SDave Chinner align_alen += temp; 288030f712c9SDave Chinner align_off -= temp; 288130f712c9SDave Chinner } 28826dea405eSDave Chinner 28836dea405eSDave Chinner /* Same adjustment for the end of the requested area. */ 28846dea405eSDave Chinner temp = (align_alen % extsz); 28856dea405eSDave Chinner if (temp) 288630f712c9SDave Chinner align_alen += extsz - temp; 28876dea405eSDave Chinner 28886dea405eSDave Chinner /* 28896dea405eSDave Chinner * For large extent hint sizes, the aligned extent might be larger than 289095f0b95eSChandan Babu R * XFS_BMBT_MAX_EXTLEN. In that case, reduce the size by an extsz so 289195f0b95eSChandan Babu R * that it pulls the length back under XFS_BMBT_MAX_EXTLEN. The outer 289295f0b95eSChandan Babu R * allocation loops handle short allocation just fine, so it is safe to 289395f0b95eSChandan Babu R * do this. We only want to do it when we are forced to, though, because 289495f0b95eSChandan Babu R * it means more allocation operations are required. 28956dea405eSDave Chinner */ 289695f0b95eSChandan Babu R while (align_alen > XFS_MAX_BMBT_EXTLEN) 28976dea405eSDave Chinner align_alen -= extsz; 289895f0b95eSChandan Babu R ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN); 28996dea405eSDave Chinner 290030f712c9SDave Chinner /* 290130f712c9SDave Chinner * If the previous block overlaps with this proposed allocation 290230f712c9SDave Chinner * then move the start forward without adjusting the length. 
290330f712c9SDave Chinner */ 290430f712c9SDave Chinner if (prevp->br_startoff != NULLFILEOFF) { 290530f712c9SDave Chinner if (prevp->br_startblock == HOLESTARTBLOCK) 290630f712c9SDave Chinner prevo = prevp->br_startoff; 290730f712c9SDave Chinner else 290830f712c9SDave Chinner prevo = prevp->br_startoff + prevp->br_blockcount; 290930f712c9SDave Chinner } else 291030f712c9SDave Chinner prevo = 0; 291130f712c9SDave Chinner if (align_off != orig_off && align_off < prevo) 291230f712c9SDave Chinner align_off = prevo; 291330f712c9SDave Chinner /* 291430f712c9SDave Chinner * If the next block overlaps with this proposed allocation 291530f712c9SDave Chinner * then move the start back without adjusting the length, 291630f712c9SDave Chinner * but not before offset 0. 291730f712c9SDave Chinner * This may of course make the start overlap previous block, 291830f712c9SDave Chinner * and if we hit the offset 0 limit then the next block 291930f712c9SDave Chinner * can still overlap too. 292030f712c9SDave Chinner */ 292130f712c9SDave Chinner if (!eof && gotp->br_startoff != NULLFILEOFF) { 292230f712c9SDave Chinner if ((delay && gotp->br_startblock == HOLESTARTBLOCK) || 292330f712c9SDave Chinner (!delay && gotp->br_startblock == DELAYSTARTBLOCK)) 292430f712c9SDave Chinner nexto = gotp->br_startoff + gotp->br_blockcount; 292530f712c9SDave Chinner else 292630f712c9SDave Chinner nexto = gotp->br_startoff; 292730f712c9SDave Chinner } else 292830f712c9SDave Chinner nexto = NULLFILEOFF; 292930f712c9SDave Chinner if (!eof && 293030f712c9SDave Chinner align_off + align_alen != orig_end && 293130f712c9SDave Chinner align_off + align_alen > nexto) 293230f712c9SDave Chinner align_off = nexto > align_alen ? nexto - align_alen : 0; 293330f712c9SDave Chinner /* 293430f712c9SDave Chinner * If we're now overlapping the next or previous extent that 293530f712c9SDave Chinner * means we can't fit an extsz piece in this hole. 
Just move 293630f712c9SDave Chinner * the start forward to the first valid spot and set 293730f712c9SDave Chinner * the length so we hit the end. 293830f712c9SDave Chinner */ 293930f712c9SDave Chinner if (align_off != orig_off && align_off < prevo) 294030f712c9SDave Chinner align_off = prevo; 294130f712c9SDave Chinner if (align_off + align_alen != orig_end && 294230f712c9SDave Chinner align_off + align_alen > nexto && 294330f712c9SDave Chinner nexto != NULLFILEOFF) { 294430f712c9SDave Chinner ASSERT(nexto > prevo); 294530f712c9SDave Chinner align_alen = nexto - align_off; 294630f712c9SDave Chinner } 294730f712c9SDave Chinner 294830f712c9SDave Chinner /* 294930f712c9SDave Chinner * If realtime, and the result isn't a multiple of the realtime 295030f712c9SDave Chinner * extent size we need to remove blocks until it is. 295130f712c9SDave Chinner */ 295230f712c9SDave Chinner if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) { 295330f712c9SDave Chinner /* 295430f712c9SDave Chinner * We're not covering the original request, or 295530f712c9SDave Chinner * we won't be able to once we fix the length. 295630f712c9SDave Chinner */ 295730f712c9SDave Chinner if (orig_off < align_off || 295830f712c9SDave Chinner orig_end > align_off + align_alen || 295930f712c9SDave Chinner align_alen - temp < orig_alen) 29602451337dSDave Chinner return -EINVAL; 296130f712c9SDave Chinner /* 296230f712c9SDave Chinner * Try to fix it by moving the start up. 296330f712c9SDave Chinner */ 296430f712c9SDave Chinner if (align_off + temp <= orig_off) { 296530f712c9SDave Chinner align_alen -= temp; 296630f712c9SDave Chinner align_off += temp; 296730f712c9SDave Chinner } 296830f712c9SDave Chinner /* 296930f712c9SDave Chinner * Try to fix it by moving the end in. 
297030f712c9SDave Chinner */ 297130f712c9SDave Chinner else if (align_off + align_alen - temp >= orig_end) 297230f712c9SDave Chinner align_alen -= temp; 297330f712c9SDave Chinner /* 297430f712c9SDave Chinner * Set the start to the minimum then trim the length. 297530f712c9SDave Chinner */ 297630f712c9SDave Chinner else { 297730f712c9SDave Chinner align_alen -= orig_off - align_off; 297830f712c9SDave Chinner align_off = orig_off; 297930f712c9SDave Chinner align_alen -= align_alen % mp->m_sb.sb_rextsize; 298030f712c9SDave Chinner } 298130f712c9SDave Chinner /* 298230f712c9SDave Chinner * Result doesn't cover the request, fail it. 298330f712c9SDave Chinner */ 298430f712c9SDave Chinner if (orig_off < align_off || orig_end > align_off + align_alen) 29852451337dSDave Chinner return -EINVAL; 298630f712c9SDave Chinner } else { 298730f712c9SDave Chinner ASSERT(orig_off >= align_off); 298895f0b95eSChandan Babu R /* see XFS_BMBT_MAX_EXTLEN handling above */ 29896dea405eSDave Chinner ASSERT(orig_end <= align_off + align_alen || 299095f0b95eSChandan Babu R align_alen + extsz > XFS_MAX_BMBT_EXTLEN); 299130f712c9SDave Chinner } 299230f712c9SDave Chinner 299330f712c9SDave Chinner #ifdef DEBUG 299430f712c9SDave Chinner if (!eof && gotp->br_startoff != NULLFILEOFF) 299530f712c9SDave Chinner ASSERT(align_off + align_alen <= gotp->br_startoff); 299630f712c9SDave Chinner if (prevp->br_startoff != NULLFILEOFF) 299730f712c9SDave Chinner ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount); 299830f712c9SDave Chinner #endif 299930f712c9SDave Chinner 300030f712c9SDave Chinner *lenp = align_alen; 300130f712c9SDave Chinner *offp = align_off; 300230f712c9SDave Chinner return 0; 300330f712c9SDave Chinner } 300430f712c9SDave Chinner 300530f712c9SDave Chinner #define XFS_ALLOC_GAP_UNITS 4 300630f712c9SDave Chinner 300730f712c9SDave Chinner void 300830f712c9SDave Chinner xfs_bmap_adjacent( 300930f712c9SDave Chinner struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 
301030f712c9SDave Chinner { 301130f712c9SDave Chinner xfs_fsblock_t adjust; /* adjustment to block numbers */ 301230f712c9SDave Chinner xfs_mount_t *mp; /* mount point structure */ 301330f712c9SDave Chinner int rt; /* true if inode is realtime */ 301430f712c9SDave Chinner 301530f712c9SDave Chinner #define ISVALID(x,y) \ 301630f712c9SDave Chinner (rt ? \ 301730f712c9SDave Chinner (x) < mp->m_sb.sb_rblocks : \ 301830f712c9SDave Chinner XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \ 301930f712c9SDave Chinner XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ 302030f712c9SDave Chinner XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) 302130f712c9SDave Chinner 302230f712c9SDave Chinner mp = ap->ip->i_mount; 3023292378edSDave Chinner rt = XFS_IS_REALTIME_INODE(ap->ip) && 3024c34d570dSChristoph Hellwig (ap->datatype & XFS_ALLOC_USERDATA); 302530f712c9SDave Chinner /* 302630f712c9SDave Chinner * If allocating at eof, and there's a previous real block, 302730f712c9SDave Chinner * try to use its last block as our starting point. 302830f712c9SDave Chinner */ 302930f712c9SDave Chinner if (ap->eof && ap->prev.br_startoff != NULLFILEOFF && 303030f712c9SDave Chinner !isnullstartblock(ap->prev.br_startblock) && 303130f712c9SDave Chinner ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount, 303230f712c9SDave Chinner ap->prev.br_startblock)) { 303330f712c9SDave Chinner ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount; 303430f712c9SDave Chinner /* 303530f712c9SDave Chinner * Adjust for the gap between prevp and us. 303630f712c9SDave Chinner */ 303730f712c9SDave Chinner adjust = ap->offset - 303830f712c9SDave Chinner (ap->prev.br_startoff + ap->prev.br_blockcount); 303930f712c9SDave Chinner if (adjust && 304030f712c9SDave Chinner ISVALID(ap->blkno + adjust, ap->prev.br_startblock)) 304130f712c9SDave Chinner ap->blkno += adjust; 304230f712c9SDave Chinner } 304330f712c9SDave Chinner /* 304430f712c9SDave Chinner * If not at eof, then compare the two neighbor blocks. 
304530f712c9SDave Chinner * Figure out whether either one gives us a good starting point, 304630f712c9SDave Chinner * and pick the better one. 304730f712c9SDave Chinner */ 304830f712c9SDave Chinner else if (!ap->eof) { 304930f712c9SDave Chinner xfs_fsblock_t gotbno; /* right side block number */ 305030f712c9SDave Chinner xfs_fsblock_t gotdiff=0; /* right side difference */ 305130f712c9SDave Chinner xfs_fsblock_t prevbno; /* left side block number */ 305230f712c9SDave Chinner xfs_fsblock_t prevdiff=0; /* left side difference */ 305330f712c9SDave Chinner 305430f712c9SDave Chinner /* 305530f712c9SDave Chinner * If there's a previous (left) block, select a requested 305630f712c9SDave Chinner * start block based on it. 305730f712c9SDave Chinner */ 305830f712c9SDave Chinner if (ap->prev.br_startoff != NULLFILEOFF && 305930f712c9SDave Chinner !isnullstartblock(ap->prev.br_startblock) && 306030f712c9SDave Chinner (prevbno = ap->prev.br_startblock + 306130f712c9SDave Chinner ap->prev.br_blockcount) && 306230f712c9SDave Chinner ISVALID(prevbno, ap->prev.br_startblock)) { 306330f712c9SDave Chinner /* 306430f712c9SDave Chinner * Calculate gap to end of previous block. 306530f712c9SDave Chinner */ 306630f712c9SDave Chinner adjust = prevdiff = ap->offset - 306730f712c9SDave Chinner (ap->prev.br_startoff + 306830f712c9SDave Chinner ap->prev.br_blockcount); 306930f712c9SDave Chinner /* 307030f712c9SDave Chinner * Figure the startblock based on the previous block's 307130f712c9SDave Chinner * end and the gap size. 307230f712c9SDave Chinner * Heuristic! 307330f712c9SDave Chinner * If the gap is large relative to the piece we're 307430f712c9SDave Chinner * allocating, or using it gives us an invalid block 307530f712c9SDave Chinner * number, then just use the end of the previous block. 
307630f712c9SDave Chinner */ 307730f712c9SDave Chinner if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 307830f712c9SDave Chinner ISVALID(prevbno + prevdiff, 307930f712c9SDave Chinner ap->prev.br_startblock)) 308030f712c9SDave Chinner prevbno += adjust; 308130f712c9SDave Chinner else 308230f712c9SDave Chinner prevdiff += adjust; 308330f712c9SDave Chinner } 308430f712c9SDave Chinner /* 308530f712c9SDave Chinner * No previous block or can't follow it, just default. 308630f712c9SDave Chinner */ 308730f712c9SDave Chinner else 308830f712c9SDave Chinner prevbno = NULLFSBLOCK; 308930f712c9SDave Chinner /* 309030f712c9SDave Chinner * If there's a following (right) block, select a requested 309130f712c9SDave Chinner * start block based on it. 309230f712c9SDave Chinner */ 309330f712c9SDave Chinner if (!isnullstartblock(ap->got.br_startblock)) { 309430f712c9SDave Chinner /* 309530f712c9SDave Chinner * Calculate gap to start of next block. 309630f712c9SDave Chinner */ 309730f712c9SDave Chinner adjust = gotdiff = ap->got.br_startoff - ap->offset; 309830f712c9SDave Chinner /* 309930f712c9SDave Chinner * Figure the startblock based on the next block's 310030f712c9SDave Chinner * start and the gap size. 310130f712c9SDave Chinner */ 310230f712c9SDave Chinner gotbno = ap->got.br_startblock; 310330f712c9SDave Chinner /* 310430f712c9SDave Chinner * Heuristic! 310530f712c9SDave Chinner * If the gap is large relative to the piece we're 310630f712c9SDave Chinner * allocating, or using it gives us an invalid block 310730f712c9SDave Chinner * number, then just use the start of the next block 310830f712c9SDave Chinner * offset by our length. 
310930f712c9SDave Chinner */ 311030f712c9SDave Chinner if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length && 311130f712c9SDave Chinner ISVALID(gotbno - gotdiff, gotbno)) 311230f712c9SDave Chinner gotbno -= adjust; 311330f712c9SDave Chinner else if (ISVALID(gotbno - ap->length, gotbno)) { 311430f712c9SDave Chinner gotbno -= ap->length; 311530f712c9SDave Chinner gotdiff += adjust - ap->length; 311630f712c9SDave Chinner } else 311730f712c9SDave Chinner gotdiff += adjust; 311830f712c9SDave Chinner } 311930f712c9SDave Chinner /* 312030f712c9SDave Chinner * No next block, just default. 312130f712c9SDave Chinner */ 312230f712c9SDave Chinner else 312330f712c9SDave Chinner gotbno = NULLFSBLOCK; 312430f712c9SDave Chinner /* 312530f712c9SDave Chinner * If both valid, pick the better one, else the only good 312630f712c9SDave Chinner * one, else ap->blkno is already set (to 0 or the inode block). 312730f712c9SDave Chinner */ 312830f712c9SDave Chinner if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) 312930f712c9SDave Chinner ap->blkno = prevdiff <= gotdiff ? 
/*
 * Fold the longest contiguous free extent of @pag into *@blen, reading in
 * the AGF if it has not been initialised yet.
 *
 * The AGF read uses XFS_ALLOC_FLAG_TRYLOCK; if the AGF buffer lock cannot
 * be obtained (-EAGAIN), the AG is skipped: *notinit is set to 1 and 0 is
 * returned so the caller keeps scanning other AGs.  Any other error is
 * returned as-is.
 */
static int
xfs_bmap_longest_free_extent(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		*blen,
	int			*notinit)
{
	xfs_extlen_t		longest;
	int			error = 0;

	if (!xfs_perag_initialised_agf(pag)) {
		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
				NULL);
		if (error) {
			/* Couldn't lock the AGF, so skip this AG. */
			if (error == -EAGAIN) {
				*notinit = 1;
				error = 0;
			}
			return error;
		}
	}

	/*
	 * Discount the blocks needed for a minimal free list and any AG
	 * reservation from the longest free extent the AG can offer.
	 */
	longest = xfs_alloc_longest_free_extent(pag,
				xfs_alloc_min_freelist(pag->pag_mount, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
	if (*blen < longest)
		*blen = longest;

	return 0;
}
/*
 * Choose args->minlen from the best free extent length (*blen) found while
 * scanning the AGs.  @notinit is set if at least one AG was skipped because
 * its AGF could not be trylocked, in which case a larger extent may still
 * exist and we fall back to the caller's minimum.
 */
static void
xfs_bmap_select_minlen(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen,
	int			notinit)
{
	if (notinit || *blen < ap->minlen) {
		/*
		 * Since the AGF scan used a trylock (and may have skipped
		 * AGs), it is possible that there is space for this request.
		 */
		args->minlen = ap->minlen;
	} else if (*blen < args->maxlen) {
		/*
		 * If the best seen length is less than the request length,
		 * use the best as the minimum.
		 */
		args->minlen = *blen;
	} else {
		/*
		 * Otherwise we've seen an extent as big as maxlen, use that
		 * as the minimum.
		 */
		args->minlen = args->maxlen;
	}
}

/*
 * Scan the AGs (wrapping from the AG of ap->blkno) for the longest free
 * extent and derive args->minlen/args->total for a bmap btree allocation.
 *
 * If the transaction is already in low-space mode, skip the scan entirely
 * and allocate with the caller's bare minimum.  The scan stops early once
 * an extent of at least args->maxlen has been seen.
 */
static int
xfs_bmap_btalloc_select_lengths(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno, startag;
	int			notinit = 0;
	int			error = 0;

	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
		args->total = ap->minlen;
		args->minlen = ap->minlen;
		return 0;
	}

	args->total = ap->total;
	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
	if (startag == NULLAGNUMBER)
		startag = 0;

	*blen = 0;
	for_each_perag_wrap(mp, startag, agno, pag) {
		error = xfs_bmap_longest_free_extent(pag, args->tp, blen,
				&notinit);
		if (error)
			break;
		if (*blen >= args->maxlen)
			break;
	}
	/* Loop may exit early with a live perag reference; drop it. */
	if (pag)
		xfs_perag_rele(pag);

	xfs_bmap_select_minlen(ap, args, blen, notinit);
	return error;
}
/*
 * Pick allocation lengths for a filestreams allocation.
 *
 * First probe the AG the filestream currently targets (derived from
 * ap->blkno); if its longest free extent cannot satisfy args->maxlen, ask
 * the filestreams allocator for a new AG and probe that one instead.  The
 * fallback target (ap->blkno/args->fsbno) is then pointed at the selected
 * AG because the stream may have moved.
 */
STATIC int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_perag	*pag;
	xfs_agnumber_t		start_agno;
	int			notinit = 0;
	int			error;

	args->total = ap->total;

	start_agno = XFS_FSB_TO_AGNO(mp, ap->blkno);
	if (start_agno == NULLAGNUMBER)
		start_agno = 0;

	/* grab can fail (e.g. AG going away); just skip the probe then */
	pag = xfs_perag_grab(mp, start_agno);
	if (pag) {
		error = xfs_bmap_longest_free_extent(pag, args->tp, blen,
				&notinit);
		xfs_perag_rele(pag);
		if (error)
			return error;
	}

	if (*blen < args->maxlen) {
		xfs_agnumber_t	agno = start_agno;

		error = xfs_filestream_new_ag(ap, &agno);
		if (error)
			return error;
		if (agno == NULLAGNUMBER)
			goto out_select;

		pag = xfs_perag_grab(mp, agno);
		if (!pag)
			goto out_select;

		error = xfs_bmap_longest_free_extent(pag, args->tp,
				blen, &notinit);
		xfs_perag_rele(pag);
		if (error)
			return error;

		start_agno = agno;
	}

out_select:
	xfs_bmap_select_minlen(ap, args, blen, notinit);

	/*
	 * Set the failure fallback case to look in the selected AG as stream
	 * may have moved.
	 */
	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, start_agno, 0);
	return 0;
}
The transaction 33214b4c1326SDarrick J. Wong * has acquired in-core quota reservation for this extent. 33224b4c1326SDarrick J. Wong * Rather than account these as real blocks, however, we reduce 33234b4c1326SDarrick J. Wong * the transaction quota reservation based on the allocation. 33244b4c1326SDarrick J. Wong * This essentially transfers the transaction quota reservation 33254b4c1326SDarrick J. Wong * to that of a delalloc extent. 33264b4c1326SDarrick J. Wong */ 33274b4c1326SDarrick J. Wong ap->ip->i_delayed_blks += args->len; 33284b4c1326SDarrick J. Wong xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS, 33294b4c1326SDarrick J. Wong -(long)args->len); 33304b4c1326SDarrick J. Wong return; 33314b4c1326SDarrick J. Wong } 33324b4c1326SDarrick J. Wong 33334b4c1326SDarrick J. Wong /* data/attr fork only */ 33346e73a545SChristoph Hellwig ap->ip->i_nblocks += args->len; 3335751f3767SDarrick J. Wong xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); 33369fe82b8cSDarrick J. Wong if (ap->wasdel) { 3337751f3767SDarrick J. Wong ap->ip->i_delayed_blks -= args->len; 33389fe82b8cSDarrick J. Wong xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len); 33399fe82b8cSDarrick J. Wong } 3340751f3767SDarrick J. Wong xfs_trans_mod_dquot_byino(ap->tp, ap->ip, 3341751f3767SDarrick J. Wong ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT, 3342751f3767SDarrick J. Wong args->len); 3343751f3767SDarrick J. Wong } 3344751f3767SDarrick J. 
/*
 * Work out the stripe alignment and the extent-size-hint alignment for an
 * allocation, applying the extent size hint to ap->offset/ap->length and
 * filling in args->prod/args->mod.
 *
 * Returns the stripe alignment (in filesystem blocks) for the caller to
 * feed into the aligned-allocation paths; 0 if no stripe geometry is set.
 */
static int
xfs_bmap_compute_alignments(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args)
{
	struct xfs_mount	*mp = args->mp;
	xfs_extlen_t		align = 0; /* minimum allocation alignment */
	int			stripe_align = 0;

	/* stripe alignment for allocation is determined by mount parameters */
	if (mp->m_swidth && xfs_has_swalloc(mp))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	/* extent size hint: COW fork and user data have separate hints */
	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (ap->datatype & XFS_ALLOC_USERDATA)
		align = xfs_get_extsz_hint(ap->ip);
	if (align) {
		/*
		 * Round offset down / length up to the hint.  This is not
		 * expected to fail with the flags used here, hence ASSERT.
		 */
		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
					ap->eof, 0, ap->conv, &ap->offset,
					&ap->length))
			ASSERT(0);
		ASSERT(ap->length);
	}

	/* apply extent size hints if obtained earlier */
	if (align) {
		args->prod = align;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		/* block size covers a page; no sub-page alignment needed */
		args->prod = 1;
		args->mod = 0;
	} else {
		/* align to page boundaries when blocks are smaller than a page */
		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		div_u64_rem(ap->offset, args->prod, &args->mod);
		if (args->mod)
			args->mod = args->prod - args->mod;
	}

	return stripe_align;
}
341107c72e55SChandan Babu R */ 341207c72e55SChandan Babu R if (ap->length <= orig_length) 341307c72e55SChandan Babu R ap->offset = orig_offset; 341407c72e55SChandan Babu R else if (ap->offset + ap->length < orig_offset + orig_length) 341507c72e55SChandan Babu R ap->offset = orig_offset + orig_length - ap->length; 341607c72e55SChandan Babu R xfs_bmap_btalloc_accounting(ap, args); 341707c72e55SChandan Babu R } 341807c72e55SChandan Babu R 341930151967SChandan Babu R #ifdef DEBUG 342030151967SChandan Babu R static int 342130151967SChandan Babu R xfs_bmap_exact_minlen_extent_alloc( 342230151967SChandan Babu R struct xfs_bmalloca *ap) 342330151967SChandan Babu R { 342430151967SChandan Babu R struct xfs_mount *mp = ap->ip->i_mount; 342530151967SChandan Babu R struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp }; 342630151967SChandan Babu R xfs_fileoff_t orig_offset; 342730151967SChandan Babu R xfs_extlen_t orig_length; 342830151967SChandan Babu R int error; 342930151967SChandan Babu R 343030151967SChandan Babu R ASSERT(ap->length); 343130151967SChandan Babu R 343230151967SChandan Babu R if (ap->minlen != 1) { 343330151967SChandan Babu R ap->blkno = NULLFSBLOCK; 343430151967SChandan Babu R ap->length = 0; 343530151967SChandan Babu R return 0; 343630151967SChandan Babu R } 343730151967SChandan Babu R 343830151967SChandan Babu R orig_offset = ap->offset; 343930151967SChandan Babu R orig_length = ap->length; 344030151967SChandan Babu R 344130151967SChandan Babu R args.alloc_minlen_only = 1; 344230151967SChandan Babu R 344330151967SChandan Babu R xfs_bmap_compute_alignments(ap, &args); 344430151967SChandan Babu R 344530151967SChandan Babu R /* 344630151967SChandan Babu R * Unlike the longest extent available in an AG, we don't track 344730151967SChandan Babu R * the length of an AG's shortest extent. 
#ifdef DEBUG
/*
 * Debug-only allocator used by the XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT
 * error tag: allocate exactly ap->minlen blocks (which must be 1) rather
 * than the best-length allocation, to exercise minimum-length allocation
 * paths.  Returns with ap->blkno == NULLFSBLOCK and ap->length == 0 when
 * no allocation was made.
 */
static int
xfs_bmap_exact_minlen_extent_alloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
	xfs_fileoff_t		orig_offset;
	xfs_extlen_t		orig_length;
	int			error;

	ASSERT(ap->length);

	/* Only a minlen of 1 is supported by this debug knob. */
	if (ap->minlen != 1) {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
		return 0;
	}

	orig_offset = ap->offset;
	orig_length = ap->length;

	args.alloc_minlen_only = 1;

	xfs_bmap_compute_alignments(ap, &args);

	/*
	 * Unlike the longest extent available in an AG, we don't track
	 * the length of an AG's shortest extent.
	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
	 * hence we can afford to start traversing from the 0th AG since
	 * we need not be concerned about a drop in performance in
	 * "debug only" code paths.
	 */
	ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);

	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	args.minlen = args.maxlen = ap->minlen;
	args.total = ap->total;

	args.alignment = 1;
	args.minalignslop = 0;

	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.resv = XFS_AG_RESV_NONE;
	args.datatype = ap->datatype;

	error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
	if (error)
		return error;

	if (args.fsbno != NULLFSBLOCK) {
		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
			orig_length);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}

	return 0;
}
#else

#define	xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)

#endif
/*
 * If we are not low on available data blocks and we are allocating at
 * EOF, optimise allocation for contiguous file extension and/or stripe
 * alignment of the new extent.
 *
 * NOTE: ap->aeof is only set if the allocation length is >= the
 * stripe unit and the allocation offset is at the end of file.
 *
 * Returns 0 with args->fsbno == NULLFSBLOCK if neither the exact-bno nor
 * the aligned attempt succeeded; args is restored so the caller can retry
 * as if this function was never called.
 */
static int
xfs_bmap_btalloc_at_eof(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		blen,
	int			stripe_align,
	bool			ag_only)
{
	struct xfs_mount	*mp = args->mp;
	int			error;

	/*
	 * If there are already extents in the file, try an exact EOF block
	 * allocation to extend the file as a contiguous extent. If that fails,
	 * or it's the first allocation in a file, just try for a stripe aligned
	 * allocation.
	 */
	if (ap->offset) {
		xfs_extlen_t	nextminlen = 0;

		/*
		 * Compute the minlen+alignment for the next case. Set slop so
		 * that the value of minlen+alignment+slop doesn't go up between
		 * the calls.
		 */
		args->alignment = 1;
		if (blen > stripe_align && blen <= args->maxlen)
			nextminlen = blen - stripe_align;
		else
			nextminlen = args->minlen;
		if (nextminlen + stripe_align > args->minlen + 1)
			args->minalignslop = nextminlen + stripe_align -
					args->minlen - 1;
		else
			args->minalignslop = 0;

		/* exact-bno allocation needs the target AG's perag held */
		args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
		xfs_perag_put(args->pag);
		if (error)
			return error;

		if (args->fsbno != NULLFSBLOCK)
			return 0;
		/*
		 * Exact allocation failed. Reset to try an aligned allocation
		 * according to the original allocation specification.
		 */
		args->pag = NULL;
		args->alignment = stripe_align;
		args->minlen = nextminlen;
		args->minalignslop = 0;
	} else {
		/*
		 * Adjust minlen to try and preserve alignment if we
		 * can't guarantee an aligned maxlen extent.
		 */
		args->alignment = stripe_align;
		if (blen > args->alignment &&
		    blen <= args->maxlen + args->alignment)
			args->minlen = blen - args->alignment;
		args->minalignslop = 0;
	}

	/* filestreams allocations stay in one AG; otherwise scan all AGs */
	if (ag_only)
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
	else
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
	if (error)
		return error;

	if (args->fsbno != NULLFSBLOCK)
		return 0;

	/*
	 * Allocation failed, so return the allocation args to their
	 * original non-aligned state so the caller can proceed on allocation
	 * failure as if this function was never called.
	 */
	args->fsbno = ap->blkno;
	args->alignment = 1;
	return 0;
}
/*
 * Main allocation policy for bmap btree allocations: pick a target block,
 * size the request from the free space available, and then walk a cascade
 * of progressively less picky allocation attempts:
 *
 *   1. optimised EOF/stripe-aligned allocation (if at EOF and not low on
 *      space),
 *   2. locality-based allocation at the target block,
 *   3. full-filesystem allocation at the caller's bare minlen,
 *   4. last-resort first-AG minimum-length allocation, which also flips
 *      the transaction into low-space mode.
 */
static int
xfs_bmap_btalloc_best_length(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	int			stripe_align)
{
	struct xfs_mount	*mp = args->mp;
	xfs_extlen_t		blen = 0;
	bool			is_filestream = false;
	int			error;

	if ((ap->datatype & XFS_ALLOC_USERDATA) &&
	    xfs_inode_is_filestream(ap->ip))
		is_filestream = true;

	/*
	 * Determine the initial block number we will target for allocation.
	 */
	if (is_filestream) {
		xfs_agnumber_t	agno = xfs_filestream_lookup_ag(ap->ip);
		if (agno == NULLAGNUMBER)
			agno = 0;
		ap->blkno = XFS_AGB_TO_FSB(mp, agno, 0);
	} else {
		ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
	}
	/* nudge the target towards adjacent already-mapped extents */
	xfs_bmap_adjacent(ap);

	/*
	 * Search for an allocation group with a single extent large enough for
	 * the request.  If one isn't found, then adjust the minimum allocation
	 * size to the largest space found.
	 */
	if (is_filestream) {
		/*
		 * If there is very little free space before we start a
		 * filestreams allocation, we're almost guaranteed to fail to
		 * find an AG with enough contiguous free space to succeed, so
		 * just go straight to the low space algorithm.
		 */
		if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
			args->minlen = ap->minlen;
			goto critically_low_space;
		}
		error = xfs_bmap_btalloc_filestreams(ap, args, &blen);
	} else {
		error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
	}
	if (error)
		return error;

	/*
	 * Don't attempt optimal EOF allocation if previous allocations barely
	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
	 * optimal or even aligned allocations in this case, so don't waste time
	 * trying.
	 */
	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
				is_filestream);
		if (error)
			return error;
		if (args->fsbno != NULLFSBLOCK)
			return 0;
	}

	if (is_filestream)
		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
	else
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
	if (error)
		return error;
	if (args->fsbno != NULLFSBLOCK)
		return 0;

	/*
	 * Try a locality first full filesystem minimum length allocation whilst
	 * still maintaining necessary total block reservation requirements.
	 */
	if (args->minlen > ap->minlen) {
		args->minlen = ap->minlen;
		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
		if (error)
			return error;
	}
	if (args->fsbno != NULLFSBLOCK)
		return 0;

	/*
	 * We are now critically low on space, so this is a last resort
	 * allocation attempt: no reserve, no locality, blocking, minimum
	 * length, full filesystem free space scan. We also indicate to future
	 * allocations in this transaction that we are critically low on space
	 * so they don't waste time on allocation modes that are unlikely to
	 * succeed.
	 */
critically_low_space:
	args->total = ap->minlen;
	error = xfs_alloc_vextent_first_ag(args, 0);
	if (error)
		return error;
	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
	return 0;
}
/*
 * Allocate filesystem blocks for a bmap btree mapping: compute alignments,
 * cap the request to what one AG can hold, run the best-length allocation
 * cascade, and record the result (or a NULLFSBLOCK failure) in @ap.
 */
static int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_alloc_arg	args = {
		.tp		= ap->tp,
		.mp		= mp,
		.fsbno		= NULLFSBLOCK,
		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
		.minleft	= ap->minleft,
		.wasdel		= ap->wasdel,
		.resv		= XFS_AG_RESV_NONE,
		.datatype	= ap->datatype,
		.alignment	= 1,
		.minalignslop	= 0,
	};
	xfs_fileoff_t		orig_offset;
	xfs_extlen_t		orig_length;
	int			error;
	int			stripe_align;

	ASSERT(ap->length);
	/*
	 * Remember the caller's request before extent size hints adjust it,
	 * so the result can be moved back over the requested range.
	 */
	orig_offset = ap->offset;
	orig_length = ap->length;

	stripe_align = xfs_bmap_compute_alignments(ap, &args);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = min(ap->length, mp->m_ag_max_usable);

	error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
	if (error)
		return error;

	if (args.fsbno != NULLFSBLOCK) {
		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
			orig_length);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
Wong irec->br_startblock = DELAYSTARTBLOCK; 37490a0af28cSDarrick J. Wong if (irec->br_startblock != DELAYSTARTBLOCK && 37500a0af28cSDarrick J. Wong irec->br_startblock != HOLESTARTBLOCK) 37510a0af28cSDarrick J. Wong irec->br_startblock += distance; 37520a0af28cSDarrick J. Wong irec->br_startoff += distance; 37530a0af28cSDarrick J. Wong irec->br_blockcount -= distance; 37540a0af28cSDarrick J. Wong } 37550a0af28cSDarrick J. Wong 37560a0af28cSDarrick J. Wong if (end < irec->br_startoff + irec->br_blockcount) { 37570a0af28cSDarrick J. Wong distance = irec->br_startoff + irec->br_blockcount - end; 37580a0af28cSDarrick J. Wong irec->br_blockcount -= distance; 37590a0af28cSDarrick J. Wong } 37600a0af28cSDarrick J. Wong } 37610a0af28cSDarrick J. Wong 376230f712c9SDave Chinner /* 376330f712c9SDave Chinner * Trim the returned map to the required bounds 376430f712c9SDave Chinner */ 376530f712c9SDave Chinner STATIC void 376630f712c9SDave Chinner xfs_bmapi_trim_map( 376730f712c9SDave Chinner struct xfs_bmbt_irec *mval, 376830f712c9SDave Chinner struct xfs_bmbt_irec *got, 376930f712c9SDave Chinner xfs_fileoff_t *bno, 377030f712c9SDave Chinner xfs_filblks_t len, 377130f712c9SDave Chinner xfs_fileoff_t obno, 377230f712c9SDave Chinner xfs_fileoff_t end, 377330f712c9SDave Chinner int n, 3774e7d410acSDave Chinner uint32_t flags) 377530f712c9SDave Chinner { 377630f712c9SDave Chinner if ((flags & XFS_BMAPI_ENTIRE) || 377730f712c9SDave Chinner got->br_startoff + got->br_blockcount <= obno) { 377830f712c9SDave Chinner *mval = *got; 377930f712c9SDave Chinner if (isnullstartblock(got->br_startblock)) 378030f712c9SDave Chinner mval->br_startblock = DELAYSTARTBLOCK; 378130f712c9SDave Chinner return; 378230f712c9SDave Chinner } 378330f712c9SDave Chinner 378430f712c9SDave Chinner if (obno > *bno) 378530f712c9SDave Chinner *bno = obno; 378630f712c9SDave Chinner ASSERT((*bno >= obno) || (n == 0)); 378730f712c9SDave Chinner ASSERT(*bno < end); 378830f712c9SDave Chinner mval->br_startoff = *bno; 
/*
 * Trim the returned map to the required bounds.
 *
 * Copies @got into @mval clipped to [*bno, end); delalloc mappings (null
 * startblock) are reported with DELAYSTARTBLOCK.  With XFS_BMAPI_ENTIRE,
 * or when @got ends at or before @obno, the whole of @got is returned
 * untrimmed.  *bno is advanced to the start of the trimmed mapping.
 */
STATIC void
xfs_bmapi_trim_map(
	struct xfs_bmbt_irec	*mval,
	struct xfs_bmbt_irec	*got,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			n,
	uint32_t		flags)
{
	if ((flags & XFS_BMAPI_ENTIRE) ||
	    got->br_startoff + got->br_blockcount <= obno) {
		*mval = *got;
		if (isnullstartblock(got->br_startblock))
			mval->br_startblock = DELAYSTARTBLOCK;
		return;
	}

	if (obno > *bno)
		*bno = obno;
	ASSERT((*bno >= obno) || (n == 0));
	ASSERT(*bno < end);
	mval->br_startoff = *bno;
	if (isnullstartblock(got->br_startblock))
		mval->br_startblock = DELAYSTARTBLOCK;
	else
		mval->br_startblock = got->br_startblock +
					(*bno - got->br_startoff);
	/*
	 * Return the minimum of what we got and what we asked for for
	 * the length.  We can use the len variable here because it is
	 * modified below and we could have been there before coming
	 * here if the first part of the allocation didn't overlap what
	 * was asked for.
	 */
	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
			got->br_blockcount - (*bno - got->br_startoff));
	mval->br_state = got->br_state;
	ASSERT(mval->br_blockcount <= len);
	return;
}
(mval->br_startoff < obno)); 382730f712c9SDave Chinner 382830f712c9SDave Chinner *bno = mval->br_startoff + mval->br_blockcount; 382930f712c9SDave Chinner *len = end - *bno; 383030f712c9SDave Chinner if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { 383130f712c9SDave Chinner /* update previous map with new information */ 383230f712c9SDave Chinner ASSERT(mval->br_startblock == mval[-1].br_startblock); 383330f712c9SDave Chinner ASSERT(mval->br_blockcount > mval[-1].br_blockcount); 383430f712c9SDave Chinner ASSERT(mval->br_state == mval[-1].br_state); 383530f712c9SDave Chinner mval[-1].br_blockcount = mval->br_blockcount; 383630f712c9SDave Chinner mval[-1].br_state = mval->br_state; 383730f712c9SDave Chinner } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && 383830f712c9SDave Chinner mval[-1].br_startblock != DELAYSTARTBLOCK && 383930f712c9SDave Chinner mval[-1].br_startblock != HOLESTARTBLOCK && 384030f712c9SDave Chinner mval->br_startblock == mval[-1].br_startblock + 384130f712c9SDave Chinner mval[-1].br_blockcount && 3842c3a2f9ffSChristoph Hellwig mval[-1].br_state == mval->br_state) { 384330f712c9SDave Chinner ASSERT(mval->br_startoff == 384430f712c9SDave Chinner mval[-1].br_startoff + mval[-1].br_blockcount); 384530f712c9SDave Chinner mval[-1].br_blockcount += mval->br_blockcount; 384630f712c9SDave Chinner } else if (*n > 0 && 384730f712c9SDave Chinner mval->br_startblock == DELAYSTARTBLOCK && 384830f712c9SDave Chinner mval[-1].br_startblock == DELAYSTARTBLOCK && 384930f712c9SDave Chinner mval->br_startoff == 385030f712c9SDave Chinner mval[-1].br_startoff + mval[-1].br_blockcount) { 385130f712c9SDave Chinner mval[-1].br_blockcount += mval->br_blockcount; 385230f712c9SDave Chinner mval[-1].br_state = mval->br_state; 385330f712c9SDave Chinner } else if (!((*n == 0) && 385430f712c9SDave Chinner ((mval->br_startoff + mval->br_blockcount) <= 385530f712c9SDave Chinner obno))) { 385630f712c9SDave Chinner mval++; 385730f712c9SDave Chinner (*n)++; 
	}
	*map = mval;
}

/*
 * Map file blocks to filesystem blocks without allocation.
 *
 * Fills mval[0..*nmap) with up to *nmap mappings covering the file range
 * [bno, bno + len); holes are reported as HOLESTARTBLOCK and delalloc
 * reservations as DELAYSTARTBLOCK.  On return *nmap is the number of
 * entries actually used.  Returns 0 or a negative errno.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	uint32_t		flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	int			error;
	bool			eof = false;
	int			n = 0;

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));

	/* No fork at all (e.g. attr fork lookup on an inode without one). */
	if (WARN_ON_ONCE(!ifp))
		return -EFSCORRUPTED;

	/* Reject forks in a non-extent-bearing format, or error injection. */
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
		return -EFSCORRUPTED;

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	/* Make sure the in-core extent list is populated (read-only, no tp). */
	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record.
		 */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}

/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx
 * to look up the updated record in the inode fork.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,	/* XFS_DATA_FORK or XFS_COW_FORK */
	xfs_fileoff_t		off,		/* file offset to reserve at */
	xfs_filblks_t		len,		/* length actually required */
	xfs_filblks_t		prealloc,	/* extra speculative blocks */
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_extlen_t		alen;		/* reserved data blocks */
	xfs_extlen_t		indlen;		/* worst-case bmbt blocks */
	int			error;
	xfs_fileoff_t		aoff = off;	/* aligned start offset */

	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/* Figure out the extent size, adjust alen */
	if (whichfork == XFS_COW_FORK) {
		struct xfs_bmbt_irec	prev;
		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);

		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
			prev.br_startoff = NULLFILEOFF;

		/* align to the CoW extent size hint; may move aoff < off */
		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_quota_reserve_blkres(ip, alen);
	if (error)
		return error;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	if (error)
		goto out_unreserve_quota;

	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;


	ip->i_delayed_blks += alen;
	xfs_mod_delalloc(ip->i_mount, alen + indlen);

	/* Record the reservation; indlen is encoded in the startblock. */
	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
4037974ae922SBrian Foster */ 4038974ae922SBrian Foster if (whichfork == XFS_DATA_FORK && prealloc) 4039974ae922SBrian Foster xfs_inode_set_eofblocks_tag(ip); 4040974ae922SBrian Foster if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 4041974ae922SBrian Foster xfs_inode_set_cowblocks_tag(ip); 4042974ae922SBrian Foster 404330f712c9SDave Chinner return 0; 404430f712c9SDave Chinner 404530f712c9SDave Chinner out_unreserve_blocks: 40460d485adaSDave Chinner xfs_mod_fdblocks(mp, alen, false); 404730f712c9SDave Chinner out_unreserve_quota: 404830f712c9SDave Chinner if (XFS_IS_QUOTA_ON(mp)) 404985546500SDarrick J. Wong xfs_quota_unreserve_blkres(ip, alen); 405030f712c9SDave Chinner return error; 405130f712c9SDave Chinner } 405230f712c9SDave Chinner 40537f8a058fSDave Chinner static int 4054be6cacbeSChristoph Hellwig xfs_bmap_alloc_userdata( 4055be6cacbeSChristoph Hellwig struct xfs_bmalloca *bma) 4056be6cacbeSChristoph Hellwig { 4057be6cacbeSChristoph Hellwig struct xfs_mount *mp = bma->ip->i_mount; 4058be6cacbeSChristoph Hellwig int whichfork = xfs_bmapi_whichfork(bma->flags); 4059be6cacbeSChristoph Hellwig int error; 4060be6cacbeSChristoph Hellwig 4061be6cacbeSChristoph Hellwig /* 4062be6cacbeSChristoph Hellwig * Set the data type being allocated. For the data fork, the first data 4063be6cacbeSChristoph Hellwig * in the file is treated differently to all other allocations. For the 4064be6cacbeSChristoph Hellwig * attribute fork, we only need to ensure the allocated range is not on 4065be6cacbeSChristoph Hellwig * the busy list. 4066be6cacbeSChristoph Hellwig */ 4067be6cacbeSChristoph Hellwig bma->datatype = XFS_ALLOC_NOBUSY; 4068ddfdd530SDarrick J. 
	if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
		bma->datatype |= XFS_ALLOC_USERDATA;
		/* first data in the file gets its own hint */
		if (bma->offset == 0)
			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;

		if (mp->m_dalign && bma->length >= mp->m_dalign) {
			error = xfs_bmap_isaeof(bma, whichfork);
			if (error)
				return error;
		}

		if (XFS_IS_REALTIME_INODE(bma->ip))
			return xfs_bmap_rtalloc(bma);
	}

	/* error-injection path forcing minimum-length extent allocation */
	if (unlikely(XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
		return xfs_bmap_exact_minlen_extent_alloc(bma);

	return xfs_bmap_btalloc(bma);
}

/*
 * Allocate blocks for the range described by bma (offset/length/got) and
 * install the new extent into the fork, converting a delalloc reservation
 * in place when bma->wasdel is set.  On success bma->got describes the
 * resulting (possibly merged) extent; bma->blkno == NULLFSBLOCK means the
 * allocator could not find space.
 */
static int
xfs_bmapi_allocate(
	struct xfs_bmalloca	*bma)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(bma->flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	/*
	 * NOTE(review): tmp_logflags is never written after this init, so the
	 * "bma->logflags |= tmp_logflags" below is a no-op — looks like dead
	 * code left from an earlier calling convention; candidate for cleanup.
	 */
	int			tmp_logflags = 0;
	int			error;

	ASSERT(bma->length > 0);

	/*
	 * For the wasdelay case, we could also just allocate the stuff asked
	 * for in this bmap call but that wouldn't be as good.
	 */
	if (bma->wasdel) {
		/* convert the whole delalloc extent we landed in */
		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
		bma->offset = bma->got.br_startoff;
		if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
			bma->prev.br_startoff = NULLFILEOFF;
	} else {
		bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN);
		if (!bma->eof)
			bma->length = XFS_FILBLKS_MIN(bma->length,
					bma->got.br_startoff - bma->offset);
	}

	if (bma->flags & XFS_BMAPI_CONTIG)
		bma->minlen = bma->length;
	else
		bma->minlen = 1;

	if (bma->flags & XFS_BMAPI_METADATA) {
		if (unlikely(XFS_TEST_ERROR(false, mp,
				XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
			error = xfs_bmap_exact_minlen_extent_alloc(bma);
		else
			error = xfs_bmap_btalloc(bma);
	} else {
		error = xfs_bmap_alloc_userdata(bma);
	}
	if (error || bma->blkno == NULLFSBLOCK)
		return error;

	if (bma->flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
		if (error)
			return error;
	}

	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
	/*
	 * Bump the number of extents we've allocated
	 * in this call.
	 */
	bma->nallocs++;

	if (bma->cur)
		bma->cur->bc_ino.flags =
			bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;
	bma->got.br_blockcount = bma->length;
	bma->got.br_state = XFS_EXT_NORM;

	if (bma->flags & XFS_BMAPI_PREALLOC)
		bma->got.br_state = XFS_EXT_UNWRITTEN;

	if (bma->wasdel)
		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
	else
		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
				whichfork, &bma->icur, &bma->cur, &bma->got,
				&bma->logflags, bma->flags);

	bma->logflags |= tmp_logflags;	/* no-op today, see NOTE above */
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
	 * the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/* the resulting extent must cover the whole requested range */
	ASSERT(bma->got.br_startoff <= bma->offset);
	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
	       bma->offset + bma->length);
	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
	       bma->got.br_state == XFS_EXT_UNWRITTEN);
	return 0;
}

/*
 * Convert the extent state of *mval (unwritten <-> real) when the bmapi
 * flags call for it.  Returns 0 if no conversion is needed or it fully
 * covered mval, -EAGAIN if only part of mval was converted and the caller
 * should retry, or a negative errno.
 */
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	uint32_t		flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
	}
	/* flip the extent state */
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
42442e588a46SBrian Foster */ 424505a630d7SDarrick J. Wong if (whichfork != XFS_COW_FORK) 42462e588a46SBrian Foster bma->logflags |= tmp_logflags | XFS_ILOG_CORE; 424730f712c9SDave Chinner if (error) 424830f712c9SDave Chinner return error; 424930f712c9SDave Chinner 425030f712c9SDave Chinner /* 425130f712c9SDave Chinner * Update our extent pointer, given that 425230f712c9SDave Chinner * xfs_bmap_add_extent_unwritten_real might have merged it into one 425330f712c9SDave Chinner * of the neighbouring ones. 425430f712c9SDave Chinner */ 4255b2b1712aSChristoph Hellwig xfs_iext_get_extent(ifp, &bma->icur, &bma->got); 425630f712c9SDave Chinner 425730f712c9SDave Chinner /* 425830f712c9SDave Chinner * We may have combined previously unwritten space with written space, 425930f712c9SDave Chinner * so generate another request. 426030f712c9SDave Chinner */ 426130f712c9SDave Chinner if (mval->br_blockcount < len) 42622451337dSDave Chinner return -EAGAIN; 426330f712c9SDave Chinner return 0; 426430f712c9SDave Chinner } 426530f712c9SDave Chinner 4266d5753847SDave Chinner xfs_extlen_t 4267c8b54673SChristoph Hellwig xfs_bmapi_minleft( 4268c8b54673SChristoph Hellwig struct xfs_trans *tp, 4269c8b54673SChristoph Hellwig struct xfs_inode *ip, 4270c8b54673SChristoph Hellwig int fork) 4271c8b54673SChristoph Hellwig { 4272732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, fork); 4273f7e67b20SChristoph Hellwig 4274692b6cddSDave Chinner if (tp && tp->t_highest_agno != NULLAGNUMBER) 4275c8b54673SChristoph Hellwig return 0; 4276f7e67b20SChristoph Hellwig if (ifp->if_format != XFS_DINODE_FMT_BTREE) 4277c8b54673SChristoph Hellwig return 1; 4278f7e67b20SChristoph Hellwig return be16_to_cpu(ifp->if_broot->bb_level) + 1; 4279c8b54673SChristoph Hellwig } 4280c8b54673SChristoph Hellwig 4281c8b54673SChristoph Hellwig /* 4282c8b54673SChristoph Hellwig * Log whatever the flags say, even if error. 
Otherwise we might miss detecting 4283c8b54673SChristoph Hellwig * a case where the data is changed, there's an error, and it's not logged so we 4284c8b54673SChristoph Hellwig * don't shutdown when we should. Don't bother logging extents/btree changes if 4285c8b54673SChristoph Hellwig * we converted to the other format. 4286c8b54673SChristoph Hellwig */ 4287c8b54673SChristoph Hellwig static void 4288c8b54673SChristoph Hellwig xfs_bmapi_finish( 4289c8b54673SChristoph Hellwig struct xfs_bmalloca *bma, 4290c8b54673SChristoph Hellwig int whichfork, 4291c8b54673SChristoph Hellwig int error) 4292c8b54673SChristoph Hellwig { 4293732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork); 4294f7e67b20SChristoph Hellwig 4295c8b54673SChristoph Hellwig if ((bma->logflags & xfs_ilog_fext(whichfork)) && 4296f7e67b20SChristoph Hellwig ifp->if_format != XFS_DINODE_FMT_EXTENTS) 4297c8b54673SChristoph Hellwig bma->logflags &= ~xfs_ilog_fext(whichfork); 4298c8b54673SChristoph Hellwig else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) && 4299f7e67b20SChristoph Hellwig ifp->if_format != XFS_DINODE_FMT_BTREE) 4300c8b54673SChristoph Hellwig bma->logflags &= ~xfs_ilog_fbroot(whichfork); 4301c8b54673SChristoph Hellwig 4302c8b54673SChristoph Hellwig if (bma->logflags) 4303c8b54673SChristoph Hellwig xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags); 4304c8b54673SChristoph Hellwig if (bma->cur) 4305c8b54673SChristoph Hellwig xfs_btree_del_cursor(bma->cur, error); 4306c8b54673SChristoph Hellwig } 4307c8b54673SChristoph Hellwig 430830f712c9SDave Chinner /* 430930f712c9SDave Chinner * Map file blocks to filesystem blocks, and allocate blocks or convert the 431030f712c9SDave Chinner * extent state if necessary. Details behaviour is controlled by the flags 431130f712c9SDave Chinner * parameter. Only allocates blocks from a single allocation group, to avoid 431230f712c9SDave Chinner * locking problems. 
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	uint32_t		flags,		/* XFS_BMAPI_... */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap)		/* i/o: mval size/count */
{
	struct xfs_bmalloca	bma = {
		.tp		= tp,
		.ip		= ip,
		.total		= total,
	};
	struct xfs_mount	*mp = ip->i_mount;
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is for currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		goto error0;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);

	n = 0;
	end = bno + len;
	obno = bno;
	while (bno < end && n < *nmap) {
		bool			need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			/*
			 * CoW fork conversions should /never/ hit EOF or
			 * holes.  There should always be something for us
			 * to work on.
			 */
			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
			         (flags & XFS_BMAPI_COWFORK)));

			need_alloc = true;
		} else if (isnullstartblock(bma.got.br_startblock)) {
			/* landed in a delalloc reservation: convert in place */
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (need_alloc || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN)
				bma.length = XFS_MAX_BMBT_EXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error)
				goto error0;
			/* allocator found no space: return a short mapping */
			if (bma.blkno == NULLFSBLOCK)
				break;

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK)
				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
						bma.length);
		}

		/* Deal with the allocated space we found. */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
							end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
			eof = true;
	}
	*nmap = n;

	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
	if (error)
		goto error0;

	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
	xfs_bmapi_finish(&bma, whichfork, 0);
	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
		orig_nmap, *nmap);
	return 0;
error0:
	xfs_bmapi_finish(&bma, whichfork, error);
	return error;
}

/*
 * Convert an existing delalloc extent to real blocks based on file offset. This
 * attempts to allocate the entire delalloc extent and may require multiple
 * invocations to allocate the target offset if a large enough physical extent
 * is not available.
4504627209fbSBrian Foster */ 4505627209fbSBrian Foster int 4506627209fbSBrian Foster xfs_bmapi_convert_delalloc( 4507627209fbSBrian Foster struct xfs_inode *ip, 4508627209fbSBrian Foster int whichfork, 45094e087a3bSChristoph Hellwig xfs_off_t offset, 45104e087a3bSChristoph Hellwig struct iomap *iomap, 4511491ce61eSChristoph Hellwig unsigned int *seq) 4512627209fbSBrian Foster { 4513732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4514491ce61eSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 45154e087a3bSChristoph Hellwig xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 4516d8ae82e3SChristoph Hellwig struct xfs_bmalloca bma = { NULL }; 4517af952aebSDarrick J. Wong uint16_t flags = 0; 4518491ce61eSChristoph Hellwig struct xfs_trans *tp; 4519627209fbSBrian Foster int error; 4520627209fbSBrian Foster 45214e087a3bSChristoph Hellwig if (whichfork == XFS_COW_FORK) 45224e087a3bSChristoph Hellwig flags |= IOMAP_F_SHARED; 45234e087a3bSChristoph Hellwig 4524491ce61eSChristoph Hellwig /* 4525491ce61eSChristoph Hellwig * Space for the extent and indirect blocks was reserved when the 4526491ce61eSChristoph Hellwig * delalloc extent was created so there's no need to do so here. 
4527491ce61eSChristoph Hellwig */ 4528491ce61eSChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 4529491ce61eSChristoph Hellwig XFS_TRANS_RESERVE, &tp); 4530491ce61eSChristoph Hellwig if (error) 4531491ce61eSChristoph Hellwig return error; 4532491ce61eSChristoph Hellwig 4533491ce61eSChristoph Hellwig xfs_ilock(ip, XFS_ILOCK_EXCL); 45344f86bb4bSChandan Babu R xfs_trans_ijoin(tp, ip, 0); 4535727e1acdSChandan Babu R 4536727e1acdSChandan Babu R error = xfs_iext_count_may_overflow(ip, whichfork, 4537727e1acdSChandan Babu R XFS_IEXT_ADD_NOSPLIT_CNT); 45384f86bb4bSChandan Babu R if (error == -EFBIG) 45394f86bb4bSChandan Babu R error = xfs_iext_count_upgrade(tp, ip, 45404f86bb4bSChandan Babu R XFS_IEXT_ADD_NOSPLIT_CNT); 4541727e1acdSChandan Babu R if (error) 4542727e1acdSChandan Babu R goto out_trans_cancel; 4543727e1acdSChandan Babu R 4544d8ae82e3SChristoph Hellwig if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) || 4545d8ae82e3SChristoph Hellwig bma.got.br_startoff > offset_fsb) { 4546d8ae82e3SChristoph Hellwig /* 4547d8ae82e3SChristoph Hellwig * No extent found in the range we are trying to convert. This 4548d8ae82e3SChristoph Hellwig * should only happen for the COW fork, where another thread 4549d8ae82e3SChristoph Hellwig * might have moved the extent to the data fork in the meantime. 4550d8ae82e3SChristoph Hellwig */ 4551d8ae82e3SChristoph Hellwig WARN_ON_ONCE(whichfork != XFS_COW_FORK); 4552491ce61eSChristoph Hellwig error = -EAGAIN; 4553491ce61eSChristoph Hellwig goto out_trans_cancel; 4554d8ae82e3SChristoph Hellwig } 4555627209fbSBrian Foster 4556627209fbSBrian Foster /* 4557d8ae82e3SChristoph Hellwig * If we find a real extent here we raced with another thread converting 4558d8ae82e3SChristoph Hellwig * the extent. Just return the real extent at this offset. 
4559627209fbSBrian Foster */ 4560d8ae82e3SChristoph Hellwig if (!isnullstartblock(bma.got.br_startblock)) { 4561304a68b9SDave Chinner xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, 4562304a68b9SDave Chinner xfs_iomap_inode_sequence(ip, flags)); 4563491ce61eSChristoph Hellwig *seq = READ_ONCE(ifp->if_seq); 4564491ce61eSChristoph Hellwig goto out_trans_cancel; 4565d8ae82e3SChristoph Hellwig } 4566d8ae82e3SChristoph Hellwig 4567d8ae82e3SChristoph Hellwig bma.tp = tp; 4568d8ae82e3SChristoph Hellwig bma.ip = ip; 4569d8ae82e3SChristoph Hellwig bma.wasdel = true; 4570d8ae82e3SChristoph Hellwig bma.offset = bma.got.br_startoff; 457195f0b95eSChandan Babu R bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, 457295f0b95eSChandan Babu R XFS_MAX_BMBT_EXTLEN); 4573d8ae82e3SChristoph Hellwig bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4574a5949d3fSDarrick J. Wong 4575a5949d3fSDarrick J. Wong /* 4576a5949d3fSDarrick J. Wong * When we're converting the delalloc reservations backing dirty pages 4577a5949d3fSDarrick J. Wong * in the page cache, we must be careful about how we create the new 4578a5949d3fSDarrick J. Wong * extents: 4579a5949d3fSDarrick J. Wong * 4580a5949d3fSDarrick J. Wong * New CoW fork extents are created unwritten, turned into real extents 4581a5949d3fSDarrick J. Wong * when we're about to write the data to disk, and mapped into the data 4582a5949d3fSDarrick J. Wong * fork after the write finishes. End of story. 4583a5949d3fSDarrick J. Wong * 4584a5949d3fSDarrick J. Wong * New data fork extents must be mapped in as unwritten and converted 4585a5949d3fSDarrick J. Wong * to real extents after the write succeeds to avoid exposing stale 4586a5949d3fSDarrick J. Wong * disk contents if we crash. 4587a5949d3fSDarrick J. Wong */ 4588a5949d3fSDarrick J. Wong bma.flags = XFS_BMAPI_PREALLOC; 4589d8ae82e3SChristoph Hellwig if (whichfork == XFS_COW_FORK) 4590a5949d3fSDarrick J. 
Wong bma.flags |= XFS_BMAPI_COWFORK; 4591d8ae82e3SChristoph Hellwig 4592d8ae82e3SChristoph Hellwig if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4593d8ae82e3SChristoph Hellwig bma.prev.br_startoff = NULLFILEOFF; 4594d8ae82e3SChristoph Hellwig 4595d8ae82e3SChristoph Hellwig error = xfs_bmapi_allocate(&bma); 4596d8ae82e3SChristoph Hellwig if (error) 4597d8ae82e3SChristoph Hellwig goto out_finish; 4598d8ae82e3SChristoph Hellwig 4599d8ae82e3SChristoph Hellwig error = -ENOSPC; 4600d8ae82e3SChristoph Hellwig if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) 4601d8ae82e3SChristoph Hellwig goto out_finish; 4602627209fbSBrian Foster error = -EFSCORRUPTED; 4603eb77b23bSChristoph Hellwig if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock))) 4604d8ae82e3SChristoph Hellwig goto out_finish; 4605d8ae82e3SChristoph Hellwig 4606125851acSChristoph Hellwig XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length)); 4607125851acSChristoph Hellwig XFS_STATS_INC(mp, xs_xstrat_quick); 4608125851acSChristoph Hellwig 4609d8ae82e3SChristoph Hellwig ASSERT(!isnullstartblock(bma.got.br_startblock)); 4610304a68b9SDave Chinner xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, 4611304a68b9SDave Chinner xfs_iomap_inode_sequence(ip, flags)); 4612491ce61eSChristoph Hellwig *seq = READ_ONCE(ifp->if_seq); 4613d8ae82e3SChristoph Hellwig 461474b4c5d4SDarrick J. Wong if (whichfork == XFS_COW_FORK) 461574b4c5d4SDarrick J. 
Wong xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length); 4616d8ae82e3SChristoph Hellwig 4617d8ae82e3SChristoph Hellwig error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, 4618d8ae82e3SChristoph Hellwig whichfork); 4619491ce61eSChristoph Hellwig if (error) 4620491ce61eSChristoph Hellwig goto out_finish; 4621491ce61eSChristoph Hellwig 4622491ce61eSChristoph Hellwig xfs_bmapi_finish(&bma, whichfork, 0); 4623491ce61eSChristoph Hellwig error = xfs_trans_commit(tp); 4624491ce61eSChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_EXCL); 4625491ce61eSChristoph Hellwig return error; 4626491ce61eSChristoph Hellwig 4627d8ae82e3SChristoph Hellwig out_finish: 4628d8ae82e3SChristoph Hellwig xfs_bmapi_finish(&bma, whichfork, error); 4629491ce61eSChristoph Hellwig out_trans_cancel: 4630491ce61eSChristoph Hellwig xfs_trans_cancel(tp); 4631491ce61eSChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_EXCL); 4632627209fbSBrian Foster return error; 4633627209fbSBrian Foster } 4634627209fbSBrian Foster 46357cf199baSDarrick J. Wong int 46366ebd5a44SChristoph Hellwig xfs_bmapi_remap( 46376ebd5a44SChristoph Hellwig struct xfs_trans *tp, 46386ebd5a44SChristoph Hellwig struct xfs_inode *ip, 46396ebd5a44SChristoph Hellwig xfs_fileoff_t bno, 46406ebd5a44SChristoph Hellwig xfs_filblks_t len, 46416ebd5a44SChristoph Hellwig xfs_fsblock_t startblock, 4642e7d410acSDave Chinner uint32_t flags) 46436ebd5a44SChristoph Hellwig { 46446ebd5a44SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 46457cf199baSDarrick J. Wong struct xfs_ifork *ifp; 46466ebd5a44SChristoph Hellwig struct xfs_btree_cur *cur = NULL; 46476ebd5a44SChristoph Hellwig struct xfs_bmbt_irec got; 4648b2b1712aSChristoph Hellwig struct xfs_iext_cursor icur; 46497cf199baSDarrick J. Wong int whichfork = xfs_bmapi_whichfork(flags); 46506ebd5a44SChristoph Hellwig int logflags = 0, error; 46516ebd5a44SChristoph Hellwig 4652732436efSDarrick J. 
Wong ifp = xfs_ifork_ptr(ip, whichfork); 46536ebd5a44SChristoph Hellwig ASSERT(len > 0); 465495f0b95eSChandan Babu R ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN); 46556ebd5a44SChristoph Hellwig ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 46567644bd98SDarrick J. Wong ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC | 46577644bd98SDarrick J. Wong XFS_BMAPI_NORMAP))); 46587644bd98SDarrick J. Wong ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) != 46597644bd98SDarrick J. Wong (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)); 46606ebd5a44SChristoph Hellwig 4661f7e67b20SChristoph Hellwig if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) || 4662a71895c5SDarrick J. Wong XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) { 46636ebd5a44SChristoph Hellwig return -EFSCORRUPTED; 46646ebd5a44SChristoph Hellwig } 46656ebd5a44SChristoph Hellwig 466675c8c50fSDave Chinner if (xfs_is_shutdown(mp)) 46676ebd5a44SChristoph Hellwig return -EIO; 46686ebd5a44SChristoph Hellwig 46697cf199baSDarrick J. Wong error = xfs_iread_extents(tp, ip, whichfork); 46706ebd5a44SChristoph Hellwig if (error) 46716ebd5a44SChristoph Hellwig return error; 46726ebd5a44SChristoph Hellwig 4673b2b1712aSChristoph Hellwig if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) { 46746ebd5a44SChristoph Hellwig /* make sure we only reflink into a hole. */ 46756ebd5a44SChristoph Hellwig ASSERT(got.br_startoff > bno); 46766ebd5a44SChristoph Hellwig ASSERT(got.br_startoff - bno >= len); 46776ebd5a44SChristoph Hellwig } 46786ebd5a44SChristoph Hellwig 46796e73a545SChristoph Hellwig ip->i_nblocks += len; 4680bf8eadbaSChristoph Hellwig xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 46816ebd5a44SChristoph Hellwig 4682ac1e0672SChristoph Hellwig if (ifp->if_format == XFS_DINODE_FMT_BTREE) { 46837cf199baSDarrick J. 
Wong cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); 468492219c29SDave Chinner cur->bc_ino.flags = 0; 46856ebd5a44SChristoph Hellwig } 46866ebd5a44SChristoph Hellwig 46876ebd5a44SChristoph Hellwig got.br_startoff = bno; 46886ebd5a44SChristoph Hellwig got.br_startblock = startblock; 46896ebd5a44SChristoph Hellwig got.br_blockcount = len; 46907644bd98SDarrick J. Wong if (flags & XFS_BMAPI_PREALLOC) 46917644bd98SDarrick J. Wong got.br_state = XFS_EXT_UNWRITTEN; 46927644bd98SDarrick J. Wong else 46936ebd5a44SChristoph Hellwig got.br_state = XFS_EXT_NORM; 46946ebd5a44SChristoph Hellwig 46957cf199baSDarrick J. Wong error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur, 469692f9da30SBrian Foster &cur, &got, &logflags, flags); 46976ebd5a44SChristoph Hellwig if (error) 46986ebd5a44SChristoph Hellwig goto error0; 46996ebd5a44SChristoph Hellwig 4700b101e334SChristoph Hellwig error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork); 47016ebd5a44SChristoph Hellwig 47026ebd5a44SChristoph Hellwig error0: 4703f7e67b20SChristoph Hellwig if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS) 47046ebd5a44SChristoph Hellwig logflags &= ~XFS_ILOG_DEXT; 4705f7e67b20SChristoph Hellwig else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE) 47066ebd5a44SChristoph Hellwig logflags &= ~XFS_ILOG_DBROOT; 47076ebd5a44SChristoph Hellwig 47086ebd5a44SChristoph Hellwig if (logflags) 47096ebd5a44SChristoph Hellwig xfs_trans_log_inode(tp, ip, logflags); 47100b04b6b8SDarrick J. Wong if (cur) 47110b04b6b8SDarrick J. Wong xfs_btree_del_cursor(cur, error); 47126ebd5a44SChristoph Hellwig return error; 47136ebd5a44SChristoph Hellwig } 47146ebd5a44SChristoph Hellwig 471530f712c9SDave Chinner /* 4716a9bd24acSBrian Foster * When a delalloc extent is split (e.g., due to a hole punch), the original 4717a9bd24acSBrian Foster * indlen reservation must be shared across the two new extents that are left 4718a9bd24acSBrian Foster * behind. 
4719a9bd24acSBrian Foster * 4720a9bd24acSBrian Foster * Given the original reservation and the worst case indlen for the two new 4721a9bd24acSBrian Foster * extents (as calculated by xfs_bmap_worst_indlen()), split the original 4722d34999c9SBrian Foster * reservation fairly across the two new extents. If necessary, steal available 4723d34999c9SBrian Foster * blocks from a deleted extent to make up a reservation deficiency (e.g., if 4724d34999c9SBrian Foster * ores == 1). The number of stolen blocks is returned. The availability and 4725d34999c9SBrian Foster * subsequent accounting of stolen blocks is the responsibility of the caller. 4726a9bd24acSBrian Foster */ 4727d34999c9SBrian Foster static xfs_filblks_t 4728a9bd24acSBrian Foster xfs_bmap_split_indlen( 4729a9bd24acSBrian Foster xfs_filblks_t ores, /* original res. */ 4730a9bd24acSBrian Foster xfs_filblks_t *indlen1, /* ext1 worst indlen */ 4731d34999c9SBrian Foster xfs_filblks_t *indlen2, /* ext2 worst indlen */ 4732d34999c9SBrian Foster xfs_filblks_t avail) /* stealable blocks */ 4733a9bd24acSBrian Foster { 4734a9bd24acSBrian Foster xfs_filblks_t len1 = *indlen1; 4735a9bd24acSBrian Foster xfs_filblks_t len2 = *indlen2; 4736a9bd24acSBrian Foster xfs_filblks_t nres = len1 + len2; /* new total res. */ 4737d34999c9SBrian Foster xfs_filblks_t stolen = 0; 473875d65361SBrian Foster xfs_filblks_t resfactor; 4739a9bd24acSBrian Foster 4740a9bd24acSBrian Foster /* 4741d34999c9SBrian Foster * Steal as many blocks as we can to try and satisfy the worst case 4742d34999c9SBrian Foster * indlen for both new extents. 
4743d34999c9SBrian Foster */ 474475d65361SBrian Foster if (ores < nres && avail) 474575d65361SBrian Foster stolen = XFS_FILBLKS_MIN(nres - ores, avail); 474675d65361SBrian Foster ores += stolen; 474775d65361SBrian Foster 474875d65361SBrian Foster /* nothing else to do if we've satisfied the new reservation */ 474975d65361SBrian Foster if (ores >= nres) 475075d65361SBrian Foster return stolen; 4751d34999c9SBrian Foster 4752d34999c9SBrian Foster /* 475375d65361SBrian Foster * We can't meet the total required reservation for the two extents. 475475d65361SBrian Foster * Calculate the percent of the overall shortage between both extents 475575d65361SBrian Foster * and apply this percentage to each of the requested indlen values. 475675d65361SBrian Foster * This distributes the shortage fairly and reduces the chances that one 475775d65361SBrian Foster * of the two extents is left with nothing when extents are repeatedly 475875d65361SBrian Foster * split. 4759a9bd24acSBrian Foster */ 476075d65361SBrian Foster resfactor = (ores * 100); 476175d65361SBrian Foster do_div(resfactor, nres); 476275d65361SBrian Foster len1 *= resfactor; 476375d65361SBrian Foster do_div(len1, 100); 476475d65361SBrian Foster len2 *= resfactor; 476575d65361SBrian Foster do_div(len2, 100); 476675d65361SBrian Foster ASSERT(len1 + len2 <= ores); 476775d65361SBrian Foster ASSERT(len1 < *indlen1 && len2 < *indlen2); 476875d65361SBrian Foster 476975d65361SBrian Foster /* 477075d65361SBrian Foster * Hand out the remainder to each extent. If one of the two reservations 477175d65361SBrian Foster * is zero, we want to make sure that one gets a block first. The loop 477275d65361SBrian Foster * below starts with len1, so hand len2 a block right off the bat if it 477375d65361SBrian Foster * is zero. 
477475d65361SBrian Foster */ 477575d65361SBrian Foster ores -= (len1 + len2); 477675d65361SBrian Foster ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores); 477775d65361SBrian Foster if (ores && !len2 && *indlen2) { 477875d65361SBrian Foster len2++; 477975d65361SBrian Foster ores--; 4780a9bd24acSBrian Foster } 478175d65361SBrian Foster while (ores) { 478275d65361SBrian Foster if (len1 < *indlen1) { 478375d65361SBrian Foster len1++; 478475d65361SBrian Foster ores--; 478575d65361SBrian Foster } 478675d65361SBrian Foster if (!ores) 4787a9bd24acSBrian Foster break; 478875d65361SBrian Foster if (len2 < *indlen2) { 478975d65361SBrian Foster len2++; 479075d65361SBrian Foster ores--; 4791a9bd24acSBrian Foster } 4792a9bd24acSBrian Foster } 4793a9bd24acSBrian Foster 4794a9bd24acSBrian Foster *indlen1 = len1; 4795a9bd24acSBrian Foster *indlen2 = len2; 4796d34999c9SBrian Foster 4797d34999c9SBrian Foster return stolen; 4798a9bd24acSBrian Foster } 4799a9bd24acSBrian Foster 4800fa5c836cSChristoph Hellwig int 4801fa5c836cSChristoph Hellwig xfs_bmap_del_extent_delay( 4802fa5c836cSChristoph Hellwig struct xfs_inode *ip, 4803fa5c836cSChristoph Hellwig int whichfork, 4804b2b1712aSChristoph Hellwig struct xfs_iext_cursor *icur, 4805fa5c836cSChristoph Hellwig struct xfs_bmbt_irec *got, 4806fa5c836cSChristoph Hellwig struct xfs_bmbt_irec *del) 4807fa5c836cSChristoph Hellwig { 4808fa5c836cSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 4809732436efSDarrick J. 
Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); 4810fa5c836cSChristoph Hellwig struct xfs_bmbt_irec new; 4811fa5c836cSChristoph Hellwig int64_t da_old, da_new, da_diff = 0; 4812fa5c836cSChristoph Hellwig xfs_fileoff_t del_endoff, got_endoff; 4813fa5c836cSChristoph Hellwig xfs_filblks_t got_indlen, new_indlen, stolen; 48140e5b8e45SDave Chinner uint32_t state = xfs_bmap_fork_to_state(whichfork); 4815060ea65bSChristoph Hellwig int error = 0; 4816fa5c836cSChristoph Hellwig bool isrt; 4817fa5c836cSChristoph Hellwig 4818fa5c836cSChristoph Hellwig XFS_STATS_INC(mp, xs_del_exlist); 4819fa5c836cSChristoph Hellwig 4820fa5c836cSChristoph Hellwig isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); 4821fa5c836cSChristoph Hellwig del_endoff = del->br_startoff + del->br_blockcount; 4822fa5c836cSChristoph Hellwig got_endoff = got->br_startoff + got->br_blockcount; 4823fa5c836cSChristoph Hellwig da_old = startblockval(got->br_startblock); 4824fa5c836cSChristoph Hellwig da_new = 0; 4825fa5c836cSChristoph Hellwig 4826fa5c836cSChristoph Hellwig ASSERT(del->br_blockcount > 0); 4827fa5c836cSChristoph Hellwig ASSERT(got->br_startoff <= del->br_startoff); 4828fa5c836cSChristoph Hellwig ASSERT(got_endoff >= del_endoff); 4829fa5c836cSChristoph Hellwig 4830fa5c836cSChristoph Hellwig if (isrt) { 48314f1adf33SEric Sandeen uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); 4832fa5c836cSChristoph Hellwig 4833fa5c836cSChristoph Hellwig do_div(rtexts, mp->m_sb.sb_rextsize); 4834fa5c836cSChristoph Hellwig xfs_mod_frextents(mp, rtexts); 4835fa5c836cSChristoph Hellwig } 4836fa5c836cSChristoph Hellwig 4837fa5c836cSChristoph Hellwig /* 4838fa5c836cSChristoph Hellwig * Update the inode delalloc counter now and wait to update the 4839fa5c836cSChristoph Hellwig * sb counters as we might have to borrow some blocks for the 4840fa5c836cSChristoph Hellwig * indirect block accounting. 4841fa5c836cSChristoph Hellwig */ 484285546500SDarrick J. 
Wong ASSERT(!isrt); 484385546500SDarrick J. Wong error = xfs_quota_unreserve_blkres(ip, del->br_blockcount); 48444fd29ec4SDarrick J. Wong if (error) 48454fd29ec4SDarrick J. Wong return error; 4846fa5c836cSChristoph Hellwig ip->i_delayed_blks -= del->br_blockcount; 4847fa5c836cSChristoph Hellwig 4848fa5c836cSChristoph Hellwig if (got->br_startoff == del->br_startoff) 48490173c689SChristoph Hellwig state |= BMAP_LEFT_FILLING; 4850fa5c836cSChristoph Hellwig if (got_endoff == del_endoff) 48510173c689SChristoph Hellwig state |= BMAP_RIGHT_FILLING; 4852fa5c836cSChristoph Hellwig 48530173c689SChristoph Hellwig switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 48540173c689SChristoph Hellwig case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4855fa5c836cSChristoph Hellwig /* 4856fa5c836cSChristoph Hellwig * Matches the whole extent. Delete the entry. 4857fa5c836cSChristoph Hellwig */ 4858c38ccf59SChristoph Hellwig xfs_iext_remove(ip, icur, state); 4859b2b1712aSChristoph Hellwig xfs_iext_prev(ifp, icur); 4860fa5c836cSChristoph Hellwig break; 48610173c689SChristoph Hellwig case BMAP_LEFT_FILLING: 4862fa5c836cSChristoph Hellwig /* 4863fa5c836cSChristoph Hellwig * Deleting the first part of the extent. 4864fa5c836cSChristoph Hellwig */ 4865fa5c836cSChristoph Hellwig got->br_startoff = del_endoff; 4866fa5c836cSChristoph Hellwig got->br_blockcount -= del->br_blockcount; 4867fa5c836cSChristoph Hellwig da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4868fa5c836cSChristoph Hellwig got->br_blockcount), da_old); 4869fa5c836cSChristoph Hellwig got->br_startblock = nullstartblock((int)da_new); 4870b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, got); 4871fa5c836cSChristoph Hellwig break; 48720173c689SChristoph Hellwig case BMAP_RIGHT_FILLING: 4873fa5c836cSChristoph Hellwig /* 4874fa5c836cSChristoph Hellwig * Deleting the last part of the extent. 
4875fa5c836cSChristoph Hellwig */ 4876fa5c836cSChristoph Hellwig got->br_blockcount = got->br_blockcount - del->br_blockcount; 4877fa5c836cSChristoph Hellwig da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, 4878fa5c836cSChristoph Hellwig got->br_blockcount), da_old); 4879fa5c836cSChristoph Hellwig got->br_startblock = nullstartblock((int)da_new); 4880b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, got); 4881fa5c836cSChristoph Hellwig break; 4882fa5c836cSChristoph Hellwig case 0: 4883fa5c836cSChristoph Hellwig /* 4884fa5c836cSChristoph Hellwig * Deleting the middle of the extent. 4885fa5c836cSChristoph Hellwig * 4886fa5c836cSChristoph Hellwig * Distribute the original indlen reservation across the two new 4887fa5c836cSChristoph Hellwig * extents. Steal blocks from the deleted extent if necessary. 4888fa5c836cSChristoph Hellwig * Stealing blocks simply fudges the fdblocks accounting below. 4889fa5c836cSChristoph Hellwig * Warn if either of the new indlen reservations is zero as this 4890fa5c836cSChristoph Hellwig * can lead to delalloc problems. 
4891fa5c836cSChristoph Hellwig */ 4892fa5c836cSChristoph Hellwig got->br_blockcount = del->br_startoff - got->br_startoff; 4893fa5c836cSChristoph Hellwig got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount); 4894fa5c836cSChristoph Hellwig 4895fa5c836cSChristoph Hellwig new.br_blockcount = got_endoff - del_endoff; 4896fa5c836cSChristoph Hellwig new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount); 4897fa5c836cSChristoph Hellwig 4898fa5c836cSChristoph Hellwig WARN_ON_ONCE(!got_indlen || !new_indlen); 4899fa5c836cSChristoph Hellwig stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen, 4900fa5c836cSChristoph Hellwig del->br_blockcount); 4901fa5c836cSChristoph Hellwig 4902fa5c836cSChristoph Hellwig got->br_startblock = nullstartblock((int)got_indlen); 4903fa5c836cSChristoph Hellwig 4904fa5c836cSChristoph Hellwig new.br_startoff = del_endoff; 4905fa5c836cSChristoph Hellwig new.br_state = got->br_state; 4906fa5c836cSChristoph Hellwig new.br_startblock = nullstartblock((int)new_indlen); 4907fa5c836cSChristoph Hellwig 4908b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, got); 4909b2b1712aSChristoph Hellwig xfs_iext_next(ifp, icur); 49100254c2f2SChristoph Hellwig xfs_iext_insert(ip, icur, &new, state); 4911fa5c836cSChristoph Hellwig 4912fa5c836cSChristoph Hellwig da_new = got_indlen + new_indlen - stolen; 4913fa5c836cSChristoph Hellwig del->br_blockcount -= stolen; 4914fa5c836cSChristoph Hellwig break; 4915fa5c836cSChristoph Hellwig } 4916fa5c836cSChristoph Hellwig 4917fa5c836cSChristoph Hellwig ASSERT(da_old >= da_new); 4918fa5c836cSChristoph Hellwig da_diff = da_old - da_new; 4919fa5c836cSChristoph Hellwig if (!isrt) 4920fa5c836cSChristoph Hellwig da_diff += del->br_blockcount; 49219fe82b8cSDarrick J. Wong if (da_diff) { 4922fa5c836cSChristoph Hellwig xfs_mod_fdblocks(mp, da_diff, false); 49239fe82b8cSDarrick J. Wong xfs_mod_delalloc(mp, -da_diff); 49249fe82b8cSDarrick J. 
Wong } 4925fa5c836cSChristoph Hellwig return error; 4926fa5c836cSChristoph Hellwig } 4927fa5c836cSChristoph Hellwig 4928fa5c836cSChristoph Hellwig void 4929fa5c836cSChristoph Hellwig xfs_bmap_del_extent_cow( 4930fa5c836cSChristoph Hellwig struct xfs_inode *ip, 4931b2b1712aSChristoph Hellwig struct xfs_iext_cursor *icur, 4932fa5c836cSChristoph Hellwig struct xfs_bmbt_irec *got, 4933fa5c836cSChristoph Hellwig struct xfs_bmbt_irec *del) 4934fa5c836cSChristoph Hellwig { 4935fa5c836cSChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 4936732436efSDarrick J. Wong struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); 4937fa5c836cSChristoph Hellwig struct xfs_bmbt_irec new; 4938fa5c836cSChristoph Hellwig xfs_fileoff_t del_endoff, got_endoff; 49390e5b8e45SDave Chinner uint32_t state = BMAP_COWFORK; 4940fa5c836cSChristoph Hellwig 4941fa5c836cSChristoph Hellwig XFS_STATS_INC(mp, xs_del_exlist); 4942fa5c836cSChristoph Hellwig 4943fa5c836cSChristoph Hellwig del_endoff = del->br_startoff + del->br_blockcount; 4944fa5c836cSChristoph Hellwig got_endoff = got->br_startoff + got->br_blockcount; 4945fa5c836cSChristoph Hellwig 4946fa5c836cSChristoph Hellwig ASSERT(del->br_blockcount > 0); 4947fa5c836cSChristoph Hellwig ASSERT(got->br_startoff <= del->br_startoff); 4948fa5c836cSChristoph Hellwig ASSERT(got_endoff >= del_endoff); 4949fa5c836cSChristoph Hellwig ASSERT(!isnullstartblock(got->br_startblock)); 4950fa5c836cSChristoph Hellwig 4951fa5c836cSChristoph Hellwig if (got->br_startoff == del->br_startoff) 49520173c689SChristoph Hellwig state |= BMAP_LEFT_FILLING; 4953fa5c836cSChristoph Hellwig if (got_endoff == del_endoff) 49540173c689SChristoph Hellwig state |= BMAP_RIGHT_FILLING; 4955fa5c836cSChristoph Hellwig 49560173c689SChristoph Hellwig switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) { 49570173c689SChristoph Hellwig case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: 4958fa5c836cSChristoph Hellwig /* 4959fa5c836cSChristoph Hellwig * Matches the whole extent. 
Delete the entry. 4960fa5c836cSChristoph Hellwig */ 4961c38ccf59SChristoph Hellwig xfs_iext_remove(ip, icur, state); 4962b2b1712aSChristoph Hellwig xfs_iext_prev(ifp, icur); 4963fa5c836cSChristoph Hellwig break; 49640173c689SChristoph Hellwig case BMAP_LEFT_FILLING: 4965fa5c836cSChristoph Hellwig /* 4966fa5c836cSChristoph Hellwig * Deleting the first part of the extent. 4967fa5c836cSChristoph Hellwig */ 4968fa5c836cSChristoph Hellwig got->br_startoff = del_endoff; 4969fa5c836cSChristoph Hellwig got->br_blockcount -= del->br_blockcount; 4970fa5c836cSChristoph Hellwig got->br_startblock = del->br_startblock + del->br_blockcount; 4971b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, got); 4972fa5c836cSChristoph Hellwig break; 49730173c689SChristoph Hellwig case BMAP_RIGHT_FILLING: 4974fa5c836cSChristoph Hellwig /* 4975fa5c836cSChristoph Hellwig * Deleting the last part of the extent. 4976fa5c836cSChristoph Hellwig */ 4977fa5c836cSChristoph Hellwig got->br_blockcount -= del->br_blockcount; 4978b2b1712aSChristoph Hellwig xfs_iext_update_extent(ip, state, icur, got); 4979fa5c836cSChristoph Hellwig break; 4980fa5c836cSChristoph Hellwig case 0: 4981fa5c836cSChristoph Hellwig /* 4982fa5c836cSChristoph Hellwig * Deleting the middle of the extent. 
		 */
		got->br_blockcount = del->br_startoff - got->br_startoff;

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got->br_state;
		new.br_startblock = del->br_startblock + del->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, got);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}
	ip->i_delayed_blks -= del->br_blockcount;
}

/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space.
 *
 * Removes the range described by @del from the extent @icur points at,
 * updating the incore extent list, the bmap btree (via @cur, when the fork
 * is in btree format), the rmap/refcount deferred work queues, the inode
 * block count and the quota counters.  Returns 0 or a negative errno;
 * *logflagsp is set to the inode log flags the caller must apply even on
 * error (a dirty transaction must still be logged).
 */
STATIC int				/* error */
xfs_bmap_del_extent_real(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork, /* data or attr fork */
	uint32_t		bflags)	/* bmapi flags */
{
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			do_fx;	/* free extent at end of routine */
	int			error;	/* error return value */
	int			flags = 0;/* inode logging flags */
	struct xfs_bmbt_irec	got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_bmbt_irec	old;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(del->br_blockcount > 0);
	xfs_iext_get_extent(ifp, icur, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	/* the deleted range must sit entirely inside the found extent */
	ASSERT(got_endoff >= del_endoff);
	/* real extents only here; delalloc removal has its own helper */
	ASSERT(!isnullstartblock(got.br_startblock));
	qfield = 0;
	error = 0;

	/*
	 * If it's the case where the directory code is running with no block
	 * reservation, and the deleted block is in the middle of its extent,
	 * and the resulting insert of an extent would cause transformation to
	 * btree format, then reject it.  The calling code will then swap
	 * blocks around instead.  We have to do this now, rather than waiting
	 * for the conversion to btree format, since the transaction will be
	 * dirty then.
	 */
	if (tp->t_blk_res == 0 &&
	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
		return -ENOSPC;

	flags = XFS_ILOG_CORE;
	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
		xfs_filblks_t	len;
		xfs_extlen_t	mod;

		/* realtime deletions must be rt-extent aligned */
		len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
				&mod);
		ASSERT(mod == 0);

		if (!(bflags & XFS_BMAPI_REMAP)) {
			xfs_fsblock_t	bno;

			bno = div_u64_rem(del->br_startblock,
					mp->m_sb.sb_rextsize, &mod);
			ASSERT(mod == 0);

			/* free the rt blocks directly; no deferred free */
			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
			if (error)
				goto done;
		}

		do_fx = 0;
		nblks = len * mp->m_sb.sb_rextsize;
		qfield = XFS_TRANS_DQ_RTBCOUNT;
	} else {
		/* non-realtime: defer the extent free until commit time */
		do_fx = 1;
		nblks = del->br_blockcount;
		qfield = XFS_TRANS_DQ_BCOUNT;
	}

	del_endblock = del->br_startblock + del->br_blockcount;
	if (cur) {
		/* position the btree cursor on the record being changed */
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto done;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
	}

	if (got.br_startoff == del->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (got_endoff == del_endoff)
		state |= BMAP_RIGHT_FILLING;

	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		ifp->if_nextents--;

		flags |= XFS_ILOG_CORE;
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			goto done;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Deleting the first part of the extent.
		 */
		got.br_startoff = del_endoff;
		got.br_startblock = del_endblock;
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto done;
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Deleting the last part of the extent.
		 */
		got.br_blockcount -= del->br_blockcount;
		xfs_iext_update_extent(ip, state, icur, &got);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto done;
		break;
	case 0:
		/*
		 * Deleting the middle of the extent.  The existing record
		 * is truncated to the part before the hole and a new record
		 * is inserted for the part after it.
		 */
		old = got;

		got.br_blockcount = del->br_startoff - got.br_startoff;
		xfs_iext_update_extent(ip, state, icur, &got);

		new.br_startoff = del_endoff;
		new.br_blockcount = got_endoff - del_endoff;
		new.br_state = got.br_state;
		new.br_startblock = del_endblock;

		flags |= XFS_ILOG_CORE;
		if (cur) {
			error = xfs_bmbt_update(cur, &got);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			cur->bc_rec.b = new;
			error = xfs_btree_insert(cur, &i);
			if (error && error != -ENOSPC)
				goto done;
			/*
			 * If get no-space back from btree insert, it tried a
			 * split, and we have a zero block reservation.  Fix up
			 * our state and return the error.
			 */
			if (error == -ENOSPC) {
				/*
				 * Reset the cursor, don't trust it after any
				 * insert operation.
				 */
				error = xfs_bmbt_lookup_eq(cur, &got, &i);
				if (error)
					goto done;
				if (XFS_IS_CORRUPT(mp, i != 1)) {
					error = -EFSCORRUPTED;
					goto done;
				}
				/*
				 * Update the btree record back
				 * to the original value.
				 */
				error = xfs_bmbt_update(cur, &old);
				if (error)
					goto done;
				/*
				 * Reset the extent record back
				 * to the original value.
				 */
				xfs_iext_update_extent(ip, state, icur, &old);
				flags = 0;
				error = -ENOSPC;
				goto done;
			}
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		} else
			flags |= xfs_ilog_fext(whichfork);

		ifp->if_nextents++;
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, &new, state);
		break;
	}

	/* remove reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, del);

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
			/* shared blocks: drop the refcount instead of freeing */
			xfs_refcount_decrease_extent(tp, del);
		} else {
			__xfs_free_extent_later(tp, del->br_startblock,
					del->br_blockcount, NULL,
					(bflags & XFS_BMAPI_NODISCARD) ||
					del->br_state == XFS_EXT_UNWRITTEN);
		}
	}

	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield && !(bflags & XFS_BMAPI_REMAP))
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *done is set.
 */
int						/* error */
__xfs_bunmapi(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		start,		/* first file offset deleted */
	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
	uint32_t		flags,		/* misc flags */
	xfs_extnum_t		nexts)		/* number of extents max */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	struct xfs_bmbt_irec	del;		/* extent being deleted */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	struct xfs_bmbt_irec	got;		/* current extent record */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	struct xfs_mount	*mp = ip->i_mount;
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_fsblock_t		sum;
	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
	xfs_fileoff_t		end;		/* walks backwards toward start */
	struct xfs_iext_cursor	icur;
	bool			done = false;

	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);

	whichfork = xfs_bmapi_whichfork(flags);
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;
	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	/* make sure the incore extent list is populated before walking it */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (xfs_iext_count(ifp) == 0) {
		*rlen = 0;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	end = start + len;

	/* nothing mapped at or before the end of the range: all done */
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
		*rlen = 0;
		return 0;
	}
	end--;

	logflags = 0;
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		/* NOTE(review): this ASSERT merely repeats the if condition */
		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	} else
		cur = NULL;

	if (isrt) {
		/*
		 * Synchronize by locking the bitmap inode.
		 */
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
	}

	/* walk the extent list backwards from end toward start */
	extno = 0;
	while (end != (xfs_fileoff_t)-1 && end >= start &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which end lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > end &&
		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
			done = true;
			break;
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		end = XFS_FILEOFF_MIN(end,
			got.br_startoff + got.br_blockcount - 1);
		if (end < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		del = got;
		wasdel = isnullstartblock(del.br_startblock);

		/* trim the candidate extent to [start, end] */
		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > end + 1)
			del.br_blockcount = end + 1 - del.br_startoff;

		/* realtime alignment handling only applies to rt files */
		if (!isrt)
			goto delete;

		sum = del.br_startblock + del.br_blockcount;
		div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
		if (mod) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT(end >= mod);
				end -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (end < got.br_startoff &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					whichfork, &icur, &cur, &del,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}
		div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
		if (mod) {
			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;

			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			if (del.br_blockcount > off) {
				del.br_blockcount -= off;
				del.br_startoff += off;
				del.br_startblock += off;
			} else if (del.br_startoff == start &&
				   (del.br_state == XFS_EXT_UNWRITTEN ||
				    tp->t_blk_res == 0)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(end >= del.br_blockcount);
				end -= del.br_blockcount;
				if (got.br_startoff > end &&
				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
					done = true;
					break;
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				struct xfs_bmbt_irec	prev;
				xfs_fileoff_t		unwrite_start;

				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
					ASSERT(0);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				unwrite_start = max3(start,
						     del.br_startoff - mod,
						     prev.br_startoff);
				mod = unwrite_start - prev.br_startoff;
				prev.br_startoff = unwrite_start;
				prev.br_startblock += mod;
				prev.br_blockcount -= mod;
				prev.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&prev, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, whichfork, &icur, &cur,
						&del, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

delete:
		if (wasdel) {
			/* delalloc extent: no on-disk metadata to touch */
			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
					&got, &del);
		} else {
			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
					&del, &tmp_logflags, whichfork,
					flags);
			logflags |= tmp_logflags;
		}

		if (error)
			goto error0;

		end = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (end != (xfs_fileoff_t)-1 && end >= start) {
			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
			    (got.br_startoff > end &&
			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
				done = true;
				break;
			}
			extno++;
		}
	}
	if (done || end == (xfs_fileoff_t)-1 || end < start)
		*rlen = 0;
	else
		*rlen = end - start + 1;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	} else {
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
			whichfork);
	}

error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 ifp->if_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/* Unmap a range of a file. */
/*
 * Unmap (remove) blocks from a file.  Thin wrapper around __xfs_bunmapi()
 * that converts the residual length into a *done flag: *done is set once
 * the entire requested range has been unmapped (len reached zero).
 */
int
xfs_bunmapi(
	xfs_trans_t		*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_extnum_t		nexts,
	int			*done)
{
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
	*done = (len == 0);
	return error;
}

/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.  The merged extent also must not overflow the
	 * maximum on-disk extent length.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
		return false;

	return true;
}

/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	struct xfs_iext_cursor		*icur,
	struct xfs_bmbt_irec		*got,		/* extent to shift */
	struct xfs_bmbt_irec		*left,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec		new;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	/* The merged extent keeps left's start but covers both extents. */
	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	ifp->if_nextents--;
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

	/* change to extent format if required after extent removal */
	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
	if (error)
		return error;

done:
	/* Drop the in-core record for got and widen left in place. */
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(ifp, icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	xfs_rmap_map_extent(tp, ip, whichfork, &new);
	return 0;
}

/*
 * Shift a single extent to the new file offset @startoff: update the in-core
 * extent record, the bmbt record (when a cursor is supplied, i.e. btree
 * format), and the reverse mapping.  The caller supplies @logflags to
 * accumulate inode log flags.
 */
static int
xfs_bmap_shift_update_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,
	struct xfs_btree_cur	*cur,
	int			*logflags,
	xfs_fileoff_t		startoff)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;	/* pre-shift record for lookup/rmap */
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		/* Find the old record and rewrite it with the new offset. */
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
	xfs_rmap_map_extent(tp, ip, whichfork, got);
	return 0;
}
/*
 * Shift the data-fork extent at *next_fsb left (toward lower file offsets)
 * by offset_shift_fsb blocks, merging it into the preceding extent when the
 * shifted extent would become contiguous with it.  On success *next_fsb is
 * advanced to the next extent to shift; *done is set when no extent remains.
 * Returns -EINVAL if the shift would overlap the preceding extent or move
 * the first extent below offset zero.
 */
int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	/* Delalloc extents must have been flushed by the caller. */
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		/* The shift must not collide with the previous extent. */
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(tp, ip, whichfork,
					offset_shift_fsb, &icur, &got, &prev,
					cur, &logflags);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		/* No previous extent: cannot shift past file offset zero. */
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/* Make sure we won't be right-shifting an extent past the maximum bound. */
int
xfs_bmap_can_insert_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		off,
	xfs_fileoff_t		shift)
{
	struct xfs_bmbt_irec	got;
	int			is_empty;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	/*
	 * Check whether shifting the last extent at or beyond @off by @shift
	 * would wrap the on-disk startoff field (masked by BMBT_STARTOFF_MASK).
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
	if (!error && !is_empty && got.br_startoff >= off &&
	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
		error = -EINVAL;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
/*
 * Shift the data-fork extent at *next_fsb right (toward higher file offsets)
 * by offset_shift_fsb blocks, without crossing @stop_fsb.  A *next_fsb of
 * NULLFSBLOCK starts from the last extent in the fork.  On success *next_fsb
 * points at the next extent to shift; *done is set once the shift has reached
 * stop_fsb.  Returns -EINVAL if the shifted extent would overlap its
 * right neighbor.
 */
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		/* First pass: start from the rightmost extent. */
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	/* Delalloc extents must have been flushed by the caller. */
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyways
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	/* Move left to the next extent to shift, stopping at stop_fsb. */
	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}

/*
 * Splits an extent into two extents at split_fsb block such that it is the
 * first block of the current_ext. @ext is a target extent to be split.
 * @split_fsb is a block where the extents is split.  If split_fsb lies in a
 * hole or the first block of extents, just return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	new; /* split extent */
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsblock_t		gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor	icur;
	int			error = 0;
	int			logflags = 0;
	int			i = 0;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are not extents, or split_fsb lies in a hole we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	/* Carve the tail [split_fsb, end) of got into the new extent. */
	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/* Shrink the original extent to the head portion. */
	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		/* The new record must not already exist (expect i == 0). */
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
	struct xfs_bmbt_irec	*bmap)
{
	/* Holes and delalloc reservations have no deferred work to record. */
	return bmap->br_startblock != HOLESTARTBLOCK &&
		bmap->br_startblock != DELAYSTARTBLOCK;
}

/*
 * Record a bmap intent.  Allocates an xfs_bmap_intent describing the
 * (un)map operation and queues it on the transaction's deferred-ops list;
 * ownership of the intent passes to the deferred-ops machinery.
 */
static int
__xfs_bmap_add(
	struct xfs_trans		*tp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*bmap)
{
	struct xfs_bmap_intent		*bi;

	trace_xfs_bmap_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			ip->i_ino, whichfork,
			bmap->br_startoff,
			bmap->br_blockcount,
			bmap->br_state);

	/* __GFP_NOFAIL: allocation cannot fail, so no error path needed. */
	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&bi->bi_list);
	bi->bi_type = type;
	bi->bi_owner = ip;
	bi->bi_whichfork = whichfork;
	bi->bi_bmap = *bmap;

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
	return 0;
}

/* Map an extent into a file. */
void
xfs_bmap_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}

/* Unmap an extent out of a file. */
void
xfs_bmap_unmap_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*PREV)
{
	if (!xfs_bmap_is_update_needed(PREV))
		return;

	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}
/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_bmap_intent		*bi)
{
	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
	int				error = 0;

	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_owner->i_ino, bi->bi_whichfork,
			bmap->br_startoff, bmap->br_blockcount,
			bmap->br_state);

	/* Deferred bmap work is only supported on the data fork. */
	if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock, 0);
		/* Mapping consumes the whole intent in one step. */
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		/* br_blockcount is updated with the residual unmapped length. */
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}

/* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
xfs_bmap_validate_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		return __this_address;

	/* Realtime data extents live in the rt device address space. */
	if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
		if (!xfs_verify_rtext(mp, irec->br_startblock,
				      irec->br_blockcount))
			return __this_address;
	} else {
		if (!xfs_verify_fsbext(mp, irec->br_startblock,
				       irec->br_blockcount))
			return __this_address;
	}
	/* Only data-fork extents may carry the unwritten state. */
	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
		return __this_address;
	return NULL;
}

/* Set up the slab cache for bmap intent items; called at module init. */
int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent),
			0, 0, NULL);

	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
}

/* Tear down the bmap intent cache; called at module exit. */
void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	xfs_bmap_intent_cache = NULL;
}