// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtbitmap.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

struct kmem_cache		*xfs_bmap_intent_cache;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 the fork
	 * offset of all the inodes will be (xfs_default_attroffset(ip) >> 3),
	 * because we could have mounted with ATTR2 and then mounted back with
	 * ATTR1, keeping the i_forkoff's fixed but probably at various
	 * positions. Therefore, for both ATTR1 and ATTR2 we have to assume the
	 * worst case scenario of a minimum size available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	else
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = howmany_64(maxleafents, minleafrecs);
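	/*
	 * Count one level for the leaves, then keep dividing by the minimum
	 * node fanout until everything fits in a single (root) block.
	 * Illustrative trace with made-up numbers: 1,000,000 leaf entries at
	 * 60 records per leaf gives 16,667 leaf blocks; at a node fanout of
	 * 120 that is 139 level-2 blocks, then 2 level-3 blocks, which fit
	 * in the root, for 4 levels total.
	 */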
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}

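/*
 * Compute the default attribute fork offset for this mount.  The smallest
 * (256 byte) inodes place the attr fork as far out as possible, leaving just
 * enough room for a minimal attr btree root; larger inodes use a fixed
 * offset.
 */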
unsigned int
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
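	/*
	 * A >= lookup from the all-zeroes key positions the cursor at the
	 * lowest-offset record in the tree, i.e. the first extent.
	 */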
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
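	/*
	 * Accumulate the worst-case block count level by level: each level
	 * needs at most howmany(len, maxrecs) blocks to map the level below.
	 * Once a level collapses to a single block, every remaining level up
	 * to the root needs exactly one more block.
	 */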
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

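	/* First see if the buffer is attached to the cursor at any level. */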
	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	uint32_t		flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;

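	/*
	 * Queue the former child block for freeing; xfs_free_extent_later()
	 * defers the actual free until the deferred ops of this transaction
	 * are processed.
	 */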
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
			XFS_AG_RESV_NONE);
	if (error)
		return error;

	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
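		/*
		 * Delalloc extents have no real startblock yet and thus no
		 * on-disk mapping; only real extents are copied into the new
		 * leaf block.
		 */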
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);

	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent_start_ag(&args,
			XFS_INO_TO_FSB(args.mp, ip->i_ino));
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

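	/*
	 * Install the single real extent that describes the new remote block
	 * in the now-empty incore extent tree.
	 */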
	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

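	/*
	 * If the bmbt root block still fits in the (now smaller) data fork,
	 * logging the root is enough; otherwise push the root down a level
	 * with xfs_btree_new_iroot() so that it fits again.
	 */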
	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    xfs_inode_data_fork_size(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

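	/*
	 * Device inodes always take the default offset.  For the other data
	 * fork formats, try to fit "size" bytes of attributes via
	 * xfs_attr_shortform_bytesfit() and fall back to the default; when
	 * the attr2 feature is enabled, report version 2 so the caller can
	 * set the superblock ATTR2 bit.
	 */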
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(xfs_inode_has_attr_fork(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	if (xfs_inode_has_attr_fork(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
105438c26bfdSDave Chinner 	if (!xfs_has_attr(mp) ||
105538c26bfdSDave Chinner 	   (!xfs_has_attr2(mp) && version == 2)) {
105661e63ecbSDave Chinner 		bool log_sb = false;
105730f712c9SDave Chinner 
105830f712c9SDave Chinner 		spin_lock(&mp->m_sb_lock);
105938c26bfdSDave Chinner 		if (!xfs_has_attr(mp)) {
106038c26bfdSDave Chinner 			xfs_add_attr(mp);
106161e63ecbSDave Chinner 			log_sb = true;
106230f712c9SDave Chinner 		}
106338c26bfdSDave Chinner 		if (!xfs_has_attr2(mp) && version == 2) {
106438c26bfdSDave Chinner 			xfs_add_attr2(mp);
106561e63ecbSDave Chinner 			log_sb = true;
106630f712c9SDave Chinner 		}
106730f712c9SDave Chinner 		spin_unlock(&mp->m_sb_lock);
106861e63ecbSDave Chinner 		if (log_sb)
106961e63ecbSDave Chinner 			xfs_log_sb(tp);
107030f712c9SDave Chinner 	}
107130f712c9SDave Chinner 
107270393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
107330f712c9SDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
107430f712c9SDave Chinner 	return error;
107530f712c9SDave Chinner 
107630f712c9SDave Chinner trans_cancel:
10774906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
107830f712c9SDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
107930f712c9SDave Chinner 	return error;
108030f712c9SDave Chinner }
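
/*
 * Usage sketch (hypothetical caller, not from this file): set up the first
 * attribute fork on an inode, skipping the work if the fork already exists.
 * Note the contract above: no transaction may be running and ip must be
 * unlocked on entry.
 *
 *	if (!xfs_inode_has_attr_fork(ip)) {
 *		error = xfs_bmap_add_attrfork(ip, size, rsvd);
 *		if (error)
 *			return error;
 *	}
 */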
108130f712c9SDave Chinner 
108230f712c9SDave Chinner /*
108330f712c9SDave Chinner  * Internal and external extent tree search functions.
108430f712c9SDave Chinner  */
108530f712c9SDave Chinner 
1086e992ae8aSDarrick J. Wong struct xfs_iread_state {
1087e992ae8aSDarrick J. Wong 	struct xfs_iext_cursor	icur;
1088e992ae8aSDarrick J. Wong 	xfs_extnum_t		loaded;
1089e992ae8aSDarrick J. Wong };
1090e992ae8aSDarrick J. Wong 
10916a3bd8fcSDarrick J. Wong int
10926a3bd8fcSDarrick J. Wong xfs_bmap_complain_bad_rec(
10936a3bd8fcSDarrick J. Wong 	struct xfs_inode		*ip,
10946a3bd8fcSDarrick J. Wong 	int				whichfork,
10956a3bd8fcSDarrick J. Wong 	xfs_failaddr_t			fa,
10966a3bd8fcSDarrick J. Wong 	const struct xfs_bmbt_irec	*irec)
10976a3bd8fcSDarrick J. Wong {
10986a3bd8fcSDarrick J. Wong 	struct xfs_mount		*mp = ip->i_mount;
10996a3bd8fcSDarrick J. Wong 	const char			*forkname;
11006a3bd8fcSDarrick J. Wong 
11016a3bd8fcSDarrick J. Wong 	switch (whichfork) {
11026a3bd8fcSDarrick J. Wong 	case XFS_DATA_FORK:	forkname = "data"; break;
11036a3bd8fcSDarrick J. Wong 	case XFS_ATTR_FORK:	forkname = "attr"; break;
11046a3bd8fcSDarrick J. Wong 	case XFS_COW_FORK:	forkname = "CoW"; break;
11056a3bd8fcSDarrick J. Wong 	default:		forkname = "???"; break;
11066a3bd8fcSDarrick J. Wong 	}
11076a3bd8fcSDarrick J. Wong 
11086a3bd8fcSDarrick J. Wong 	xfs_warn(mp,
11096a3bd8fcSDarrick J. Wong  "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
11106a3bd8fcSDarrick J. Wong 				ip->i_ino, forkname, fa);
11116a3bd8fcSDarrick J. Wong 	xfs_warn(mp,
11126a3bd8fcSDarrick J. Wong 		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
11136a3bd8fcSDarrick J. Wong 		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
11146a3bd8fcSDarrick J. Wong 		irec->br_state);
11156a3bd8fcSDarrick J. Wong 
11166a3bd8fcSDarrick J. Wong 	return -EFSCORRUPTED;
11176a3bd8fcSDarrick J. Wong }
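
/*
 * Intended call pattern (annotation, mirroring the use in
 * xfs_iread_bmbt_block() below): validate a record, then report it and
 * fail with -EFSCORRUPTED:
 *
 *	fa = xfs_bmap_validate_extent(ip, whichfork, &new);
 *	if (fa)
 *		return xfs_bmap_complain_bad_rec(ip, whichfork, fa, &new);
 */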
11186a3bd8fcSDarrick J. Wong 
1119e992ae8aSDarrick J. Wong /* Stuff every bmbt record from this block into the incore extent map. */
1120e992ae8aSDarrick J. Wong static int
1121e992ae8aSDarrick J. Wong xfs_iread_bmbt_block(
1122e992ae8aSDarrick J. Wong 	struct xfs_btree_cur	*cur,
1123e992ae8aSDarrick J. Wong 	int			level,
1124e992ae8aSDarrick J. Wong 	void			*priv)
1125e992ae8aSDarrick J. Wong {
1126e992ae8aSDarrick J. Wong 	struct xfs_iread_state	*ir = priv;
1127e992ae8aSDarrick J. Wong 	struct xfs_mount	*mp = cur->bc_mp;
112892219c29SDave Chinner 	struct xfs_inode	*ip = cur->bc_ino.ip;
1129e992ae8aSDarrick J. Wong 	struct xfs_btree_block	*block;
1130e992ae8aSDarrick J. Wong 	struct xfs_buf		*bp;
1131e992ae8aSDarrick J. Wong 	struct xfs_bmbt_rec	*frp;
1132e992ae8aSDarrick J. Wong 	xfs_extnum_t		num_recs;
1133e992ae8aSDarrick J. Wong 	xfs_extnum_t		j;
113492219c29SDave Chinner 	int			whichfork = cur->bc_ino.whichfork;
1135732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1136e992ae8aSDarrick J. Wong 
1137e992ae8aSDarrick J. Wong 	block = xfs_btree_get_block(cur, level, &bp);
1138e992ae8aSDarrick J. Wong 
1139e992ae8aSDarrick J. Wong 	/* Abort if we find more records than nextents. */
1140e992ae8aSDarrick J. Wong 	num_recs = xfs_btree_get_numrecs(block);
1141daf83964SChristoph Hellwig 	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
1142e992ae8aSDarrick J. Wong 		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
1143e992ae8aSDarrick J. Wong 				(unsigned long long)ip->i_ino);
1144e992ae8aSDarrick J. Wong 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
1145e992ae8aSDarrick J. Wong 				sizeof(*block), __this_address);
1146e992ae8aSDarrick J. Wong 		return -EFSCORRUPTED;
1147e992ae8aSDarrick J. Wong 	}
1148e992ae8aSDarrick J. Wong 
1149e992ae8aSDarrick J. Wong 	/* Copy records into the incore cache. */
1150e992ae8aSDarrick J. Wong 	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1151e992ae8aSDarrick J. Wong 	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
1152e992ae8aSDarrick J. Wong 		struct xfs_bmbt_irec	new;
1153e992ae8aSDarrick J. Wong 		xfs_failaddr_t		fa;
1154e992ae8aSDarrick J. Wong 
1155e992ae8aSDarrick J. Wong 		xfs_bmbt_disk_get_all(frp, &new);
1156e992ae8aSDarrick J. Wong 		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1157e992ae8aSDarrick J. Wong 		if (fa) {
1158e992ae8aSDarrick J. Wong 			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1159e992ae8aSDarrick J. Wong 					"xfs_iread_extents(2)", frp,
1160e992ae8aSDarrick J. Wong 					sizeof(*frp), fa);
11616a3bd8fcSDarrick J. Wong 			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
11626a3bd8fcSDarrick J. Wong 					&new);
1163e992ae8aSDarrick J. Wong 		}
1164e992ae8aSDarrick J. Wong 		xfs_iext_insert(ip, &ir->icur, &new,
1165e992ae8aSDarrick J. Wong 				xfs_bmap_fork_to_state(whichfork));
1166e992ae8aSDarrick J. Wong 		trace_xfs_read_extent(ip, &ir->icur,
1167e992ae8aSDarrick J. Wong 				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
1168daf83964SChristoph Hellwig 		xfs_iext_next(ifp, &ir->icur);
1169e992ae8aSDarrick J. Wong 	}
1170e992ae8aSDarrick J. Wong 
1171e992ae8aSDarrick J. Wong 	return 0;
1172e992ae8aSDarrick J. Wong }
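
/*
 * Note (annotation): the ir->loaded + num_recs > if_nextents check above
 * works because if_nextents was taken from the on-disk inode before the
 * walk started; any btree block that would push the running total past
 * that count proves the tree and the inode core disagree, so the read is
 * aborted as corruption rather than overfilling the incore extent list.
 */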
1173e992ae8aSDarrick J. Wong 
117430f712c9SDave Chinner /*
1175211e95bbSChristoph Hellwig  * Read in extents from a btree-format inode.
117630f712c9SDave Chinner  */
1177211e95bbSChristoph Hellwig int
1178211e95bbSChristoph Hellwig xfs_iread_extents(
1179211e95bbSChristoph Hellwig 	struct xfs_trans	*tp,
1180211e95bbSChristoph Hellwig 	struct xfs_inode	*ip,
1181211e95bbSChristoph Hellwig 	int			whichfork)
118230f712c9SDave Chinner {
1183e992ae8aSDarrick J. Wong 	struct xfs_iread_state	ir;
1184732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1185e992ae8aSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1186e992ae8aSDarrick J. Wong 	struct xfs_btree_cur	*cur;
1187211e95bbSChristoph Hellwig 	int			error;
118830f712c9SDave Chinner 
1189b2197a36SChristoph Hellwig 	if (!xfs_need_iread_extents(ifp))
1190862a804aSChristoph Hellwig 		return 0;
1191862a804aSChristoph Hellwig 
1192211e95bbSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1193211e95bbSChristoph Hellwig 
1194e992ae8aSDarrick J. Wong 	ir.loaded = 0;
1195e992ae8aSDarrick J. Wong 	xfs_iext_first(ifp, &ir.icur);
1196e992ae8aSDarrick J. Wong 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
1197e992ae8aSDarrick J. Wong 	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
1198e992ae8aSDarrick J. Wong 			XFS_BTREE_VISIT_RECORDS, &ir);
1199e992ae8aSDarrick J. Wong 	xfs_btree_del_cursor(cur, error);
1200e992ae8aSDarrick J. Wong 	if (error)
1201e992ae8aSDarrick J. Wong 		goto out;
1202e992ae8aSDarrick J. Wong 
1203daf83964SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
1204211e95bbSChristoph Hellwig 		error = -EFSCORRUPTED;
1205211e95bbSChristoph Hellwig 		goto out;
1206211e95bbSChristoph Hellwig 	}
1207e992ae8aSDarrick J. Wong 	ASSERT(ir.loaded == xfs_iext_count(ifp));
1208c95356caSDarrick J. Wong 	/*
1209c95356caSDarrick J. Wong 	 * Use release semantics so that we can use acquire semantics in
1210c95356caSDarrick J. Wong 	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
1211c95356caSDarrick J. Wong 	 * after that load.
1212c95356caSDarrick J. Wong 	 */
1213c95356caSDarrick J. Wong 	smp_store_release(&ifp->if_needextents, 0);
121430f712c9SDave Chinner 	return 0;
1215211e95bbSChristoph Hellwig out:
1216211e95bbSChristoph Hellwig 	xfs_iext_destroy(ifp);
1217211e95bbSChristoph Hellwig 	return error;
121830f712c9SDave Chinner }
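
/*
 * Annotation on the memory ordering above: the paired acquire lives in
 * xfs_need_iread_extents(), which (roughly, as of this revision) does
 *
 *	return smp_load_acquire(&ifp->if_needextents) != 0;
 *
 * so a reader that observes if_needextents == 0 is guaranteed to also
 * observe the fully built incore extent tree.
 */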
121930f712c9SDave Chinner 
122030f712c9SDave Chinner /*
122129b3e94aSChristoph Hellwig  * Returns the file-relative block number of the first unused block in the
122229b3e94aSChristoph Hellwig  * given fork with at least "len" logically contiguous blocks free.  This is
122329b3e94aSChristoph Hellwig  * the lowest-address hole if the fork has holes, else the first block past
122429b3e94aSChristoph Hellwig  * the end of the fork.  Returns 0 if the fork is currently local (in-inode).
122530f712c9SDave Chinner  */
122630f712c9SDave Chinner int						/* error */
122730f712c9SDave Chinner xfs_bmap_first_unused(
122829b3e94aSChristoph Hellwig 	struct xfs_trans	*tp,		/* transaction pointer */
122929b3e94aSChristoph Hellwig 	struct xfs_inode	*ip,		/* incore inode */
123030f712c9SDave Chinner 	xfs_extlen_t		len,		/* size of hole to find */
123130f712c9SDave Chinner 	xfs_fileoff_t		*first_unused,	/* unused block */
123230f712c9SDave Chinner 	int			whichfork)	/* data or attr fork */
123330f712c9SDave Chinner {
1234732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
123529b3e94aSChristoph Hellwig 	struct xfs_bmbt_irec	got;
1236b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
123729b3e94aSChristoph Hellwig 	xfs_fileoff_t		lastaddr = 0;
123829b3e94aSChristoph Hellwig 	xfs_fileoff_t		lowest, max;
123929b3e94aSChristoph Hellwig 	int			error;
124030f712c9SDave Chinner 
1241f7e67b20SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
124230f712c9SDave Chinner 		*first_unused = 0;
124330f712c9SDave Chinner 		return 0;
124430f712c9SDave Chinner 	}
124529b3e94aSChristoph Hellwig 
1246f7e67b20SChristoph Hellwig 	ASSERT(xfs_ifork_has_extents(ifp));
1247f7e67b20SChristoph Hellwig 
124829b3e94aSChristoph Hellwig 	error = xfs_iread_extents(tp, ip, whichfork);
124929b3e94aSChristoph Hellwig 	if (error)
125030f712c9SDave Chinner 		return error;
1251f2285c14SChristoph Hellwig 
125229b3e94aSChristoph Hellwig 	lowest = max = *first_unused;
1253b2b1712aSChristoph Hellwig 	for_each_xfs_iext(ifp, &icur, &got) {
125430f712c9SDave Chinner 		/*
125530f712c9SDave Chinner 		 * See if the hole before this extent will work.
125630f712c9SDave Chinner 		 */
1257f2285c14SChristoph Hellwig 		if (got.br_startoff >= lowest + len &&
125829b3e94aSChristoph Hellwig 		    got.br_startoff - max >= len)
125929b3e94aSChristoph Hellwig 			break;
1260f2285c14SChristoph Hellwig 		lastaddr = got.br_startoff + got.br_blockcount;
126130f712c9SDave Chinner 		max = XFS_FILEOFF_MAX(lastaddr, lowest);
126230f712c9SDave Chinner 	}
126329b3e94aSChristoph Hellwig 
126430f712c9SDave Chinner 	*first_unused = max;
126530f712c9SDave Chinner 	return 0;
126630f712c9SDave Chinner }
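
/*
 * Worked example (annotation): with *first_unused = 0 on entry, a fork
 * mapping [0, 10) and [15, 40), and len = 4, the first iteration records
 * max = 10; the second sees 15 >= 0 + 4 and 15 - 10 >= 4 and breaks, so
 * the function returns the 5-block hole starting at block 10.
 */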
126730f712c9SDave Chinner 
126830f712c9SDave Chinner /*
126930f712c9SDave Chinner  * Returns the file-relative block number of the last block before
127030f712c9SDave Chinner  * last_block (the input value) in the file.
127130f712c9SDave Chinner  * This is not based on i_size; it is based on the extent records.
127230f712c9SDave Chinner  * Returns 0 for local-format files, as they do not have extent records.
127330f712c9SDave Chinner  */
127430f712c9SDave Chinner int						/* error */
127530f712c9SDave Chinner xfs_bmap_last_before(
127686685f7bSChristoph Hellwig 	struct xfs_trans	*tp,		/* transaction pointer */
127786685f7bSChristoph Hellwig 	struct xfs_inode	*ip,		/* incore inode */
127830f712c9SDave Chinner 	xfs_fileoff_t		*last_block,	/* last block */
127930f712c9SDave Chinner 	int			whichfork)	/* data or attr fork */
128030f712c9SDave Chinner {
1281732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
128286685f7bSChristoph Hellwig 	struct xfs_bmbt_irec	got;
1283b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
128486685f7bSChristoph Hellwig 	int			error;
128530f712c9SDave Chinner 
1286f7e67b20SChristoph Hellwig 	switch (ifp->if_format) {
128786685f7bSChristoph Hellwig 	case XFS_DINODE_FMT_LOCAL:
128830f712c9SDave Chinner 		*last_block = 0;
128930f712c9SDave Chinner 		return 0;
129086685f7bSChristoph Hellwig 	case XFS_DINODE_FMT_BTREE:
129186685f7bSChristoph Hellwig 	case XFS_DINODE_FMT_EXTENTS:
129286685f7bSChristoph Hellwig 		break;
129386685f7bSChristoph Hellwig 	default:
1294a5155b87SDarrick J. Wong 		ASSERT(0);
1295c2414ad6SDarrick J. Wong 		return -EFSCORRUPTED;
129630f712c9SDave Chinner 	}
129786685f7bSChristoph Hellwig 
129886685f7bSChristoph Hellwig 	error = xfs_iread_extents(tp, ip, whichfork);
129986685f7bSChristoph Hellwig 	if (error)
130030f712c9SDave Chinner 		return error;
130186685f7bSChristoph Hellwig 
1302b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
130386685f7bSChristoph Hellwig 		*last_block = 0;
130430f712c9SDave Chinner 	return 0;
130530f712c9SDave Chinner }
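
/*
 * Worked example (annotation, based on xfs_iext_lookup_extent_before()
 * semantics): with a lone extent [5, 20) and *last_block = 30 on entry,
 * the lookup lands on that extent and trims *last_block to 20, the first
 * offset past it; with *last_block = 3 there is no prior extent and
 * *last_block is zeroed.
 */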
130630f712c9SDave Chinner 
130730f712c9SDave Chinner int
130830f712c9SDave Chinner xfs_bmap_last_extent(
130930f712c9SDave Chinner 	struct xfs_trans	*tp,
131030f712c9SDave Chinner 	struct xfs_inode	*ip,
131130f712c9SDave Chinner 	int			whichfork,
131230f712c9SDave Chinner 	struct xfs_bmbt_irec	*rec,
131330f712c9SDave Chinner 	int			*is_empty)
131430f712c9SDave Chinner {
1315732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1316b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
131730f712c9SDave Chinner 	int			error;
131830f712c9SDave Chinner 
131930f712c9SDave Chinner 	error = xfs_iread_extents(tp, ip, whichfork);
132030f712c9SDave Chinner 	if (error)
132130f712c9SDave Chinner 		return error;
132230f712c9SDave Chinner 
1323b2b1712aSChristoph Hellwig 	xfs_iext_last(ifp, &icur);
1324b2b1712aSChristoph Hellwig 	if (!xfs_iext_get_extent(ifp, &icur, rec))
132530f712c9SDave Chinner 		*is_empty = 1;
1326b2b1712aSChristoph Hellwig 	else
132730f712c9SDave Chinner 		*is_empty = 0;
132830f712c9SDave Chinner 	return 0;
132930f712c9SDave Chinner }
133030f712c9SDave Chinner 
133130f712c9SDave Chinner /*
133230f712c9SDave Chinner  * Check the last inode extent to determine whether this allocation will result
133330f712c9SDave Chinner  * in blocks being allocated at the end of the file. When we allocate new data
133430f712c9SDave Chinner  * blocks at the end of the file which do not start at the previous data block,
133530f712c9SDave Chinner  * we will try to align the new blocks at stripe unit boundaries.
133630f712c9SDave Chinner  *
133730f712c9SDave Chinner  * Returns true in bma->aeof if the file (fork) is empty, since any new write
133830f712c9SDave Chinner  * will be at or past the EOF.
133930f712c9SDave Chinner  */
134030f712c9SDave Chinner STATIC int
134130f712c9SDave Chinner xfs_bmap_isaeof(
134230f712c9SDave Chinner 	struct xfs_bmalloca	*bma,
134330f712c9SDave Chinner 	int			whichfork)
134430f712c9SDave Chinner {
134530f712c9SDave Chinner 	struct xfs_bmbt_irec	rec;
134630f712c9SDave Chinner 	int			is_empty;
134730f712c9SDave Chinner 	int			error;
134830f712c9SDave Chinner 
1349749f24f3SThomas Meyer 	bma->aeof = false;
135030f712c9SDave Chinner 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
135130f712c9SDave Chinner 				     &is_empty);
135230f712c9SDave Chinner 	if (error)
135330f712c9SDave Chinner 		return error;
135430f712c9SDave Chinner 
135530f712c9SDave Chinner 	if (is_empty) {
1356749f24f3SThomas Meyer 		bma->aeof = true;
135730f712c9SDave Chinner 		return 0;
135830f712c9SDave Chinner 	}
135930f712c9SDave Chinner 
136030f712c9SDave Chinner 	/*
136130f712c9SDave Chinner 	 * Check if we are allocating at or past the last extent, or at least into
136230f712c9SDave Chinner 	 * the last delayed allocated extent.
136330f712c9SDave Chinner 	 */
136430f712c9SDave Chinner 	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
136530f712c9SDave Chinner 		(bma->offset >= rec.br_startoff &&
136630f712c9SDave Chinner 		 isnullstartblock(rec.br_startblock));
136730f712c9SDave Chinner 	return 0;
136830f712c9SDave Chinner }
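
/*
 * Example (annotation): if the fork's last record is a real extent
 * [100, 150), then an allocation at bma->offset = 150 sets aeof, while
 * one at offset 120 does not; if that last record is delalloc instead,
 * offset 120 also sets aeof because the overwritten range has not been
 * physically placed yet.
 */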
136930f712c9SDave Chinner 
137030f712c9SDave Chinner /*
137130f712c9SDave Chinner  * Returns the file-relative block number of the first block past EOF in
137230f712c9SDave Chinner  * the file.  This is not based on i_size; it is based on the extent records.
137330f712c9SDave Chinner  * Returns 0 for local files, as they do not have extent records.
137430f712c9SDave Chinner  */
137530f712c9SDave Chinner int
137630f712c9SDave Chinner xfs_bmap_last_offset(
137730f712c9SDave Chinner 	struct xfs_inode	*ip,
137830f712c9SDave Chinner 	xfs_fileoff_t		*last_block,
137930f712c9SDave Chinner 	int			whichfork)
138030f712c9SDave Chinner {
1381732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
138230f712c9SDave Chinner 	struct xfs_bmbt_irec	rec;
138330f712c9SDave Chinner 	int			is_empty;
138430f712c9SDave Chinner 	int			error;
138530f712c9SDave Chinner 
138630f712c9SDave Chinner 	*last_block = 0;
138730f712c9SDave Chinner 
1388f7e67b20SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
138930f712c9SDave Chinner 		return 0;
139030f712c9SDave Chinner 
1391f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
1392c2414ad6SDarrick J. Wong 		return -EFSCORRUPTED;
139330f712c9SDave Chinner 
139430f712c9SDave Chinner 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
139530f712c9SDave Chinner 	if (error || is_empty)
139630f712c9SDave Chinner 		return error;
139730f712c9SDave Chinner 
139830f712c9SDave Chinner 	*last_block = rec.br_startoff + rec.br_blockcount;
139930f712c9SDave Chinner 	return 0;
140030f712c9SDave Chinner }
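
/*
 * Example (annotation): if the final extent record is
 * { br_startoff = 8, br_blockcount = 4 }, *last_block is set to 12, the
 * first file offset past the mapped range, regardless of i_size.
 */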
140130f712c9SDave Chinner 
140230f712c9SDave Chinner /*
140330f712c9SDave Chinner  * Extent tree manipulation functions used during allocation.
140430f712c9SDave Chinner  */
140530f712c9SDave Chinner 
140630f712c9SDave Chinner /*
140730f712c9SDave Chinner  * Convert a delayed allocation to a real allocation.
140830f712c9SDave Chinner  */
140930f712c9SDave Chinner STATIC int				/* error */
141030f712c9SDave Chinner xfs_bmap_add_extent_delay_real(
141160b4984fSDarrick J. Wong 	struct xfs_bmalloca	*bma,
141260b4984fSDarrick J. Wong 	int			whichfork)
141330f712c9SDave Chinner {
1414daf83964SChristoph Hellwig 	struct xfs_mount	*mp = bma->ip->i_mount;
1415732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
141630f712c9SDave Chinner 	struct xfs_bmbt_irec	*new = &bma->got;
141730f712c9SDave Chinner 	int			error;	/* error return value */
141830f712c9SDave Chinner 	int			i;	/* temp state */
141930f712c9SDave Chinner 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
142030f712c9SDave Chinner 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
142130f712c9SDave Chinner 					/* left is 0, right is 1, prev is 2 */
142230f712c9SDave Chinner 	int			rval=0;	/* return value (logging flags) */
14230e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
142430f712c9SDave Chinner 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
142530f712c9SDave Chinner 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
142630f712c9SDave Chinner 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
142730f712c9SDave Chinner 	int			tmp_rval;	/* partial logging flags */
14284dcb8869SChristoph Hellwig 	struct xfs_bmbt_irec	old;
142930f712c9SDave Chinner 
143060b4984fSDarrick J. Wong 	ASSERT(whichfork != XFS_ATTR_FORK);
143130f712c9SDave Chinner 	ASSERT(!isnullstartblock(new->br_startblock));
143230f712c9SDave Chinner 	ASSERT(!bma->cur ||
14338ef54797SDave Chinner 	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
143430f712c9SDave Chinner 
1435ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_add_exlist);
143630f712c9SDave Chinner 
143730f712c9SDave Chinner #define	LEFT		r[0]
143830f712c9SDave Chinner #define	RIGHT		r[1]
143930f712c9SDave Chinner #define	PREV		r[2]
144030f712c9SDave Chinner 
144130f712c9SDave Chinner 	/*
144230f712c9SDave Chinner 	 * Set up a bunch of variables to make the tests simpler.
144330f712c9SDave Chinner 	 */
1444b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
144530f712c9SDave Chinner 	new_endoff = new->br_startoff + new->br_blockcount;
14464dcb8869SChristoph Hellwig 	ASSERT(isnullstartblock(PREV.br_startblock));
144730f712c9SDave Chinner 	ASSERT(PREV.br_startoff <= new->br_startoff);
144830f712c9SDave Chinner 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
144930f712c9SDave Chinner 
145030f712c9SDave Chinner 	da_old = startblockval(PREV.br_startblock);
145130f712c9SDave Chinner 	da_new = 0;
145230f712c9SDave Chinner 
145330f712c9SDave Chinner 	/*
145430f712c9SDave Chinner 	 * Set flags determining what part of the previous delayed allocation
145530f712c9SDave Chinner 	 * extent is being replaced by a real allocation.
145630f712c9SDave Chinner 	 */
145730f712c9SDave Chinner 	if (PREV.br_startoff == new->br_startoff)
145830f712c9SDave Chinner 		state |= BMAP_LEFT_FILLING;
145930f712c9SDave Chinner 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
146030f712c9SDave Chinner 		state |= BMAP_RIGHT_FILLING;
146130f712c9SDave Chinner 
146230f712c9SDave Chinner 	/*
146330f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor.
146430f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
146530f712c9SDave Chinner 	 */
1466b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
146730f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
146830f712c9SDave Chinner 		if (isnullstartblock(LEFT.br_startblock))
146930f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
147030f712c9SDave Chinner 	}
147130f712c9SDave Chinner 
147230f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
147330f712c9SDave Chinner 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
147430f712c9SDave Chinner 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
147530f712c9SDave Chinner 	    LEFT.br_state == new->br_state &&
147695f0b95eSChandan Babu R 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
147730f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
147830f712c9SDave Chinner 
147930f712c9SDave Chinner 	/*
148030f712c9SDave Chinner 	 * Check and set flags if this segment has a right neighbor.
148130f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
148230f712c9SDave Chinner 	 * Also check for all-three-contiguous being too large.
148330f712c9SDave Chinner 	 */
1484b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
148530f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
148630f712c9SDave Chinner 		if (isnullstartblock(RIGHT.br_startblock))
148730f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
148830f712c9SDave Chinner 	}
148930f712c9SDave Chinner 
149030f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
149130f712c9SDave Chinner 	    new_endoff == RIGHT.br_startoff &&
149230f712c9SDave Chinner 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
149330f712c9SDave Chinner 	    new->br_state == RIGHT.br_state &&
149495f0b95eSChandan Babu R 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
149530f712c9SDave Chinner 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
149630f712c9SDave Chinner 		       BMAP_RIGHT_FILLING)) !=
149730f712c9SDave Chinner 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
149830f712c9SDave Chinner 		       BMAP_RIGHT_FILLING) ||
149930f712c9SDave Chinner 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
150095f0b95eSChandan Babu R 			<= XFS_MAX_BMBT_EXTLEN))
150130f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
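
	/*
	 * Informal recap (annotation): the FILLING bits say whether the new
	 * real extent lines up exactly with the start and/or end of the
	 * delalloc record being replaced; the CONTIG bits say whether a
	 * physically adjacent written neighbor exists that the result can
	 * merge with, without the combined length (or the three-way merge)
	 * exceeding XFS_MAX_BMBT_EXTLEN.
	 */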
150230f712c9SDave Chinner 
150330f712c9SDave Chinner 	error = 0;
150430f712c9SDave Chinner 	/*
150530f712c9SDave Chinner 	 * Switch out based on the FILLING and CONTIG state bits.
150630f712c9SDave Chinner 	 */
150730f712c9SDave Chinner 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
150830f712c9SDave Chinner 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
150930f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
151030f712c9SDave Chinner 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
151130f712c9SDave Chinner 		/*
151230f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
151330f712c9SDave Chinner 		 * The left and right neighbors are both contiguous with new.
151430f712c9SDave Chinner 		 */
15154dcb8869SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
151630f712c9SDave Chinner 
1517c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1518c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1519b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1520b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1521daf83964SChristoph Hellwig 		ifp->if_nextents--;
15220d045540SChristoph Hellwig 
152330f712c9SDave Chinner 		if (bma->cur == NULL)
152430f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
152530f712c9SDave Chinner 		else {
152630f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1527e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
152830f712c9SDave Chinner 			if (error)
152930f712c9SDave Chinner 				goto done;
1530f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1531f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1532f9e03706SDarrick J. Wong 				goto done;
1533f9e03706SDarrick J. Wong 			}
153430f712c9SDave Chinner 			error = xfs_btree_delete(bma->cur, &i);
153530f712c9SDave Chinner 			if (error)
153630f712c9SDave Chinner 				goto done;
1537f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1538f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1539f9e03706SDarrick J. Wong 				goto done;
1540f9e03706SDarrick J. Wong 			}
154130f712c9SDave Chinner 			error = xfs_btree_decrement(bma->cur, 0, &i);
154230f712c9SDave Chinner 			if (error)
154330f712c9SDave Chinner 				goto done;
1544f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1545f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1546f9e03706SDarrick J. Wong 				goto done;
1547f9e03706SDarrick J. Wong 			}
1548a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &LEFT);
154930f712c9SDave Chinner 			if (error)
155030f712c9SDave Chinner 				goto done;
155130f712c9SDave Chinner 		}
155230f712c9SDave Chinner 		break;
155330f712c9SDave Chinner 
155430f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
155530f712c9SDave Chinner 		/*
155630f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
155730f712c9SDave Chinner 		 * The left neighbor is contiguous, the right is not.
155830f712c9SDave Chinner 		 */
15594dcb8869SChristoph Hellwig 		old = LEFT;
15604dcb8869SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount;
15610d045540SChristoph Hellwig 
1562c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1563b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1564b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
156530f712c9SDave Chinner 
156630f712c9SDave Chinner 		if (bma->cur == NULL)
156730f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
156830f712c9SDave Chinner 		else {
156930f712c9SDave Chinner 			rval = 0;
1570e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
157130f712c9SDave Chinner 			if (error)
157230f712c9SDave Chinner 				goto done;
1573f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1574f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1575f9e03706SDarrick J. Wong 				goto done;
1576f9e03706SDarrick J. Wong 			}
1577a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &LEFT);
157830f712c9SDave Chinner 			if (error)
157930f712c9SDave Chinner 				goto done;
158030f712c9SDave Chinner 		}
158130f712c9SDave Chinner 		break;
158230f712c9SDave Chinner 
158330f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
158430f712c9SDave Chinner 		/*
158530f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
15869230a0b6SDave Chinner 		 * The right neighbor is contiguous, the left is not. Take care
15879230a0b6SDave Chinner 		 * with delay -> unwritten extent allocation here because the
15889230a0b6SDave Chinner 		 * delalloc record we are overwriting is always written.
158930f712c9SDave Chinner 		 */
15904dcb8869SChristoph Hellwig 		PREV.br_startblock = new->br_startblock;
15914dcb8869SChristoph Hellwig 		PREV.br_blockcount += RIGHT.br_blockcount;
15929230a0b6SDave Chinner 		PREV.br_state = new->br_state;
15930d045540SChristoph Hellwig 
1594b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
1595c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1596b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1597b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
159830f712c9SDave Chinner 
159930f712c9SDave Chinner 		if (bma->cur == NULL)
160030f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
160130f712c9SDave Chinner 		else {
160230f712c9SDave Chinner 			rval = 0;
1603e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
160430f712c9SDave Chinner 			if (error)
160530f712c9SDave Chinner 				goto done;
1606f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1607f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1608f9e03706SDarrick J. Wong 				goto done;
1609f9e03706SDarrick J. Wong 			}
1610a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &PREV);
161130f712c9SDave Chinner 			if (error)
161230f712c9SDave Chinner 				goto done;
161330f712c9SDave Chinner 		}
161430f712c9SDave Chinner 		break;
161530f712c9SDave Chinner 
161630f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
161730f712c9SDave Chinner 		/*
161830f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
161930f712c9SDave Chinner 		 * Neither the left nor right neighbors are contiguous with
162030f712c9SDave Chinner 		 * the new one.
162130f712c9SDave Chinner 		 */
16224dcb8869SChristoph Hellwig 		PREV.br_startblock = new->br_startblock;
16234dcb8869SChristoph Hellwig 		PREV.br_state = new->br_state;
1624b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1625daf83964SChristoph Hellwig 		ifp->if_nextents++;
162630f712c9SDave Chinner 
162730f712c9SDave Chinner 		if (bma->cur == NULL)
162830f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
162930f712c9SDave Chinner 		else {
163030f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1631e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
163230f712c9SDave Chinner 			if (error)
163330f712c9SDave Chinner 				goto done;
1634f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1635f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1636f9e03706SDarrick J. Wong 				goto done;
1637f9e03706SDarrick J. Wong 			}
163830f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
163930f712c9SDave Chinner 			if (error)
164030f712c9SDave Chinner 				goto done;
1641f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1642f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1643f9e03706SDarrick J. Wong 				goto done;
1644f9e03706SDarrick J. Wong 			}
164530f712c9SDave Chinner 		}
164630f712c9SDave Chinner 		break;
164730f712c9SDave Chinner 
164830f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
164930f712c9SDave Chinner 		/*
165030f712c9SDave Chinner 		 * Filling in the first part of a previous delayed allocation.
165130f712c9SDave Chinner 		 * The left neighbor is contiguous.
165230f712c9SDave Chinner 		 */
16534dcb8869SChristoph Hellwig 		old = LEFT;
16544dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
16554dcb8869SChristoph Hellwig 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
16564dcb8869SChristoph Hellwig 				startblockval(PREV.br_startblock));
16574dcb8869SChristoph Hellwig 
16584dcb8869SChristoph Hellwig 		LEFT.br_blockcount += new->br_blockcount;
165930f712c9SDave Chinner 
1660bf99971cSChristoph Hellwig 		PREV.br_blockcount = temp;
16614dcb8869SChristoph Hellwig 		PREV.br_startoff += new->br_blockcount;
16624dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
16630d045540SChristoph Hellwig 
1664b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1665b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1666b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
16674dcb8869SChristoph Hellwig 
166830f712c9SDave Chinner 		if (bma->cur == NULL)
166930f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
167030f712c9SDave Chinner 		else {
167130f712c9SDave Chinner 			rval = 0;
1672e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
167330f712c9SDave Chinner 			if (error)
167430f712c9SDave Chinner 				goto done;
1675f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1676f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1677f9e03706SDarrick J. Wong 				goto done;
1678f9e03706SDarrick J. Wong 			}
1679a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &LEFT);
168030f712c9SDave Chinner 			if (error)
168130f712c9SDave Chinner 				goto done;
168230f712c9SDave Chinner 		}
168330f712c9SDave Chinner 		break;
168430f712c9SDave Chinner 
168530f712c9SDave Chinner 	case BMAP_LEFT_FILLING:
168630f712c9SDave Chinner 		/*
168730f712c9SDave Chinner 		 * Filling in the first part of a previous delayed allocation.
168830f712c9SDave Chinner 		 * The left neighbor is not contiguous.
168930f712c9SDave Chinner 		 */
1690b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1691daf83964SChristoph Hellwig 		ifp->if_nextents++;
1692daf83964SChristoph Hellwig 
169330f712c9SDave Chinner 		if (bma->cur == NULL)
169430f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
169530f712c9SDave Chinner 		else {
169630f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1697e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
169830f712c9SDave Chinner 			if (error)
169930f712c9SDave Chinner 				goto done;
1700f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1701f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1702f9e03706SDarrick J. Wong 				goto done;
1703f9e03706SDarrick J. Wong 			}
170430f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
170530f712c9SDave Chinner 			if (error)
170630f712c9SDave Chinner 				goto done;
1707f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1708f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1709f9e03706SDarrick J. Wong 				goto done;
1710f9e03706SDarrick J. Wong 			}
171130f712c9SDave Chinner 		}
171230f712c9SDave Chinner 
17136d3eb1ecSDarrick J. Wong 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
171430f712c9SDave Chinner 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1715280253d2SBrian Foster 					&bma->cur, 1, &tmp_rval, whichfork);
171630f712c9SDave Chinner 			rval |= tmp_rval;
171730f712c9SDave Chinner 			if (error)
171830f712c9SDave Chinner 				goto done;
171930f712c9SDave Chinner 		}
17204dcb8869SChristoph Hellwig 
17214dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
172230f712c9SDave Chinner 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
172330f712c9SDave Chinner 			startblockval(PREV.br_startblock) -
172492219c29SDave Chinner 			(bma->cur ? bma->cur->bc_ino.allocated : 0));
17254dcb8869SChristoph Hellwig 
17264dcb8869SChristoph Hellwig 		PREV.br_startoff = new_endoff;
17274dcb8869SChristoph Hellwig 		PREV.br_blockcount = temp;
17284dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
1729b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
17300254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1731b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
173230f712c9SDave Chinner 		break;
173330f712c9SDave Chinner 
173430f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
173530f712c9SDave Chinner 		/*
173630f712c9SDave Chinner 		 * Filling in the last part of a previous delayed allocation.
173730f712c9SDave Chinner 		 * The right neighbor is contiguous with the new allocation.
173830f712c9SDave Chinner 		 */
17394dcb8869SChristoph Hellwig 		old = RIGHT;
17404dcb8869SChristoph Hellwig 		RIGHT.br_startoff = new->br_startoff;
17414dcb8869SChristoph Hellwig 		RIGHT.br_startblock = new->br_startblock;
17424dcb8869SChristoph Hellwig 		RIGHT.br_blockcount += new->br_blockcount;
17434dcb8869SChristoph Hellwig 
174430f712c9SDave Chinner 		if (bma->cur == NULL)
174530f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
174630f712c9SDave Chinner 		else {
174730f712c9SDave Chinner 			rval = 0;
1748e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
174930f712c9SDave Chinner 			if (error)
175030f712c9SDave Chinner 				goto done;
1751f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1752f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1753f9e03706SDarrick J. Wong 				goto done;
1754f9e03706SDarrick J. Wong 			}
1755a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &RIGHT);
175630f712c9SDave Chinner 			if (error)
175730f712c9SDave Chinner 				goto done;
175830f712c9SDave Chinner 		}
175930f712c9SDave Chinner 
17604dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
176130f712c9SDave Chinner 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
176230f712c9SDave Chinner 			startblockval(PREV.br_startblock));
17634dcb8869SChristoph Hellwig 
17644dcb8869SChristoph Hellwig 		PREV.br_blockcount = temp;
17654dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
176630f712c9SDave Chinner 
1767b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1768b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
1769b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
177030f712c9SDave Chinner 		break;
177130f712c9SDave Chinner 
177230f712c9SDave Chinner 	case BMAP_RIGHT_FILLING:
177330f712c9SDave Chinner 		/*
177430f712c9SDave Chinner 		 * Filling in the last part of a previous delayed allocation.
177530f712c9SDave Chinner 		 * The right neighbor is not contiguous.
177630f712c9SDave Chinner 		 */
1777b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1778daf83964SChristoph Hellwig 		ifp->if_nextents++;
1779daf83964SChristoph Hellwig 
178030f712c9SDave Chinner 		if (bma->cur == NULL)
178130f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
178230f712c9SDave Chinner 		else {
178330f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1784e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
178530f712c9SDave Chinner 			if (error)
178630f712c9SDave Chinner 				goto done;
1787f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1788f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1789f9e03706SDarrick J. Wong 				goto done;
1790f9e03706SDarrick J. Wong 			}
179130f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
179230f712c9SDave Chinner 			if (error)
179330f712c9SDave Chinner 				goto done;
1794f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1795f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1796f9e03706SDarrick J. Wong 				goto done;
1797f9e03706SDarrick J. Wong 			}
179830f712c9SDave Chinner 		}
179930f712c9SDave Chinner 
18006d3eb1ecSDarrick J. Wong 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
180130f712c9SDave Chinner 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1802280253d2SBrian Foster 				&bma->cur, 1, &tmp_rval, whichfork);
180330f712c9SDave Chinner 			rval |= tmp_rval;
180430f712c9SDave Chinner 			if (error)
180530f712c9SDave Chinner 				goto done;
180630f712c9SDave Chinner 		}
18074dcb8869SChristoph Hellwig 
18084dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
180930f712c9SDave Chinner 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
181030f712c9SDave Chinner 			startblockval(PREV.br_startblock) -
181192219c29SDave Chinner 			(bma->cur ? bma->cur->bc_ino.allocated : 0));
18124dcb8869SChristoph Hellwig 
18134dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
18144dcb8869SChristoph Hellwig 		PREV.br_blockcount = temp;
18150254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1816b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
181730f712c9SDave Chinner 		break;
181830f712c9SDave Chinner 
181930f712c9SDave Chinner 	case 0:
182030f712c9SDave Chinner 		/*
182130f712c9SDave Chinner 		 * Filling in the middle part of a previous delayed allocation.
182230f712c9SDave Chinner 		 * Contiguity is impossible here.
182330f712c9SDave Chinner 		 * This case is avoided almost all the time.
182430f712c9SDave Chinner 		 *
182530f712c9SDave Chinner 		 * We start with a delayed allocation:
182630f712c9SDave Chinner 		 *
182730f712c9SDave Chinner 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
182830f712c9SDave Chinner 		 *  PREV @ idx
182930f712c9SDave Chinner 		 *
183030f712c9SDave Chinner 		 * and we are allocating:
183130f712c9SDave Chinner 		 *                     +rrrrrrrrrrrrrrrrr+
183230f712c9SDave Chinner 		 *			      new
183330f712c9SDave Chinner 		 *
183430f712c9SDave Chinner 		 * and we set it up for insertion as:
183530f712c9SDave Chinner 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
183630f712c9SDave Chinner 		 *                            new
183730f712c9SDave Chinner 		 *  PREV @ idx          LEFT              RIGHT
183830f712c9SDave Chinner 		 *                      inserted at idx + 1
183930f712c9SDave Chinner 		 */
18404dcb8869SChristoph Hellwig 		old = PREV;
18414dcb8869SChristoph Hellwig 
18424dcb8869SChristoph Hellwig 		/* LEFT is the new middle */
184330f712c9SDave Chinner 		LEFT = *new;
18444dcb8869SChristoph Hellwig 
18454dcb8869SChristoph Hellwig 		/* RIGHT is the new right */
184630f712c9SDave Chinner 		RIGHT.br_state = PREV.br_state;
184730f712c9SDave Chinner 		RIGHT.br_startoff = new_endoff;
18484dcb8869SChristoph Hellwig 		RIGHT.br_blockcount =
18494dcb8869SChristoph Hellwig 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
18504dcb8869SChristoph Hellwig 		RIGHT.br_startblock =
18514dcb8869SChristoph Hellwig 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
18524dcb8869SChristoph Hellwig 					RIGHT.br_blockcount));
18534dcb8869SChristoph Hellwig 
18544dcb8869SChristoph Hellwig 		/* truncate PREV */
18554dcb8869SChristoph Hellwig 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
18564dcb8869SChristoph Hellwig 		PREV.br_startblock =
18574dcb8869SChristoph Hellwig 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
18584dcb8869SChristoph Hellwig 					PREV.br_blockcount));
1859b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
18604dcb8869SChristoph Hellwig 
1861b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
18620254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
18630254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1864daf83964SChristoph Hellwig 		ifp->if_nextents++;
18654dcb8869SChristoph Hellwig 
186630f712c9SDave Chinner 		if (bma->cur == NULL)
186730f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
186830f712c9SDave Chinner 		else {
186930f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1870e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
187130f712c9SDave Chinner 			if (error)
187230f712c9SDave Chinner 				goto done;
1873f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1874f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1875f9e03706SDarrick J. Wong 				goto done;
1876f9e03706SDarrick J. Wong 			}
187730f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
187830f712c9SDave Chinner 			if (error)
187930f712c9SDave Chinner 				goto done;
1880f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1881f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1882f9e03706SDarrick J. Wong 				goto done;
1883f9e03706SDarrick J. Wong 			}
188430f712c9SDave Chinner 		}
188530f712c9SDave Chinner 
18866d3eb1ecSDarrick J. Wong 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
188730f712c9SDave Chinner 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1888280253d2SBrian Foster 					&bma->cur, 1, &tmp_rval, whichfork);
188930f712c9SDave Chinner 			rval |= tmp_rval;
189030f712c9SDave Chinner 			if (error)
189130f712c9SDave Chinner 				goto done;
189230f712c9SDave Chinner 		}
18934dcb8869SChristoph Hellwig 
18944dcb8869SChristoph Hellwig 		da_new = startblockval(PREV.br_startblock) +
18954dcb8869SChristoph Hellwig 			 startblockval(RIGHT.br_startblock);
189630f712c9SDave Chinner 		break;
189730f712c9SDave Chinner 
189830f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
189930f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
190030f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
190130f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
190230f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
190330f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
190430f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
190530f712c9SDave Chinner 		/*
190630f712c9SDave Chinner 		 * These cases are all impossible.
190730f712c9SDave Chinner 		 */
190830f712c9SDave Chinner 		ASSERT(0);
190930f712c9SDave Chinner 	}
191030f712c9SDave Chinner 
191195eb308cSDarrick J. Wong 	/* add reverse mapping unless caller opted out */
1912bc46ac64SDarrick J. Wong 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1913bc46ac64SDarrick J. Wong 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
19149c194644SDarrick J. Wong 
191530f712c9SDave Chinner 	/* convert to a btree if necessary */
19166d3eb1ecSDarrick J. Wong 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
191730f712c9SDave Chinner 		int	tmp_logflags;	/* partial log flag return val */
191830f712c9SDave Chinner 
191930f712c9SDave Chinner 		ASSERT(bma->cur == NULL);
192030f712c9SDave Chinner 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1921280253d2SBrian Foster 				&bma->cur, da_old > 0, &tmp_logflags,
1922280253d2SBrian Foster 				whichfork);
192330f712c9SDave Chinner 		bma->logflags |= tmp_logflags;
192430f712c9SDave Chinner 		if (error)
192530f712c9SDave Chinner 			goto done;
192630f712c9SDave Chinner 	}
192730f712c9SDave Chinner 
19289fe82b8cSDarrick J. Wong 	if (da_new != da_old)
19299fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
19309fe82b8cSDarrick J. Wong 
1931ca1862b0SChristoph Hellwig 	if (bma->cur) {
193292219c29SDave Chinner 		da_new += bma->cur->bc_ino.allocated;
193392219c29SDave Chinner 		bma->cur->bc_ino.allocated = 0;
193430f712c9SDave Chinner 	}
193530f712c9SDave Chinner 
1936ca1862b0SChristoph Hellwig 	/* adjust for changes in reserved delayed indirect blocks */
1937ca1862b0SChristoph Hellwig 	if (da_new != da_old) {
1938ca1862b0SChristoph Hellwig 		ASSERT(state == 0 || da_new < da_old);
1939ca1862b0SChristoph Hellwig 		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
1940ca1862b0SChristoph Hellwig 				false);
1941ca1862b0SChristoph Hellwig 	}
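
	/*
	 * Example (annotation): if the original delalloc record had reserved
	 * four blocks for worst-case indirect btree growth (da_old = 4) and
	 * the leftover delalloc pieces now only need two (da_new = 2), the
	 * call above returns the surplus two blocks to the free space
	 * counters.
	 */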
194230f712c9SDave Chinner 
19436d3eb1ecSDarrick J. Wong 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
194430f712c9SDave Chinner done:
194560b4984fSDarrick J. Wong 	if (whichfork != XFS_COW_FORK)
194630f712c9SDave Chinner 		bma->logflags |= rval;
194730f712c9SDave Chinner 	return error;
194830f712c9SDave Chinner #undef	LEFT
194930f712c9SDave Chinner #undef	RIGHT
195030f712c9SDave Chinner #undef	PREV
195130f712c9SDave Chinner }
195230f712c9SDave Chinner 
195330f712c9SDave Chinner /*
195430f712c9SDave Chinner  * Convert an unwritten allocation to a real allocation or vice versa.
195530f712c9SDave Chinner  */
195626b91c72SChristoph Hellwig int					/* error */
195730f712c9SDave Chinner xfs_bmap_add_extent_unwritten_real(
195830f712c9SDave Chinner 	struct xfs_trans	*tp,
195930f712c9SDave Chinner 	xfs_inode_t		*ip,	/* incore inode pointer */
196005a630d7SDarrick J. Wong 	int			whichfork,
1961b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
1962ae127f08SDarrick J. Wong 	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
196330f712c9SDave Chinner 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
196430f712c9SDave Chinner 	int			*logflagsp) /* inode logging flags */
196530f712c9SDave Chinner {
1966ae127f08SDarrick J. Wong 	struct xfs_btree_cur	*cur;	/* btree cursor */
196730f712c9SDave Chinner 	int			error;	/* error return value */
196830f712c9SDave Chinner 	int			i;	/* temp state */
19693ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;	/* inode fork pointer */
197030f712c9SDave Chinner 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
197130f712c9SDave Chinner 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
197230f712c9SDave Chinner 					/* left is 0, right is 1, prev is 2 */
197330f712c9SDave Chinner 	int			rval=0;	/* return value (logging flags) */
19740e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
197505a630d7SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
197679fa6143SChristoph Hellwig 	struct xfs_bmbt_irec	old;
197730f712c9SDave Chinner 
197830f712c9SDave Chinner 	*logflagsp = 0;
197930f712c9SDave Chinner 
198030f712c9SDave Chinner 	cur = *curp;
1981732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
198230f712c9SDave Chinner 
198330f712c9SDave Chinner 	ASSERT(!isnullstartblock(new->br_startblock));
198430f712c9SDave Chinner 
1985ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_add_exlist);
198630f712c9SDave Chinner 
198730f712c9SDave Chinner #define	LEFT		r[0]
198830f712c9SDave Chinner #define	RIGHT		r[1]
198930f712c9SDave Chinner #define	PREV		r[2]
199030f712c9SDave Chinner 
199130f712c9SDave Chinner 	/*
199230f712c9SDave Chinner 	 * Set up a bunch of variables to make the tests simpler.
199330f712c9SDave Chinner 	 */
199430f712c9SDave Chinner 	error = 0;
1995b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, icur, &PREV);
199679fa6143SChristoph Hellwig 	ASSERT(new->br_state != PREV.br_state);
199730f712c9SDave Chinner 	new_endoff = new->br_startoff + new->br_blockcount;
199830f712c9SDave Chinner 	ASSERT(PREV.br_startoff <= new->br_startoff);
199930f712c9SDave Chinner 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
200030f712c9SDave Chinner 
200130f712c9SDave Chinner 	/*
200230f712c9SDave Chinner 	 * Set flags determining what part of the previous oldext allocation
200330f712c9SDave Chinner 	 * extent is being replaced by a newext allocation.
200430f712c9SDave Chinner 	 */
200530f712c9SDave Chinner 	if (PREV.br_startoff == new->br_startoff)
200630f712c9SDave Chinner 		state |= BMAP_LEFT_FILLING;
200730f712c9SDave Chinner 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
200830f712c9SDave Chinner 		state |= BMAP_RIGHT_FILLING;
200930f712c9SDave Chinner 
201030f712c9SDave Chinner 	/*
201130f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor.
201230f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
201330f712c9SDave Chinner 	 */
2014b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
201530f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
201630f712c9SDave Chinner 		if (isnullstartblock(LEFT.br_startblock))
201730f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
201830f712c9SDave Chinner 	}
201930f712c9SDave Chinner 
202030f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
202130f712c9SDave Chinner 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
202230f712c9SDave Chinner 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
202379fa6143SChristoph Hellwig 	    LEFT.br_state == new->br_state &&
202495f0b95eSChandan Babu R 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
202530f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
202630f712c9SDave Chinner 
202730f712c9SDave Chinner 	/*
202830f712c9SDave Chinner 	 * Check and set flags if this segment has a right neighbor.
202930f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
203030f712c9SDave Chinner 	 * Also check for all-three-contiguous being too large.
203130f712c9SDave Chinner 	 */
2032b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
203330f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
203430f712c9SDave Chinner 		if (isnullstartblock(RIGHT.br_startblock))
203530f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
203630f712c9SDave Chinner 	}
203730f712c9SDave Chinner 
203830f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
203930f712c9SDave Chinner 	    new_endoff == RIGHT.br_startoff &&
204030f712c9SDave Chinner 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
204179fa6143SChristoph Hellwig 	    new->br_state == RIGHT.br_state &&
204295f0b95eSChandan Babu R 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
204330f712c9SDave Chinner 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
204430f712c9SDave Chinner 		       BMAP_RIGHT_FILLING)) !=
204530f712c9SDave Chinner 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
204630f712c9SDave Chinner 		       BMAP_RIGHT_FILLING) ||
204730f712c9SDave Chinner 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
204895f0b95eSChandan Babu R 			<= XFS_MAX_BMBT_EXTLEN))
204930f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
205030f712c9SDave Chinner 
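	/*
	 * Sketch of the layout being converted (new is carved out of PREV):
	 *
	 *	LEFT        PREV         RIGHT
	 *	[......][oo NEW oo....][......]
	 *
	 * Sixteen flag combinations exist, but several cannot occur: a
	 * neighbor can only be contiguous when the matching edge of PREV is
	 * being filled.  Those land in the ASSERT(0) cases at the bottom.
	 */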
205130f712c9SDave Chinner 	/*
205230f712c9SDave Chinner 	 * Switch out based on the FILLING and CONTIG state bits.
205330f712c9SDave Chinner 	 */
205430f712c9SDave Chinner 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
205530f712c9SDave Chinner 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
205630f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
205730f712c9SDave Chinner 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
205830f712c9SDave Chinner 		/*
205930f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
206030f712c9SDave Chinner 		 * The left and right neighbors are both contiguous with new.
206130f712c9SDave Chinner 		 */
206279fa6143SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
206330f712c9SDave Chinner 
2064c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2065c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2066b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2067b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2068daf83964SChristoph Hellwig 		ifp->if_nextents -= 2;
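		/*
		 * With a bmbt cursor, mirror the incore merge on disk: the
		 * records for RIGHT and PREV are deleted and LEFT is
		 * rewritten with the combined length.
		 */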
206930f712c9SDave Chinner 		if (cur == NULL)
207030f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
207130f712c9SDave Chinner 		else {
207230f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2073e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2074e16cf9b0SChristoph Hellwig 			if (error)
207530f712c9SDave Chinner 				goto done;
2076f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2077f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2078f9e03706SDarrick J. Wong 				goto done;
2079f9e03706SDarrick J. Wong 			}
208030f712c9SDave Chinner 			error = xfs_btree_delete(cur, &i);
208030f712c9SDave Chinner 			if (error)
208130f712c9SDave Chinner 				goto done;
2082f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2083f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2084f9e03706SDarrick J. Wong 				goto done;
2085f9e03706SDarrick J. Wong 			}
208630f712c9SDave Chinner 			error = xfs_btree_decrement(cur, 0, &i);
208630f712c9SDave Chinner 			if (error)
208730f712c9SDave Chinner 				goto done;
2088f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2089f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2090f9e03706SDarrick J. Wong 				goto done;
2091f9e03706SDarrick J. Wong 			}
209230f712c9SDave Chinner 			error = xfs_btree_delete(cur, &i);
209230f712c9SDave Chinner 			if (error)
209330f712c9SDave Chinner 				goto done;
2094f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2095f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2096f9e03706SDarrick J. Wong 				goto done;
2097f9e03706SDarrick J. Wong 			}
209830f712c9SDave Chinner 			error = xfs_btree_decrement(cur, 0, &i);
209830f712c9SDave Chinner 			if (error)
209930f712c9SDave Chinner 				goto done;
2100f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2101f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2102f9e03706SDarrick J. Wong 				goto done;
2103f9e03706SDarrick J. Wong 			}
2104a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &LEFT);
210579fa6143SChristoph Hellwig 			if (error)
210630f712c9SDave Chinner 				goto done;
210730f712c9SDave Chinner 		}
210830f712c9SDave Chinner 		break;
210930f712c9SDave Chinner 
211030f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
211130f712c9SDave Chinner 		/*
211230f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
211330f712c9SDave Chinner 		 * The left neighbor is contiguous, the right is not.
211430f712c9SDave Chinner 		 */
211579fa6143SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount;
211630f712c9SDave Chinner 
2117c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2118b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2119b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2120daf83964SChristoph Hellwig 		ifp->if_nextents--;
212130f712c9SDave Chinner 		if (cur == NULL)
212230f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
212330f712c9SDave Chinner 		else {
212430f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2125e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2126e16cf9b0SChristoph Hellwig 			if (error)
212730f712c9SDave Chinner 				goto done;
2128f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2129f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2130f9e03706SDarrick J. Wong 				goto done;
2131f9e03706SDarrick J. Wong 			}
213230f712c9SDave Chinner 			error = xfs_btree_delete(cur, &i);
213230f712c9SDave Chinner 			if (error)
213330f712c9SDave Chinner 				goto done;
2134f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2135f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2136f9e03706SDarrick J. Wong 				goto done;
2137f9e03706SDarrick J. Wong 			}
213830f712c9SDave Chinner 			error = xfs_btree_decrement(cur, 0, &i);
213830f712c9SDave Chinner 			if (error)
213930f712c9SDave Chinner 				goto done;
2140f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2141f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2142f9e03706SDarrick J. Wong 				goto done;
2143f9e03706SDarrick J. Wong 			}
2144a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &LEFT);
214579fa6143SChristoph Hellwig 			if (error)
214630f712c9SDave Chinner 				goto done;
214730f712c9SDave Chinner 		}
214830f712c9SDave Chinner 		break;
214930f712c9SDave Chinner 
215030f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
215130f712c9SDave Chinner 		/*
215230f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
215330f712c9SDave Chinner 		 * The right neighbor is contiguous, the left is not.
215430f712c9SDave Chinner 		 */
215579fa6143SChristoph Hellwig 		PREV.br_blockcount += RIGHT.br_blockcount;
215679fa6143SChristoph Hellwig 		PREV.br_state = new->br_state;
2157a6818477SChristoph Hellwig 
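		/*
		 * PREV absorbs RIGHT incore: step the cursor forward to
		 * RIGHT, remove it, then step back to the merged record.
		 */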
2158b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
2159c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2160b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2161b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2162daf83964SChristoph Hellwig 		ifp->if_nextents--;
216379fa6143SChristoph Hellwig 
216430f712c9SDave Chinner 		if (cur == NULL)
216530f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
216630f712c9SDave Chinner 		else {
216730f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2168e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2169e16cf9b0SChristoph Hellwig 			if (error)
217030f712c9SDave Chinner 				goto done;
2171f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2172f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2173f9e03706SDarrick J. Wong 				goto done;
2174f9e03706SDarrick J. Wong 			}
217530f712c9SDave Chinner 			error = xfs_btree_delete(cur, &i);
217530f712c9SDave Chinner 			if (error)
217630f712c9SDave Chinner 				goto done;
2177f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2178f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2179f9e03706SDarrick J. Wong 				goto done;
2180f9e03706SDarrick J. Wong 			}
218130f712c9SDave Chinner 			error = xfs_btree_decrement(cur, 0, &i);
218130f712c9SDave Chinner 			if (error)
218230f712c9SDave Chinner 				goto done;
2183f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2184f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2185f9e03706SDarrick J. Wong 				goto done;
2186f9e03706SDarrick J. Wong 			}
2187a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
218879fa6143SChristoph Hellwig 			if (error)
218930f712c9SDave Chinner 				goto done;
219030f712c9SDave Chinner 		}
219130f712c9SDave Chinner 		break;
219230f712c9SDave Chinner 
219330f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
219430f712c9SDave Chinner 		/*
219530f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
219630f712c9SDave Chinner 		 * Neither the left nor right neighbors are contiguous with
219730f712c9SDave Chinner 		 * the new one.
219830f712c9SDave Chinner 		 */
219979fa6143SChristoph Hellwig 		PREV.br_state = new->br_state;
2200b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
220130f712c9SDave Chinner 
220230f712c9SDave Chinner 		if (cur == NULL)
220330f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
220430f712c9SDave Chinner 		else {
220530f712c9SDave Chinner 			rval = 0;
2206e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2207e16cf9b0SChristoph Hellwig 			if (error)
220830f712c9SDave Chinner 				goto done;
2209f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2210f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2211f9e03706SDarrick J. Wong 				goto done;
2212f9e03706SDarrick J. Wong 			}
2213a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
221479fa6143SChristoph Hellwig 			if (error)
221530f712c9SDave Chinner 				goto done;
221630f712c9SDave Chinner 		}
221730f712c9SDave Chinner 		break;
221830f712c9SDave Chinner 
221930f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
222030f712c9SDave Chinner 		/*
222130f712c9SDave Chinner 		 * Setting the first part of a previous oldext extent to newext.
222230f712c9SDave Chinner 		 * The left neighbor is contiguous.
222330f712c9SDave Chinner 		 */
222479fa6143SChristoph Hellwig 		LEFT.br_blockcount += new->br_blockcount;
222530f712c9SDave Chinner 
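		/*
		 * Remember the original PREV for the bmbt lookup below, then
		 * shift its front edge right by the converted length.
		 */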
222679fa6143SChristoph Hellwig 		old = PREV;
222779fa6143SChristoph Hellwig 		PREV.br_startoff += new->br_blockcount;
222879fa6143SChristoph Hellwig 		PREV.br_startblock += new->br_blockcount;
222979fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
223030f712c9SDave Chinner 
2231b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2232b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2233b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &LEFT);
223430f712c9SDave Chinner 
223530f712c9SDave Chinner 		if (cur == NULL)
223630f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
223730f712c9SDave Chinner 		else {
223830f712c9SDave Chinner 			rval = 0;
2239e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
224079fa6143SChristoph Hellwig 			if (error)
224130f712c9SDave Chinner 				goto done;
2242f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2243f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2244f9e03706SDarrick J. Wong 				goto done;
2245f9e03706SDarrick J. Wong 			}
2246a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
224779fa6143SChristoph Hellwig 			if (error)
224830f712c9SDave Chinner 				goto done;
224979fa6143SChristoph Hellwig 			error = xfs_btree_decrement(cur, 0, &i);
225079fa6143SChristoph Hellwig 			if (error)
225130f712c9SDave Chinner 				goto done;
2252a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &LEFT);
225330f712c9SDave Chinner 			if (error)
225430f712c9SDave Chinner 				goto done;
225530f712c9SDave Chinner 		}
225630f712c9SDave Chinner 		break;
225730f712c9SDave Chinner 
225830f712c9SDave Chinner 	case BMAP_LEFT_FILLING:
225930f712c9SDave Chinner 		/*
226030f712c9SDave Chinner 		 * Setting the first part of a previous oldext extent to newext.
226130f712c9SDave Chinner 		 * The left neighbor is not contiguous.
226230f712c9SDave Chinner 		 */
226379fa6143SChristoph Hellwig 		old = PREV;
226479fa6143SChristoph Hellwig 		PREV.br_startoff += new->br_blockcount;
226579fa6143SChristoph Hellwig 		PREV.br_startblock += new->br_blockcount;
226679fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
226730f712c9SDave Chinner 
2268b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
22690254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
2270daf83964SChristoph Hellwig 		ifp->if_nextents++;
2271daf83964SChristoph Hellwig 
227230f712c9SDave Chinner 		if (cur == NULL)
227330f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
227430f712c9SDave Chinner 		else {
227530f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2276e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
227779fa6143SChristoph Hellwig 			if (error)
227830f712c9SDave Chinner 				goto done;
2279f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2280f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2281f9e03706SDarrick J. Wong 				goto done;
2282f9e03706SDarrick J. Wong 			}
2283a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
228479fa6143SChristoph Hellwig 			if (error)
228530f712c9SDave Chinner 				goto done;
228630f712c9SDave Chinner 			cur->bc_rec.b = *new;
228730f712c9SDave Chinner 			error = xfs_btree_insert(cur, &i);
228730f712c9SDave Chinner 			if (error)
228830f712c9SDave Chinner 				goto done;
2289f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2290f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2291f9e03706SDarrick J. Wong 				goto done;
2292f9e03706SDarrick J. Wong 			}
229330f712c9SDave Chinner 		}
229430f712c9SDave Chinner 		break;
229530f712c9SDave Chinner 
229630f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
229730f712c9SDave Chinner 		/*
229830f712c9SDave Chinner 		 * Setting the last part of a previous oldext extent to newext.
229930f712c9SDave Chinner 		 * The right neighbor is contiguous with the new allocation.
230030f712c9SDave Chinner 		 */
230179fa6143SChristoph Hellwig 		old = PREV;
230279fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
230330f712c9SDave Chinner 
230479fa6143SChristoph Hellwig 		RIGHT.br_startoff = new->br_startoff;
230579fa6143SChristoph Hellwig 		RIGHT.br_startblock = new->br_startblock;
230679fa6143SChristoph Hellwig 		RIGHT.br_blockcount += new->br_blockcount;
2307a6818477SChristoph Hellwig 
2308b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2309b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
2310b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
231130f712c9SDave Chinner 
231230f712c9SDave Chinner 		if (cur == NULL)
231330f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
231430f712c9SDave Chinner 		else {
231530f712c9SDave Chinner 			rval = 0;
2316e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
231779fa6143SChristoph Hellwig 			if (error)
231830f712c9SDave Chinner 				goto done;
2319f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2320f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2321f9e03706SDarrick J. Wong 				goto done;
2322f9e03706SDarrick J. Wong 			}
2323a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
232479fa6143SChristoph Hellwig 			if (error)
232530f712c9SDave Chinner 				goto done;
232679fa6143SChristoph Hellwig 			error = xfs_btree_increment(cur, 0, &i);
232779fa6143SChristoph Hellwig 			if (error)
232830f712c9SDave Chinner 				goto done;
2329a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &RIGHT);
233079fa6143SChristoph Hellwig 			if (error)
233130f712c9SDave Chinner 				goto done;
233230f712c9SDave Chinner 		}
233330f712c9SDave Chinner 		break;
233430f712c9SDave Chinner 
233530f712c9SDave Chinner 	case BMAP_RIGHT_FILLING:
233630f712c9SDave Chinner 		/*
233730f712c9SDave Chinner 		 * Setting the last part of a previous oldext extent to newext.
233830f712c9SDave Chinner 		 * The right neighbor is not contiguous.
233930f712c9SDave Chinner 		 */
234079fa6143SChristoph Hellwig 		old = PREV;
234179fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
234230f712c9SDave Chinner 
2343b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2344b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
23450254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
2346daf83964SChristoph Hellwig 		ifp->if_nextents++;
234730f712c9SDave Chinner 
234830f712c9SDave Chinner 		if (cur == NULL)
234930f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
235030f712c9SDave Chinner 		else {
235130f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2352e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
235379fa6143SChristoph Hellwig 			if (error)
235430f712c9SDave Chinner 				goto done;
2355f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2356f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2357f9e03706SDarrick J. Wong 				goto done;
2358f9e03706SDarrick J. Wong 			}
2359a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
236079fa6143SChristoph Hellwig 			if (error)
236130f712c9SDave Chinner 				goto done;
2362e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2363e16cf9b0SChristoph Hellwig 			if (error)
236430f712c9SDave Chinner 				goto done;
2365f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2366f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2367f9e03706SDarrick J. Wong 				goto done;
2368f9e03706SDarrick J. Wong 			}
236930f712c9SDave Chinner 			error = xfs_btree_insert(cur, &i);
236930f712c9SDave Chinner 			if (error)
237030f712c9SDave Chinner 				goto done;
2371f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2372f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2373f9e03706SDarrick J. Wong 				goto done;
2374f9e03706SDarrick J. Wong 			}
237530f712c9SDave Chinner 		}
237630f712c9SDave Chinner 		break;
237730f712c9SDave Chinner 
237830f712c9SDave Chinner 	case 0:
237930f712c9SDave Chinner 		/*
238030f712c9SDave Chinner 		 * Setting the middle part of a previous oldext extent to
238130f712c9SDave Chinner 		 * newext.  Contiguity is impossible here.
238230f712c9SDave Chinner 		 * One extent becomes three extents.
238330f712c9SDave Chinner 		 */
238479fa6143SChristoph Hellwig 		old = PREV;
238579fa6143SChristoph Hellwig 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
238630f712c9SDave Chinner 
238730f712c9SDave Chinner 		r[0] = *new;
238830f712c9SDave Chinner 		r[1].br_startoff = new_endoff;
238930f712c9SDave Chinner 		r[1].br_blockcount =
239079fa6143SChristoph Hellwig 			old.br_startoff + old.br_blockcount - new_endoff;
239130f712c9SDave Chinner 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
239279fa6143SChristoph Hellwig 		r[1].br_state = PREV.br_state;
239330f712c9SDave Chinner 
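		/*
		 * PREV keeps the left remainder; r[0] is the converted middle
		 * and r[1] the right remainder.  Inserting r[1] first and then
		 * r[0] at the same cursor position leaves the incore list in
		 * file offset order: PREV, r[0], r[1].
		 */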
2394b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2395b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
23960254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &r[1], state);
23970254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &r[0], state);
2398daf83964SChristoph Hellwig 		ifp->if_nextents += 2;
239930f712c9SDave Chinner 
240030f712c9SDave Chinner 		if (cur == NULL)
240130f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
240230f712c9SDave Chinner 		else {
240330f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2404e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
240579fa6143SChristoph Hellwig 			if (error)
240630f712c9SDave Chinner 				goto done;
2407f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2408f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2409f9e03706SDarrick J. Wong 				goto done;
2410f9e03706SDarrick J. Wong 			}
241130f712c9SDave Chinner 			/* new right extent - oldext */
2412a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &r[1]);
2413a67d00a5SChristoph Hellwig 			if (error)
241430f712c9SDave Chinner 				goto done;
241530f712c9SDave Chinner 			/* new left extent - oldext */
241630f712c9SDave Chinner 			cur->bc_rec.b = PREV;
241730f712c9SDave Chinner 			error = xfs_btree_insert(cur, &i);
241730f712c9SDave Chinner 			if (error)
241830f712c9SDave Chinner 				goto done;
2419f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2420f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2421f9e03706SDarrick J. Wong 				goto done;
2422f9e03706SDarrick J. Wong 			}
242330f712c9SDave Chinner 			/*
242430f712c9SDave Chinner 			 * Reset the cursor to the position of the new extent
242530f712c9SDave Chinner 			 * we are about to insert as we can't trust it after
242630f712c9SDave Chinner 			 * the previous insert.
242730f712c9SDave Chinner 			 */
2428e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2429e16cf9b0SChristoph Hellwig 			if (error)
243030f712c9SDave Chinner 				goto done;
2431f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2432f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2433f9e03706SDarrick J. Wong 				goto done;
2434f9e03706SDarrick J. Wong 			}
243530f712c9SDave Chinner 			/* new middle extent - newext */
243630f712c9SDave Chinner 			error = xfs_btree_insert(cur, &i);
243630f712c9SDave Chinner 			if (error)
243730f712c9SDave Chinner 				goto done;
2438f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2439f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2440f9e03706SDarrick J. Wong 				goto done;
2441f9e03706SDarrick J. Wong 			}
244230f712c9SDave Chinner 		}
244330f712c9SDave Chinner 		break;
244430f712c9SDave Chinner 
244530f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
244630f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
244730f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
244830f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
244930f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
245030f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
245130f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
245230f712c9SDave Chinner 		/*
245330f712c9SDave Chinner 		 * These cases are all impossible.
245430f712c9SDave Chinner 		 */
245530f712c9SDave Chinner 		ASSERT(0);
245630f712c9SDave Chinner 	}
245730f712c9SDave Chinner 
24589c194644SDarrick J. Wong 	/* update reverse mappings */
2459bc46ac64SDarrick J. Wong 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
24609c194644SDarrick J. Wong 
246130f712c9SDave Chinner 	/* convert to a btree if necessary */
246205a630d7SDarrick J. Wong 	if (xfs_bmap_needs_btree(ip, whichfork)) {
246330f712c9SDave Chinner 		int	tmp_logflags;	/* partial log flag return val */
246430f712c9SDave Chinner 
246530f712c9SDave Chinner 		ASSERT(cur == NULL);
2466280253d2SBrian Foster 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2467280253d2SBrian Foster 				&tmp_logflags, whichfork);
246830f712c9SDave Chinner 		*logflagsp |= tmp_logflags;
246930f712c9SDave Chinner 		if (error)
247030f712c9SDave Chinner 			goto done;
247130f712c9SDave Chinner 	}
247230f712c9SDave Chinner 
247330f712c9SDave Chinner 	/* clear out the allocated field, done with it now in any case. */
247430f712c9SDave Chinner 	if (cur) {
247592219c29SDave Chinner 		cur->bc_ino.allocated = 0;
247630f712c9SDave Chinner 		*curp = cur;
247730f712c9SDave Chinner 	}
247830f712c9SDave Chinner 
247905a630d7SDarrick J. Wong 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
248030f712c9SDave Chinner done:
248130f712c9SDave Chinner 	*logflagsp |= rval;
248230f712c9SDave Chinner 	return error;
248330f712c9SDave Chinner #undef	LEFT
248430f712c9SDave Chinner #undef	RIGHT
248530f712c9SDave Chinner #undef	PREV
248630f712c9SDave Chinner }
248730f712c9SDave Chinner 
248830f712c9SDave Chinner /*
248930f712c9SDave Chinner  * Convert a hole to a delayed allocation.
249030f712c9SDave Chinner  */
249130f712c9SDave Chinner STATIC void
249230f712c9SDave Chinner xfs_bmap_add_extent_hole_delay(
249330f712c9SDave Chinner 	xfs_inode_t		*ip,	/* incore inode pointer */
2494be51f811SDarrick J. Wong 	int			whichfork,
2495b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
249630f712c9SDave Chinner 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
249730f712c9SDave Chinner {
24983ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;	/* inode fork pointer */
249930f712c9SDave Chinner 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
250030f712c9SDave Chinner 	xfs_filblks_t		newlen = 0;	/* new indirect size */
250130f712c9SDave Chinner 	xfs_filblks_t		oldlen = 0;	/* old indirect size */
250230f712c9SDave Chinner 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
25030e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
25043ffc18ecSChristoph Hellwig 	xfs_filblks_t		temp;	 /* temp for indirect calculations */
250530f712c9SDave Chinner 
2506732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
250730f712c9SDave Chinner 	ASSERT(isnullstartblock(new->br_startblock));
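	/*
	 * A delayed extent has no real disk address yet; br_startblock
	 * instead encodes the worst-case indirect block reservation via
	 * nullstartblock()/startblockval().
	 */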
250830f712c9SDave Chinner 
250930f712c9SDave Chinner 	/*
251030f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor
251130f712c9SDave Chinner 	 */
2512b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
251330f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
251430f712c9SDave Chinner 		if (isnullstartblock(left.br_startblock))
251530f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
251630f712c9SDave Chinner 	}
251730f712c9SDave Chinner 
251830f712c9SDave Chinner 	/*
251930f712c9SDave Chinner 	 * Check and set flags if the current (right) segment exists.
252030f712c9SDave Chinner 	 * If it doesn't exist, we're converting the hole at end-of-file.
252130f712c9SDave Chinner 	 */
2522b2b1712aSChristoph Hellwig 	if (xfs_iext_get_extent(ifp, icur, &right)) {
252330f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
252430f712c9SDave Chinner 		if (isnullstartblock(right.br_startblock))
252530f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
252630f712c9SDave Chinner 	}
252730f712c9SDave Chinner 
252830f712c9SDave Chinner 	/*
252930f712c9SDave Chinner 	 * Set contiguity flags on the left and right neighbors.
253030f712c9SDave Chinner 	 * Don't let extents get too large, even if the pieces are contiguous.
253130f712c9SDave Chinner 	 */
253230f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
253330f712c9SDave Chinner 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
253495f0b95eSChandan Babu R 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
253530f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
253630f712c9SDave Chinner 
253730f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
253830f712c9SDave Chinner 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
253995f0b95eSChandan Babu R 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
254030f712c9SDave Chinner 	    (!(state & BMAP_LEFT_CONTIG) ||
254130f712c9SDave Chinner 	     (left.br_blockcount + new->br_blockcount +
254295f0b95eSChandan Babu R 	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
254330f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
254430f712c9SDave Chinner 
254530f712c9SDave Chinner 	/*
254630f712c9SDave Chinner 	 * Switch out based on the contiguity flags.
254730f712c9SDave Chinner 	 */
254830f712c9SDave Chinner 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
254930f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
255030f712c9SDave Chinner 		/*
255130f712c9SDave Chinner 		 * New allocation is contiguous with delayed allocations
255230f712c9SDave Chinner 		 * on the left and on the right.
255330f712c9SDave Chinner 		 * Merge all three into a single extent record.
255430f712c9SDave Chinner 		 */
255530f712c9SDave Chinner 		temp = left.br_blockcount + new->br_blockcount +
255630f712c9SDave Chinner 			right.br_blockcount;
255730f712c9SDave Chinner 
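		/*
		 * One merged extent needs at most as many indirect blocks as
		 * the three pieces reserved separately, so recompute the
		 * worst case; any surplus is given back after the switch.
		 */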
255830f712c9SDave Chinner 		oldlen = startblockval(left.br_startblock) +
255930f712c9SDave Chinner 			startblockval(new->br_startblock) +
256030f712c9SDave Chinner 			startblockval(right.br_startblock);
25610e339ef8SBrian Foster 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
25620e339ef8SBrian Foster 					 oldlen);
25633ffc18ecSChristoph Hellwig 		left.br_startblock = nullstartblock(newlen);
25643ffc18ecSChristoph Hellwig 		left.br_blockcount = temp;
256530f712c9SDave Chinner 
2566c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2567b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2568b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
256930f712c9SDave Chinner 		break;
257030f712c9SDave Chinner 
257130f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
257230f712c9SDave Chinner 		/*
257330f712c9SDave Chinner 		 * New allocation is contiguous with a delayed allocation
257430f712c9SDave Chinner 		 * on the left.
257530f712c9SDave Chinner 		 * Merge the new allocation with the left neighbor.
257630f712c9SDave Chinner 		 */
257730f712c9SDave Chinner 		temp = left.br_blockcount + new->br_blockcount;
257830f712c9SDave Chinner 
257930f712c9SDave Chinner 		oldlen = startblockval(left.br_startblock) +
258030f712c9SDave Chinner 			startblockval(new->br_startblock);
25810e339ef8SBrian Foster 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
25820e339ef8SBrian Foster 					 oldlen);
25833ffc18ecSChristoph Hellwig 		left.br_blockcount = temp;
25843ffc18ecSChristoph Hellwig 		left.br_startblock = nullstartblock(newlen);
258541d196f4SChristoph Hellwig 
2586b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2587b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
258830f712c9SDave Chinner 		break;
258930f712c9SDave Chinner 
259030f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
259130f712c9SDave Chinner 		/*
259230f712c9SDave Chinner 		 * New allocation is contiguous with a delayed allocation
259330f712c9SDave Chinner 		 * on the right.
259430f712c9SDave Chinner 		 * Merge the new allocation with the right neighbor.
259530f712c9SDave Chinner 		 */
259630f712c9SDave Chinner 		temp = new->br_blockcount + right.br_blockcount;
259730f712c9SDave Chinner 		oldlen = startblockval(new->br_startblock) +
259830f712c9SDave Chinner 			startblockval(right.br_startblock);
25990e339ef8SBrian Foster 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
26000e339ef8SBrian Foster 					 oldlen);
26013ffc18ecSChristoph Hellwig 		right.br_startoff = new->br_startoff;
26023ffc18ecSChristoph Hellwig 		right.br_startblock = nullstartblock(newlen);
26033ffc18ecSChristoph Hellwig 		right.br_blockcount = temp;
2604b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &right);
260530f712c9SDave Chinner 		break;
260630f712c9SDave Chinner 
260730f712c9SDave Chinner 	case 0:
260830f712c9SDave Chinner 		/*
260930f712c9SDave Chinner 		 * New allocation is not contiguous with another
261030f712c9SDave Chinner 		 * delayed allocation.
261130f712c9SDave Chinner 		 * Insert a new entry.
261230f712c9SDave Chinner 		 */
261330f712c9SDave Chinner 		oldlen = newlen = 0;
26140254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
261530f712c9SDave Chinner 		break;
261630f712c9SDave Chinner 	}
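	/*
	 * Merging never grows the worst-case indirect reservation, so any
	 * surplus goes back to the free block count and the in-memory
	 * delalloc accounting is adjusted to match.
	 */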
261730f712c9SDave Chinner 	if (oldlen != newlen) {
261830f712c9SDave Chinner 		ASSERT(oldlen > newlen);
26190d485adaSDave Chinner 		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
26200d485adaSDave Chinner 				 false);
262130f712c9SDave Chinner 		/*
262230f712c9SDave Chinner 		 * Nothing to do for disk quota accounting here.
262330f712c9SDave Chinner 		 */
26249fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
262530f712c9SDave Chinner 	}
262630f712c9SDave Chinner }
262730f712c9SDave Chinner 
262830f712c9SDave Chinner /*
262930f712c9SDave Chinner  * Convert a hole to a real allocation.
263030f712c9SDave Chinner  */
263130f712c9SDave Chinner STATIC int				/* error */
263230f712c9SDave Chinner xfs_bmap_add_extent_hole_real(
26336d04558fSChristoph Hellwig 	struct xfs_trans	*tp,
26346d04558fSChristoph Hellwig 	struct xfs_inode	*ip,
26356d04558fSChristoph Hellwig 	int			whichfork,
2636b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
26376d04558fSChristoph Hellwig 	struct xfs_btree_cur	**curp,
26386d04558fSChristoph Hellwig 	struct xfs_bmbt_irec	*new,
263995eb308cSDarrick J. Wong 	int			*logflagsp,
2640e7d410acSDave Chinner 	uint32_t		flags)
264130f712c9SDave Chinner {
2642732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
26436d04558fSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
26446d04558fSChristoph Hellwig 	struct xfs_btree_cur	*cur = *curp;
264530f712c9SDave Chinner 	int			error;	/* error return value */
264630f712c9SDave Chinner 	int			i;	/* temp state */
264730f712c9SDave Chinner 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
264830f712c9SDave Chinner 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
264930f712c9SDave Chinner 	int			rval = 0;	/* return value (logging flags) */
26500e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
26511abb9e55SChristoph Hellwig 	struct xfs_bmbt_irec	old;
265230f712c9SDave Chinner 
265330f712c9SDave Chinner 	ASSERT(!isnullstartblock(new->br_startblock));
26548ef54797SDave Chinner 	ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
265530f712c9SDave Chinner 
2656ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_add_exlist);
265730f712c9SDave Chinner 
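	/*
	 * The cases below mirror xfs_bmap_add_extent_hole_delay(), but a
	 * real allocation must also keep the on-disk bmbt (when a cursor is
	 * supplied) and the fork extent count in step with the incore list.
	 */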
265830f712c9SDave Chinner 	/*
265930f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor.
266030f712c9SDave Chinner 	 */
2661b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
266230f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
266330f712c9SDave Chinner 		if (isnullstartblock(left.br_startblock))
266430f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
266530f712c9SDave Chinner 	}
266630f712c9SDave Chinner 
266730f712c9SDave Chinner 	/*
266830f712c9SDave Chinner 	 * Check and set flags if this segment has a current value.
266930f712c9SDave Chinner 	 * Not true if we're inserting into the "hole" at eof.
267030f712c9SDave Chinner 	 */
2671b2b1712aSChristoph Hellwig 	if (xfs_iext_get_extent(ifp, icur, &right)) {
267230f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
267330f712c9SDave Chinner 		if (isnullstartblock(right.br_startblock))
267430f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
267530f712c9SDave Chinner 	}
267630f712c9SDave Chinner 
267730f712c9SDave Chinner 	/*
267830f712c9SDave Chinner 	 * We're inserting a real allocation between "left" and "right".
267930f712c9SDave Chinner 	 * Set the contiguity flags.  Don't let extents get too large.
268030f712c9SDave Chinner 	 */
268130f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
268230f712c9SDave Chinner 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
268330f712c9SDave Chinner 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
268430f712c9SDave Chinner 	    left.br_state == new->br_state &&
268595f0b95eSChandan Babu R 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
268630f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
268730f712c9SDave Chinner 
268830f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
268930f712c9SDave Chinner 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
269030f712c9SDave Chinner 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
269130f712c9SDave Chinner 	    new->br_state == right.br_state &&
269295f0b95eSChandan Babu R 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
269330f712c9SDave Chinner 	    (!(state & BMAP_LEFT_CONTIG) ||
269430f712c9SDave Chinner 	     left.br_blockcount + new->br_blockcount +
269595f0b95eSChandan Babu R 	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
269630f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
269730f712c9SDave Chinner 
269830f712c9SDave Chinner 	error = 0;
269930f712c9SDave Chinner 	/*
270030f712c9SDave Chinner 	 * Select which case we're in here, and implement it.
270130f712c9SDave Chinner 	 */
270230f712c9SDave Chinner 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
270330f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
270430f712c9SDave Chinner 		/*
270530f712c9SDave Chinner 		 * New allocation is contiguous with real allocations on the
270630f712c9SDave Chinner 		 * left and on the right.
270730f712c9SDave Chinner 		 * Merge all three into a single extent record.
270830f712c9SDave Chinner 		 */
27091abb9e55SChristoph Hellwig 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
271030f712c9SDave Chinner 
2711c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2712b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2713b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
2714daf83964SChristoph Hellwig 		ifp->if_nextents--;
271530f712c9SDave Chinner 
27166d04558fSChristoph Hellwig 		if (cur == NULL) {
271730f712c9SDave Chinner 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
271830f712c9SDave Chinner 		} else {
271930f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2720e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
272130f712c9SDave Chinner 			if (error)
272230f712c9SDave Chinner 				goto done;
2723f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2724f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2725f9e03706SDarrick J. Wong 				goto done;
2726f9e03706SDarrick J. Wong 			}
27276d04558fSChristoph Hellwig 			error = xfs_btree_delete(cur, &i);
272830f712c9SDave Chinner 			if (error)
272930f712c9SDave Chinner 				goto done;
2730f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2731f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2732f9e03706SDarrick J. Wong 				goto done;
2733f9e03706SDarrick J. Wong 			}
27346d04558fSChristoph Hellwig 			error = xfs_btree_decrement(cur, 0, &i);
273530f712c9SDave Chinner 			if (error)
273630f712c9SDave Chinner 				goto done;
2737f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2738f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2739f9e03706SDarrick J. Wong 				goto done;
2740f9e03706SDarrick J. Wong 			}
2741a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &left);
274230f712c9SDave Chinner 			if (error)
274330f712c9SDave Chinner 				goto done;
274430f712c9SDave Chinner 		}
274530f712c9SDave Chinner 		break;
274630f712c9SDave Chinner 
274730f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
274830f712c9SDave Chinner 		/*
274930f712c9SDave Chinner 		 * New allocation is contiguous with a real allocation
275030f712c9SDave Chinner 		 * on the left.
275130f712c9SDave Chinner 		 * Merge the new allocation with the left neighbor.
275230f712c9SDave Chinner 		 */
27531abb9e55SChristoph Hellwig 		old = left;
27541abb9e55SChristoph Hellwig 		left.br_blockcount += new->br_blockcount;
27551d2e0089SChristoph Hellwig 
2756b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2757b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
275830f712c9SDave Chinner 
27596d04558fSChristoph Hellwig 		if (cur == NULL) {
276030f712c9SDave Chinner 			rval = xfs_ilog_fext(whichfork);
276130f712c9SDave Chinner 		} else {
276230f712c9SDave Chinner 			rval = 0;
2763e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
276430f712c9SDave Chinner 			if (error)
276530f712c9SDave Chinner 				goto done;
2766f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2767f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2768f9e03706SDarrick J. Wong 				goto done;
2769f9e03706SDarrick J. Wong 			}
2770a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &left);
277130f712c9SDave Chinner 			if (error)
277230f712c9SDave Chinner 				goto done;
277330f712c9SDave Chinner 		}
277430f712c9SDave Chinner 		break;
277530f712c9SDave Chinner 
277630f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
277730f712c9SDave Chinner 		/*
277830f712c9SDave Chinner 		 * New allocation is contiguous with a real allocation
277930f712c9SDave Chinner 		 * on the right.
278030f712c9SDave Chinner 		 * Merge the new allocation with the right neighbor.
278130f712c9SDave Chinner 		 */
27821abb9e55SChristoph Hellwig 		old = right;
2783ca5d8e5bSChristoph Hellwig 
27841abb9e55SChristoph Hellwig 		right.br_startoff = new->br_startoff;
27851abb9e55SChristoph Hellwig 		right.br_startblock = new->br_startblock;
27861abb9e55SChristoph Hellwig 		right.br_blockcount += new->br_blockcount;
2787b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &right);
278830f712c9SDave Chinner 
27896d04558fSChristoph Hellwig 		if (cur == NULL) {
279030f712c9SDave Chinner 			rval = xfs_ilog_fext(whichfork);
279130f712c9SDave Chinner 		} else {
279230f712c9SDave Chinner 			rval = 0;
2793e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
279430f712c9SDave Chinner 			if (error)
279530f712c9SDave Chinner 				goto done;
2796f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2797f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2798f9e03706SDarrick J. Wong 				goto done;
2799f9e03706SDarrick J. Wong 			}
2800a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &right);
280130f712c9SDave Chinner 			if (error)
280230f712c9SDave Chinner 				goto done;
280330f712c9SDave Chinner 		}
280430f712c9SDave Chinner 		break;
280530f712c9SDave Chinner 
280630f712c9SDave Chinner 	case 0:
280730f712c9SDave Chinner 		/*
280830f712c9SDave Chinner 		 * New allocation is not contiguous with another
280930f712c9SDave Chinner 		 * real allocation.
281030f712c9SDave Chinner 		 * Insert a new entry.
281130f712c9SDave Chinner 		 */
28120254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
2813daf83964SChristoph Hellwig 		ifp->if_nextents++;
2814daf83964SChristoph Hellwig 
28156d04558fSChristoph Hellwig 		if (cur == NULL) {
281630f712c9SDave Chinner 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
281730f712c9SDave Chinner 		} else {
281830f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2819e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
282030f712c9SDave Chinner 			if (error)
282130f712c9SDave Chinner 				goto done;
2822f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2823f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2824f9e03706SDarrick J. Wong 				goto done;
2825f9e03706SDarrick J. Wong 			}
28266d04558fSChristoph Hellwig 			error = xfs_btree_insert(cur, &i);
282730f712c9SDave Chinner 			if (error)
282830f712c9SDave Chinner 				goto done;
2829f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2830f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2831f9e03706SDarrick J. Wong 				goto done;
2832f9e03706SDarrick J. Wong 			}
283330f712c9SDave Chinner 		}
283430f712c9SDave Chinner 		break;
283530f712c9SDave Chinner 	}
283630f712c9SDave Chinner 
283795eb308cSDarrick J. Wong 	/* add reverse mapping unless caller opted out */
2838bc46ac64SDarrick J. Wong 	if (!(flags & XFS_BMAPI_NORMAP))
2839bc46ac64SDarrick J. Wong 		xfs_rmap_map_extent(tp, ip, whichfork, new);
28409c194644SDarrick J. Wong 
284130f712c9SDave Chinner 	/* convert to a btree if necessary */
28426d04558fSChristoph Hellwig 	if (xfs_bmap_needs_btree(ip, whichfork)) {
284330f712c9SDave Chinner 		int	tmp_logflags;	/* partial log flag return val */
284430f712c9SDave Chinner 
28456d04558fSChristoph Hellwig 		ASSERT(cur == NULL);
2846280253d2SBrian Foster 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2847280253d2SBrian Foster 				&tmp_logflags, whichfork);
28486d04558fSChristoph Hellwig 		*logflagsp |= tmp_logflags;
28496d04558fSChristoph Hellwig 		cur = *curp;
285030f712c9SDave Chinner 		if (error)
285130f712c9SDave Chinner 			goto done;
285230f712c9SDave Chinner 	}
285330f712c9SDave Chinner 
285430f712c9SDave Chinner 	/* clear out the allocated field, done with it now in any case. */
28556d04558fSChristoph Hellwig 	if (cur)
285692219c29SDave Chinner 		cur->bc_ino.allocated = 0;
285730f712c9SDave Chinner 
28586d04558fSChristoph Hellwig 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
285930f712c9SDave Chinner done:
28606d04558fSChristoph Hellwig 	*logflagsp |= rval;
286130f712c9SDave Chinner 	return error;
286230f712c9SDave Chinner }
286330f712c9SDave Chinner 
286430f712c9SDave Chinner /*
286530f712c9SDave Chinner  * Functions used in the extent read, allocate and remove paths
286630f712c9SDave Chinner  */
286730f712c9SDave Chinner 
286830f712c9SDave Chinner /*
2869031474c2SChristoph Hellwig  * Adjust the size of the new extent based on i_extsize and rt extsize.
287030f712c9SDave Chinner  */
287130f712c9SDave Chinner int
287230f712c9SDave Chinner xfs_bmap_extsize_align(
287330f712c9SDave Chinner 	xfs_mount_t	*mp,
287430f712c9SDave Chinner 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
287530f712c9SDave Chinner 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
287630f712c9SDave Chinner 	xfs_extlen_t	extsz,		/* align to this extent size */
287730f712c9SDave Chinner 	int		rt,		/* is this a realtime inode? */
287830f712c9SDave Chinner 	int		eof,		/* is extent at end-of-file? */
287930f712c9SDave Chinner 	int		delay,		/* creating delalloc extent? */
288030f712c9SDave Chinner 	int		convert,	/* overwriting unwritten extent? */
288130f712c9SDave Chinner 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
288230f712c9SDave Chinner 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
288330f712c9SDave Chinner {
288430f712c9SDave Chinner 	xfs_fileoff_t	orig_off;	/* original offset */
288530f712c9SDave Chinner 	xfs_extlen_t	orig_alen;	/* original length */
288630f712c9SDave Chinner 	xfs_fileoff_t	orig_end;	/* original off+len */
288730f712c9SDave Chinner 	xfs_fileoff_t	nexto;		/* next file offset */
288830f712c9SDave Chinner 	xfs_fileoff_t	prevo;		/* previous file offset */
288930f712c9SDave Chinner 	xfs_fileoff_t	align_off;	/* temp for offset */
289030f712c9SDave Chinner 	xfs_extlen_t	align_alen;	/* temp for length */
289130f712c9SDave Chinner 	xfs_extlen_t	temp;		/* temp for calculations */
289230f712c9SDave Chinner 
289330f712c9SDave Chinner 	if (convert)
289430f712c9SDave Chinner 		return 0;
289530f712c9SDave Chinner 
289630f712c9SDave Chinner 	orig_off = align_off = *offp;
289730f712c9SDave Chinner 	orig_alen = align_alen = *lenp;
289830f712c9SDave Chinner 	orig_end = orig_off + orig_alen;
289930f712c9SDave Chinner 
290030f712c9SDave Chinner 	/*
290130f712c9SDave Chinner 	 * If this request overlaps an existing extent, then don't
290230f712c9SDave Chinner 	 * attempt to perform any additional alignment.
290330f712c9SDave Chinner 	 */
290430f712c9SDave Chinner 	if (!delay && !eof &&
290530f712c9SDave Chinner 	    (orig_off >= gotp->br_startoff) &&
290630f712c9SDave Chinner 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
290730f712c9SDave Chinner 		return 0;
290830f712c9SDave Chinner 	}
290930f712c9SDave Chinner 
291030f712c9SDave Chinner 	/*
291130f712c9SDave Chinner 	 * If the file offset is unaligned vs. the extent size
291230f712c9SDave Chinner 	 * we need to align it.  This will be possible unless
291330f712c9SDave Chinner 	 * the file was previously written with a kernel that didn't
291430f712c9SDave Chinner 	 * perform this alignment, or a truncate shot us in the
291530f712c9SDave Chinner 	 * foot.
291630f712c9SDave Chinner 	 */
29170703a8e1SDave Chinner 	div_u64_rem(orig_off, extsz, &temp);
291830f712c9SDave Chinner 	if (temp) {
291930f712c9SDave Chinner 		align_alen += temp;
292030f712c9SDave Chinner 		align_off -= temp;
292130f712c9SDave Chinner 	}
29226dea405eSDave Chinner 
29236dea405eSDave Chinner 	/* Same adjustment for the end of the requested area. */
29246dea405eSDave Chinner 	temp = (align_alen % extsz);
29256dea405eSDave Chinner 	if (temp)
292630f712c9SDave Chinner 		align_alen += extsz - temp;
29276dea405eSDave Chinner 
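	/*
	 * Worked example, assuming extsz = 16: a request at offset 100 for
	 * 10 blocks becomes offset 96, length 14 from the start rounding
	 * above, then length 16 here, so both ends are extent size aligned.
	 */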
29286dea405eSDave Chinner 	/*
29296dea405eSDave Chinner 	 * For large extent hint sizes, the aligned extent might be larger than
293095f0b95eSChandan Babu R 	 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
293195f0b95eSChandan Babu R 	 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
293295f0b95eSChandan Babu R 	 * allocation loops handle short allocation just fine, so it is safe to
293395f0b95eSChandan Babu R 	 * do this. We only want to do it when we are forced to, though, because
293495f0b95eSChandan Babu R 	 * it means more allocation operations are required.
29356dea405eSDave Chinner 	 */
293695f0b95eSChandan Babu R 	while (align_alen > XFS_MAX_BMBT_EXTLEN)
29376dea405eSDave Chinner 		align_alen -= extsz;
293895f0b95eSChandan Babu R 	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
29396dea405eSDave Chinner 
294030f712c9SDave Chinner 	/*
294130f712c9SDave Chinner 	 * If the previous block overlaps with this proposed allocation
294230f712c9SDave Chinner 	 * then move the start forward without adjusting the length.
294330f712c9SDave Chinner 	 */
294430f712c9SDave Chinner 	if (prevp->br_startoff != NULLFILEOFF) {
294530f712c9SDave Chinner 		if (prevp->br_startblock == HOLESTARTBLOCK)
294630f712c9SDave Chinner 			prevo = prevp->br_startoff;
294730f712c9SDave Chinner 		else
294830f712c9SDave Chinner 			prevo = prevp->br_startoff + prevp->br_blockcount;
294930f712c9SDave Chinner 	} else
295030f712c9SDave Chinner 		prevo = 0;
295130f712c9SDave Chinner 	if (align_off != orig_off && align_off < prevo)
295230f712c9SDave Chinner 		align_off = prevo;
295330f712c9SDave Chinner 	/*
295430f712c9SDave Chinner 	 * If the next block overlaps with this proposed allocation
295530f712c9SDave Chinner 	 * then move the start back without adjusting the length,
295630f712c9SDave Chinner 	 * but not before offset 0.
295730f712c9SDave Chinner 	 * This may of course make the start overlap the previous block,
295830f712c9SDave Chinner 	 * and if we hit the offset 0 limit then the next block
295930f712c9SDave Chinner 	 * can still overlap too.
296030f712c9SDave Chinner 	 */
296130f712c9SDave Chinner 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
296230f712c9SDave Chinner 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
296330f712c9SDave Chinner 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
296430f712c9SDave Chinner 			nexto = gotp->br_startoff + gotp->br_blockcount;
296530f712c9SDave Chinner 		else
296630f712c9SDave Chinner 			nexto = gotp->br_startoff;
296730f712c9SDave Chinner 	} else
296830f712c9SDave Chinner 		nexto = NULLFILEOFF;
296930f712c9SDave Chinner 	if (!eof &&
297030f712c9SDave Chinner 	    align_off + align_alen != orig_end &&
297130f712c9SDave Chinner 	    align_off + align_alen > nexto)
297230f712c9SDave Chinner 		align_off = nexto > align_alen ? nexto - align_alen : 0;
297330f712c9SDave Chinner 	/*
297430f712c9SDave Chinner 	 * If we're now overlapping the next or previous extent that
297530f712c9SDave Chinner 	 * means we can't fit an extsz piece in this hole.  Just move
297630f712c9SDave Chinner 	 * the start forward to the first valid spot and set
297730f712c9SDave Chinner 	 * the length so we hit the end.
297830f712c9SDave Chinner 	 */
297930f712c9SDave Chinner 	if (align_off != orig_off && align_off < prevo)
298030f712c9SDave Chinner 		align_off = prevo;
298130f712c9SDave Chinner 	if (align_off + align_alen != orig_end &&
298230f712c9SDave Chinner 	    align_off + align_alen > nexto &&
298330f712c9SDave Chinner 	    nexto != NULLFILEOFF) {
298430f712c9SDave Chinner 		ASSERT(nexto > prevo);
298530f712c9SDave Chinner 		align_alen = nexto - align_off;
298630f712c9SDave Chinner 	}
298730f712c9SDave Chinner 
298830f712c9SDave Chinner 	/*
298930f712c9SDave Chinner 	 * If realtime, and the result isn't a multiple of the realtime
299030f712c9SDave Chinner 	 * extent size we need to remove blocks until it is.
299130f712c9SDave Chinner 	 */
299230f712c9SDave Chinner 	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
299330f712c9SDave Chinner 		/*
299430f712c9SDave Chinner 		 * We're not covering the original request, or
299530f712c9SDave Chinner 		 * we won't be able to once we fix the length.
299630f712c9SDave Chinner 		 */
299730f712c9SDave Chinner 		if (orig_off < align_off ||
299830f712c9SDave Chinner 		    orig_end > align_off + align_alen ||
299930f712c9SDave Chinner 		    align_alen - temp < orig_alen)
30002451337dSDave Chinner 			return -EINVAL;
300130f712c9SDave Chinner 		/*
300230f712c9SDave Chinner 		 * Try to fix it by moving the start up.
300330f712c9SDave Chinner 		 */
300430f712c9SDave Chinner 		if (align_off + temp <= orig_off) {
300530f712c9SDave Chinner 			align_alen -= temp;
300630f712c9SDave Chinner 			align_off += temp;
300730f712c9SDave Chinner 		}
300830f712c9SDave Chinner 		/*
300930f712c9SDave Chinner 		 * Try to fix it by moving the end in.
301030f712c9SDave Chinner 		 */
301130f712c9SDave Chinner 		else if (align_off + align_alen - temp >= orig_end)
301230f712c9SDave Chinner 			align_alen -= temp;
301330f712c9SDave Chinner 		/*
301430f712c9SDave Chinner 		 * Set the start to the minimum then trim the length.
301530f712c9SDave Chinner 		 */
301630f712c9SDave Chinner 		else {
301730f712c9SDave Chinner 			align_alen -= orig_off - align_off;
301830f712c9SDave Chinner 			align_off = orig_off;
301930f712c9SDave Chinner 			align_alen -= align_alen % mp->m_sb.sb_rextsize;
302030f712c9SDave Chinner 		}
302130f712c9SDave Chinner 		/*
302230f712c9SDave Chinner 		 * Result doesn't cover the request, fail it.
302330f712c9SDave Chinner 		 */
302430f712c9SDave Chinner 		if (orig_off < align_off || orig_end > align_off + align_alen)
30252451337dSDave Chinner 			return -EINVAL;
302630f712c9SDave Chinner 	} else {
302730f712c9SDave Chinner 		ASSERT(orig_off >= align_off);
302895f0b95eSChandan Babu R 		/* see XFS_MAX_BMBT_EXTLEN handling above */
30296dea405eSDave Chinner 		ASSERT(orig_end <= align_off + align_alen ||
303095f0b95eSChandan Babu R 		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
303130f712c9SDave Chinner 	}
303230f712c9SDave Chinner 
303330f712c9SDave Chinner #ifdef DEBUG
303430f712c9SDave Chinner 	if (!eof && gotp->br_startoff != NULLFILEOFF)
303530f712c9SDave Chinner 		ASSERT(align_off + align_alen <= gotp->br_startoff);
303630f712c9SDave Chinner 	if (prevp->br_startoff != NULLFILEOFF)
303730f712c9SDave Chinner 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
303830f712c9SDave Chinner #endif
303930f712c9SDave Chinner 
304030f712c9SDave Chinner 	*lenp = align_alen;
304130f712c9SDave Chinner 	*offp = align_off;
304230f712c9SDave Chinner 	return 0;
304330f712c9SDave Chinner }
304430f712c9SDave Chinner 
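/*
 * A gap between the requested file offset and a neighboring extent is
 * considered small if it is no more than XFS_ALLOC_GAP_UNITS times the
 * length being allocated; only a small gap is worth skipping over so that
 * file offset and disk block stay roughly in step.
 */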
304530f712c9SDave Chinner #define XFS_ALLOC_GAP_UNITS	4
304630f712c9SDave Chinner 
304730f712c9SDave Chinner void
304830f712c9SDave Chinner xfs_bmap_adjacent(
304930f712c9SDave Chinner 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
305030f712c9SDave Chinner {
305130f712c9SDave Chinner 	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
305230f712c9SDave Chinner 	xfs_mount_t	*mp;		/* mount point structure */
305330f712c9SDave Chinner 	int		rt;		/* true if inode is realtime */
305430f712c9SDave Chinner 
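/*
 * ISVALID() sanity checks a candidate block number: on the realtime device
 * it only needs to be below the rt block count, while on the data device it
 * must land in the same AG as the block it should be adjacent to and stay
 * inside the device geometry.
 */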
305530f712c9SDave Chinner #define	ISVALID(x,y)	\
305630f712c9SDave Chinner 	(rt ? \
305730f712c9SDave Chinner 		(x) < mp->m_sb.sb_rblocks : \
305830f712c9SDave Chinner 		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
305930f712c9SDave Chinner 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
306030f712c9SDave Chinner 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
306130f712c9SDave Chinner 
306230f712c9SDave Chinner 	mp = ap->ip->i_mount;
3063292378edSDave Chinner 	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3064c34d570dSChristoph Hellwig 		(ap->datatype & XFS_ALLOC_USERDATA);
306530f712c9SDave Chinner 	/*
306630f712c9SDave Chinner 	 * If allocating at eof, and there's a previous real block,
306730f712c9SDave Chinner 	 * try to start the allocation right after its last block.
306830f712c9SDave Chinner 	 */
306930f712c9SDave Chinner 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
307030f712c9SDave Chinner 	    !isnullstartblock(ap->prev.br_startblock) &&
307130f712c9SDave Chinner 	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
307230f712c9SDave Chinner 		    ap->prev.br_startblock)) {
307330f712c9SDave Chinner 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
307430f712c9SDave Chinner 		/*
307530f712c9SDave Chinner 		 * Adjust for the gap between prevp and us.
307630f712c9SDave Chinner 		 */
307730f712c9SDave Chinner 		adjust = ap->offset -
307830f712c9SDave Chinner 			(ap->prev.br_startoff + ap->prev.br_blockcount);
307930f712c9SDave Chinner 		if (adjust &&
308030f712c9SDave Chinner 		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
308130f712c9SDave Chinner 			ap->blkno += adjust;
308230f712c9SDave Chinner 	}
308330f712c9SDave Chinner 	/*
308430f712c9SDave Chinner 	 * If not at eof, then compare the two neighbor blocks.
308530f712c9SDave Chinner 	 * Figure out whether either one gives us a good starting point,
308630f712c9SDave Chinner 	 * and pick the better one.
308730f712c9SDave Chinner 	 */
308830f712c9SDave Chinner 	else if (!ap->eof) {
308930f712c9SDave Chinner 		xfs_fsblock_t	gotbno;		/* right side block number */
309030f712c9SDave Chinner 		xfs_fsblock_t	gotdiff=0;	/* right side difference */
309130f712c9SDave Chinner 		xfs_fsblock_t	prevbno;	/* left side block number */
309230f712c9SDave Chinner 		xfs_fsblock_t	prevdiff=0;	/* left side difference */
309330f712c9SDave Chinner 
309430f712c9SDave Chinner 		/*
309530f712c9SDave Chinner 		 * If there's a previous (left) block, select a requested
309630f712c9SDave Chinner 		 * start block based on it.
309730f712c9SDave Chinner 		 */
309830f712c9SDave Chinner 		if (ap->prev.br_startoff != NULLFILEOFF &&
309930f712c9SDave Chinner 		    !isnullstartblock(ap->prev.br_startblock) &&
310030f712c9SDave Chinner 		    (prevbno = ap->prev.br_startblock +
310130f712c9SDave Chinner 			       ap->prev.br_blockcount) &&
310230f712c9SDave Chinner 		    ISVALID(prevbno, ap->prev.br_startblock)) {
310330f712c9SDave Chinner 			/*
310430f712c9SDave Chinner 			 * Calculate gap to end of previous block.
310530f712c9SDave Chinner 			 */
310630f712c9SDave Chinner 			adjust = prevdiff = ap->offset -
310730f712c9SDave Chinner 				(ap->prev.br_startoff +
310830f712c9SDave Chinner 				 ap->prev.br_blockcount);
310930f712c9SDave Chinner 			/*
311030f712c9SDave Chinner 			 * Figure the startblock based on the previous block's
311130f712c9SDave Chinner 			 * end and the gap size.
311230f712c9SDave Chinner 			 * Heuristic!
311330f712c9SDave Chinner 			 * If the gap is large relative to the piece we're
311430f712c9SDave Chinner 			 * allocating, or using it gives us an invalid block
311530f712c9SDave Chinner 			 * number, then just use the end of the previous block.
311630f712c9SDave Chinner 			 */
311730f712c9SDave Chinner 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
311830f712c9SDave Chinner 			    ISVALID(prevbno + prevdiff,
311930f712c9SDave Chinner 				    ap->prev.br_startblock))
312030f712c9SDave Chinner 				prevbno += adjust;
312130f712c9SDave Chinner 			else
312230f712c9SDave Chinner 				prevdiff += adjust;
312330f712c9SDave Chinner 		}
312430f712c9SDave Chinner 		/*
312530f712c9SDave Chinner 		 * No previous block or can't follow it, just default.
312630f712c9SDave Chinner 		 */
312730f712c9SDave Chinner 		else
312830f712c9SDave Chinner 			prevbno = NULLFSBLOCK;
312930f712c9SDave Chinner 		/*
313030f712c9SDave Chinner 		 * If there's a following (right) block, select a requested
313130f712c9SDave Chinner 		 * start block based on it.
313230f712c9SDave Chinner 		 */
313330f712c9SDave Chinner 		if (!isnullstartblock(ap->got.br_startblock)) {
313430f712c9SDave Chinner 			/*
313530f712c9SDave Chinner 			 * Calculate gap to start of next block.
313630f712c9SDave Chinner 			 */
313730f712c9SDave Chinner 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
313830f712c9SDave Chinner 			/*
313930f712c9SDave Chinner 			 * Figure the startblock based on the next block's
314030f712c9SDave Chinner 			 * start and the gap size.
314130f712c9SDave Chinner 			 */
314230f712c9SDave Chinner 			gotbno = ap->got.br_startblock;
314330f712c9SDave Chinner 			/*
314430f712c9SDave Chinner 			 * Heuristic!
314530f712c9SDave Chinner 			 * If the gap is large relative to the piece we're
314630f712c9SDave Chinner 			 * allocating, or using it gives us an invalid block
314730f712c9SDave Chinner 			 * number, then just use the start of the next block
314830f712c9SDave Chinner 			 * offset by our length.
314930f712c9SDave Chinner 			 */
315030f712c9SDave Chinner 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
315130f712c9SDave Chinner 			    ISVALID(gotbno - gotdiff, gotbno))
315230f712c9SDave Chinner 				gotbno -= adjust;
315330f712c9SDave Chinner 			else if (ISVALID(gotbno - ap->length, gotbno)) {
315430f712c9SDave Chinner 				gotbno -= ap->length;
315530f712c9SDave Chinner 				gotdiff += adjust - ap->length;
315630f712c9SDave Chinner 			} else
315730f712c9SDave Chinner 				gotdiff += adjust;
315830f712c9SDave Chinner 		}
315930f712c9SDave Chinner 		/*
316030f712c9SDave Chinner 		 * No next block, just default.
316130f712c9SDave Chinner 		 */
316230f712c9SDave Chinner 		else
316330f712c9SDave Chinner 			gotbno = NULLFSBLOCK;
316430f712c9SDave Chinner 		/*
316530f712c9SDave Chinner 		 * If both valid, pick the better one, else the only good
316630f712c9SDave Chinner 		 * one, else ap->blkno is already set (to 0 or the inode block).
316730f712c9SDave Chinner 		 */
316830f712c9SDave Chinner 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
316930f712c9SDave Chinner 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
317030f712c9SDave Chinner 		else if (prevbno != NULLFSBLOCK)
317130f712c9SDave Chinner 			ap->blkno = prevbno;
317230f712c9SDave Chinner 		else if (gotbno != NULLFSBLOCK)
317330f712c9SDave Chinner 			ap->blkno = gotbno;
317430f712c9SDave Chinner 	}
317530f712c9SDave Chinner #undef ISVALID
317630f712c9SDave Chinner }
317730f712c9SDave Chinner 
317805cf492aSDave Chinner int
317930f712c9SDave Chinner xfs_bmap_longest_free_extent(
318076257a15SDave Chinner 	struct xfs_perag	*pag,
318130f712c9SDave Chinner 	struct xfs_trans	*tp,
31826b637ad0SDave Chinner 	xfs_extlen_t		*blen)
318330f712c9SDave Chinner {
318430f712c9SDave Chinner 	xfs_extlen_t		longest;
318530f712c9SDave Chinner 	int			error = 0;
318630f712c9SDave Chinner 
31877ac2ff8bSDave Chinner 	if (!xfs_perag_initialised_agf(pag)) {
318808d3e84fSDave Chinner 		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
318976b47e52SDave Chinner 				NULL);
31906b637ad0SDave Chinner 		if (error)
319176257a15SDave Chinner 			return error;
319230f712c9SDave Chinner 	}
319330f712c9SDave Chinner 
3194a1f69417SEric Sandeen 	longest = xfs_alloc_longest_free_extent(pag,
319576257a15SDave Chinner 				xfs_alloc_min_freelist(pag->pag_mount, pag),
31963fd129b6SDarrick J. Wong 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
319730f712c9SDave Chinner 	if (*blen < longest)
319830f712c9SDave Chinner 		*blen = longest;
319930f712c9SDave Chinner 
320076257a15SDave Chinner 	return 0;
320130f712c9SDave Chinner }
320230f712c9SDave Chinner 
32036b637ad0SDave Chinner static xfs_extlen_t
320430f712c9SDave Chinner xfs_bmap_select_minlen(
320530f712c9SDave Chinner 	struct xfs_bmalloca	*ap,
320630f712c9SDave Chinner 	struct xfs_alloc_arg	*args,
32076b637ad0SDave Chinner 	xfs_extlen_t		blen)
320830f712c9SDave Chinner {
321030f712c9SDave Chinner 	/*
32116b637ad0SDave Chinner 	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), the
32126b637ad0SDave Chinner 	 * AGs we skipped may still hold enough contiguous free space for this
32136b637ad0SDave Chinner 	 * request, so never select a minlen below the caller's minimum.
321330f712c9SDave Chinner 	 */
32146b637ad0SDave Chinner 	if (blen < ap->minlen)
32156b637ad0SDave Chinner 		return ap->minlen;
32166b637ad0SDave Chinner 
321730f712c9SDave Chinner 	/*
321830f712c9SDave Chinner 	 * If the best seen length is less than the request length,
32196b637ad0SDave Chinner 	 * use the best as the minimum, otherwise we've got the maxlen we
32206b637ad0SDave Chinner 	 * were asked for.
322130f712c9SDave Chinner 	 */
32226b637ad0SDave Chinner 	if (blen < args->maxlen)
32236b637ad0SDave Chinner 		return blen;
32246b637ad0SDave Chinner 	return args->maxlen;
322530f712c9SDave Chinner }
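
/*
 * Worked example (illustrative numbers): with ap->minlen = 4 and
 * args->maxlen = 16, a longest-seen free extent of blen = 2 yields a minlen
 * of 4 (never below the caller's minimum), blen = 8 yields 8 (shrink to the
 * best we saw), and blen = 32 yields 16 (the full maxlen was available).
 */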
322630f712c9SDave Chinner 
322785843327SDave Chinner static int
322830f712c9SDave Chinner xfs_bmap_btalloc_select_lengths(
322930f712c9SDave Chinner 	struct xfs_bmalloca	*ap,
323030f712c9SDave Chinner 	struct xfs_alloc_arg	*args,
323130f712c9SDave Chinner 	xfs_extlen_t		*blen)
323230f712c9SDave Chinner {
323385843327SDave Chinner 	struct xfs_mount	*mp = args->mp;
323476257a15SDave Chinner 	struct xfs_perag	*pag;
323576257a15SDave Chinner 	xfs_agnumber_t		agno, startag;
323676257a15SDave Chinner 	int			error = 0;
323730f712c9SDave Chinner 
323836b6ad2dSDave Chinner 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
323936b6ad2dSDave Chinner 		args->total = ap->minlen;
324036b6ad2dSDave Chinner 		args->minlen = ap->minlen;
324136b6ad2dSDave Chinner 		return 0;
324236b6ad2dSDave Chinner 	}
324330f712c9SDave Chinner 
324436b6ad2dSDave Chinner 	args->total = ap->total;
324585843327SDave Chinner 	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
324630f712c9SDave Chinner 	if (startag == NULLAGNUMBER)
324776257a15SDave Chinner 		startag = 0;
324830f712c9SDave Chinner 
324976257a15SDave Chinner 	*blen = 0;
325076257a15SDave Chinner 	for_each_perag_wrap(mp, startag, agno, pag) {
32516b637ad0SDave Chinner 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
32526b637ad0SDave Chinner 		if (error && error != -EAGAIN)
325376257a15SDave Chinner 			break;
32546b637ad0SDave Chinner 		error = 0;
325576257a15SDave Chinner 		if (*blen >= args->maxlen)
325630f712c9SDave Chinner 			break;
325730f712c9SDave Chinner 	}
325876257a15SDave Chinner 	if (pag)
325976257a15SDave Chinner 		xfs_perag_rele(pag);
326030f712c9SDave Chinner 
32616b637ad0SDave Chinner 	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
326276257a15SDave Chinner 	return error;
326330f712c9SDave Chinner }
326430f712c9SDave Chinner 
3265751f3767SDarrick J. Wong /* Update all inode and quota accounting for the allocation we just did. */
3266751f3767SDarrick J. Wong static void
3267751f3767SDarrick J. Wong xfs_bmap_btalloc_accounting(
3268751f3767SDarrick J. Wong 	struct xfs_bmalloca	*ap,
3269751f3767SDarrick J. Wong 	struct xfs_alloc_arg	*args)
3270751f3767SDarrick J. Wong {
32714b4c1326SDarrick J. Wong 	if (ap->flags & XFS_BMAPI_COWFORK) {
32724b4c1326SDarrick J. Wong 		/*
32734b4c1326SDarrick J. Wong 		 * COW fork blocks are in-core only and thus are treated as
32744b4c1326SDarrick J. Wong 		 * in-core quota reservation (like delalloc blocks) even when
32754b4c1326SDarrick J. Wong 		 * converted to real blocks. The quota reservation is not
32764b4c1326SDarrick J. Wong 		 * accounted to disk until blocks are remapped to the data
32774b4c1326SDarrick J. Wong 		 * fork. So if these blocks were previously delalloc, we
32784b4c1326SDarrick J. Wong 		 * already have quota reservation and there's nothing to do
32794b4c1326SDarrick J. Wong 		 * yet.
32804b4c1326SDarrick J. Wong 		 */
32819fe82b8cSDarrick J. Wong 		if (ap->wasdel) {
32829fe82b8cSDarrick J. Wong 			xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
32834b4c1326SDarrick J. Wong 			return;
32849fe82b8cSDarrick J. Wong 		}
32854b4c1326SDarrick J. Wong 
32864b4c1326SDarrick J. Wong 		/*
32874b4c1326SDarrick J. Wong 		 * Otherwise, we've allocated blocks in a hole. The transaction
32884b4c1326SDarrick J. Wong 		 * has acquired in-core quota reservation for this extent.
32894b4c1326SDarrick J. Wong 		 * Rather than account these as real blocks, however, we reduce
32904b4c1326SDarrick J. Wong 		 * the transaction quota reservation based on the allocation.
32914b4c1326SDarrick J. Wong 		 * This essentially transfers the transaction quota reservation
32924b4c1326SDarrick J. Wong 		 * to that of a delalloc extent.
32934b4c1326SDarrick J. Wong 		 */
32944b4c1326SDarrick J. Wong 		ap->ip->i_delayed_blks += args->len;
32954b4c1326SDarrick J. Wong 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
32964b4c1326SDarrick J. Wong 				-(long)args->len);
32974b4c1326SDarrick J. Wong 		return;
32984b4c1326SDarrick J. Wong 	}
32994b4c1326SDarrick J. Wong 
33004b4c1326SDarrick J. Wong 	/* data/attr fork only */
33016e73a545SChristoph Hellwig 	ap->ip->i_nblocks += args->len;
3302751f3767SDarrick J. Wong 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
33039fe82b8cSDarrick J. Wong 	if (ap->wasdel) {
3304751f3767SDarrick J. Wong 		ap->ip->i_delayed_blks -= args->len;
33059fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
33069fe82b8cSDarrick J. Wong 	}
3307751f3767SDarrick J. Wong 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3308751f3767SDarrick J. Wong 		ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3309751f3767SDarrick J. Wong 		args->len);
3310751f3767SDarrick J. Wong }
3311751f3767SDarrick J. Wong 
33120961fddfSChandan Babu R static int
33130961fddfSChandan Babu R xfs_bmap_compute_alignments(
33140961fddfSChandan Babu R 	struct xfs_bmalloca	*ap,
33150961fddfSChandan Babu R 	struct xfs_alloc_arg	*args)
33160961fddfSChandan Babu R {
33170961fddfSChandan Babu R 	struct xfs_mount	*mp = args->mp;
33180961fddfSChandan Babu R 	xfs_extlen_t		align = 0; /* minimum allocation alignment */
33190961fddfSChandan Babu R 	int			stripe_align = 0;
33200961fddfSChandan Babu R 
33210961fddfSChandan Babu R 	/* stripe alignment for allocation is determined by mount parameters */
33220560f31aSDave Chinner 	if (mp->m_swidth && xfs_has_swalloc(mp))
33230961fddfSChandan Babu R 		stripe_align = mp->m_swidth;
33240961fddfSChandan Babu R 	else if (mp->m_dalign)
33250961fddfSChandan Babu R 		stripe_align = mp->m_dalign;
33260961fddfSChandan Babu R 
33270961fddfSChandan Babu R 	if (ap->flags & XFS_BMAPI_COWFORK)
33280961fddfSChandan Babu R 		align = xfs_get_cowextsz_hint(ap->ip);
33290961fddfSChandan Babu R 	else if (ap->datatype & XFS_ALLOC_USERDATA)
33300961fddfSChandan Babu R 		align = xfs_get_extsz_hint(ap->ip);
33310961fddfSChandan Babu R 	if (align) {
3332560ab6c0SChandan Babu R 		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3333560ab6c0SChandan Babu R 					ap->eof, 0, ap->conv, &ap->offset,
3334560ab6c0SChandan Babu R 					&ap->length))
3335560ab6c0SChandan Babu R 			ASSERT(0);
33360961fddfSChandan Babu R 		ASSERT(ap->length);
33370961fddfSChandan Babu R 	}
33380961fddfSChandan Babu R 
33390961fddfSChandan Babu R 	/* apply extent size hints if obtained earlier */
33400961fddfSChandan Babu R 	if (align) {
33410961fddfSChandan Babu R 		args->prod = align;
33420961fddfSChandan Babu R 		div_u64_rem(ap->offset, args->prod, &args->mod);
33430961fddfSChandan Babu R 		if (args->mod)
33440961fddfSChandan Babu R 			args->mod = args->prod - args->mod;
33450961fddfSChandan Babu R 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
33460961fddfSChandan Babu R 		args->prod = 1;
33470961fddfSChandan Babu R 		args->mod = 0;
33480961fddfSChandan Babu R 	} else {
33490961fddfSChandan Babu R 		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
33500961fddfSChandan Babu R 		div_u64_rem(ap->offset, args->prod, &args->mod);
33510961fddfSChandan Babu R 		if (args->mod)
33520961fddfSChandan Babu R 			args->mod = args->prod - args->mod;
33530961fddfSChandan Babu R 	}
33540961fddfSChandan Babu R 
33550961fddfSChandan Babu R 	return stripe_align;
33560961fddfSChandan Babu R }
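
/*
 * Worked example (illustrative numbers): with no extent size hint, 1024-byte
 * blocks and 4096-byte pages, args->prod = 4096 >> 10 = 4.  For
 * ap->offset = 6 blocks, 6 % 4 = 2, so args->mod = 4 - 2 = 2, asking the
 * allocator for a length that makes offset + length end on a page-aligned
 * file offset.
 */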
33570961fddfSChandan Babu R 
335807c72e55SChandan Babu R static void
335907c72e55SChandan Babu R xfs_bmap_process_allocated_extent(
336007c72e55SChandan Babu R 	struct xfs_bmalloca	*ap,
336107c72e55SChandan Babu R 	struct xfs_alloc_arg	*args,
336207c72e55SChandan Babu R 	xfs_fileoff_t		orig_offset,
336307c72e55SChandan Babu R 	xfs_extlen_t		orig_length)
336407c72e55SChandan Babu R {
336507c72e55SChandan Babu R 	ap->blkno = args->fsbno;
336607c72e55SChandan Babu R 	ap->length = args->len;
336707c72e55SChandan Babu R 	/*
336807c72e55SChandan Babu R 	 * If the extent size hint is active, we tried to round the
336907c72e55SChandan Babu R 	 * caller's allocation request offset down to extsz and the
337007c72e55SChandan Babu R 	 * length up to another extsz boundary.  If we found a free
337107c72e55SChandan Babu R 	 * extent we mapped it in starting at this new offset.  If the
337207c72e55SChandan Babu R 	 * newly mapped space isn't long enough to cover any of the
337307c72e55SChandan Babu R 	 * range of offsets that was originally requested, move the
337407c72e55SChandan Babu R 	 * mapping up so that we can fill as much of the caller's
337507c72e55SChandan Babu R 	 * original request as possible.  Free space is apparently
337607c72e55SChandan Babu R 	 * very fragmented so we're unlikely to be able to satisfy the
337707c72e55SChandan Babu R 	 * hints anyway.
337807c72e55SChandan Babu R 	 */
337907c72e55SChandan Babu R 	if (ap->length <= orig_length)
338007c72e55SChandan Babu R 		ap->offset = orig_offset;
338107c72e55SChandan Babu R 	else if (ap->offset + ap->length < orig_offset + orig_length)
338207c72e55SChandan Babu R 		ap->offset = orig_offset + orig_length - ap->length;
338307c72e55SChandan Babu R 	xfs_bmap_btalloc_accounting(ap, args);
338407c72e55SChandan Babu R }
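
/*
 * Worked example (illustrative numbers): a caller asks for 8 blocks at file
 * offset 10 and an extent size hint of 8 widens the request to 16 blocks at
 * offset 8.  If the allocator can only find 4 blocks, the mapping is moved
 * back up to offset 10 (ap->length <= orig_length) so it covers the start of
 * the range the caller actually asked for.
 */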
338507c72e55SChandan Babu R 
338630151967SChandan Babu R #ifdef DEBUG
338730151967SChandan Babu R static int
338830151967SChandan Babu R xfs_bmap_exact_minlen_extent_alloc(
338930151967SChandan Babu R 	struct xfs_bmalloca	*ap)
339030151967SChandan Babu R {
339130151967SChandan Babu R 	struct xfs_mount	*mp = ap->ip->i_mount;
339230151967SChandan Babu R 	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
339330151967SChandan Babu R 	xfs_fileoff_t		orig_offset;
339430151967SChandan Babu R 	xfs_extlen_t		orig_length;
339530151967SChandan Babu R 	int			error;
339630151967SChandan Babu R 
339730151967SChandan Babu R 	ASSERT(ap->length);
339830151967SChandan Babu R 
339930151967SChandan Babu R 	if (ap->minlen != 1) {
340030151967SChandan Babu R 		ap->blkno = NULLFSBLOCK;
340130151967SChandan Babu R 		ap->length = 0;
340230151967SChandan Babu R 		return 0;
340330151967SChandan Babu R 	}
340430151967SChandan Babu R 
340530151967SChandan Babu R 	orig_offset = ap->offset;
340630151967SChandan Babu R 	orig_length = ap->length;
340730151967SChandan Babu R 
340830151967SChandan Babu R 	args.alloc_minlen_only = 1;
340930151967SChandan Babu R 
341030151967SChandan Babu R 	xfs_bmap_compute_alignments(ap, &args);
341130151967SChandan Babu R 
341230151967SChandan Babu R 	/*
341330151967SChandan Babu R 	 * Unlike the longest extent available in an AG, we don't track
341430151967SChandan Babu R 	 * the length of an AG's shortest extent.
341530151967SChandan Babu R 	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug-only knob, so we
341630151967SChandan Babu R 	 * can afford to start traversing from the 0th AG without worrying
341730151967SChandan Babu R 	 * about a performance drop in these debug-only code paths.
341930151967SChandan Babu R 	 */
342030151967SChandan Babu R 	ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
342130151967SChandan Babu R 
342230151967SChandan Babu R 	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
34236e8bd39dSChandan Babu R 	args.minlen = args.maxlen = ap->minlen;
34246e8bd39dSChandan Babu R 	args.total = ap->total;
342530151967SChandan Babu R 
342630151967SChandan Babu R 	args.alignment = 1;
342730151967SChandan Babu R 	args.minalignslop = 0;
342830151967SChandan Babu R 
342930151967SChandan Babu R 	args.minleft = ap->minleft;
343030151967SChandan Babu R 	args.wasdel = ap->wasdel;
343130151967SChandan Babu R 	args.resv = XFS_AG_RESV_NONE;
343230151967SChandan Babu R 	args.datatype = ap->datatype;
343330151967SChandan Babu R 
3434319c9e87SDave Chinner 	error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
343530151967SChandan Babu R 	if (error)
343630151967SChandan Babu R 		return error;
343730151967SChandan Babu R 
343830151967SChandan Babu R 	if (args.fsbno != NULLFSBLOCK) {
343930151967SChandan Babu R 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
344030151967SChandan Babu R 			orig_length);
344130151967SChandan Babu R 	} else {
344230151967SChandan Babu R 		ap->blkno = NULLFSBLOCK;
344330151967SChandan Babu R 		ap->length = 0;
344430151967SChandan Babu R 	}
344530151967SChandan Babu R 
344630151967SChandan Babu R 	return 0;
344730151967SChandan Babu R }
344830151967SChandan Babu R #else
344930151967SChandan Babu R 
345030151967SChandan Babu R #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
345130151967SChandan Babu R 
345230151967SChandan Babu R #endif
345330151967SChandan Babu R 
345485843327SDave Chinner /*
345585843327SDave Chinner  * If we are not low on available data blocks and we are allocating at
345685843327SDave Chinner  * EOF, optimise allocation for contiguous file extension and/or stripe
345785843327SDave Chinner  * alignment of the new extent.
345885843327SDave Chinner  *
345985843327SDave Chinner  * NOTE: ap->aeof is only set if the allocation length is >= the
346085843327SDave Chinner  * stripe unit and the allocation offset is at the end of file.
346185843327SDave Chinner  */
346285843327SDave Chinner static int
346385843327SDave Chinner xfs_bmap_btalloc_at_eof(
346485843327SDave Chinner 	struct xfs_bmalloca	*ap,
346585843327SDave Chinner 	struct xfs_alloc_arg	*args,
346685843327SDave Chinner 	xfs_extlen_t		blen,
34672a7f6d41SDave Chinner 	int			stripe_align,
34682a7f6d41SDave Chinner 	bool			ag_only)
346985843327SDave Chinner {
347085843327SDave Chinner 	struct xfs_mount	*mp = args->mp;
3471f8f1ed1aSDave Chinner 	struct xfs_perag	*caller_pag = args->pag;
347285843327SDave Chinner 	int			error;
347385843327SDave Chinner 
347485843327SDave Chinner 	/*
347585843327SDave Chinner 	 * If there are already extents in the file, try an exact EOF block
347685843327SDave Chinner 	 * allocation to extend the file as a contiguous extent. If that fails,
347785843327SDave Chinner 	 * or it's the first allocation in a file, just try for a stripe aligned
347885843327SDave Chinner 	 * allocation.
347985843327SDave Chinner 	 */
348085843327SDave Chinner 	if (ap->offset) {
348185843327SDave Chinner 		xfs_extlen_t	nextminlen = 0;
348285843327SDave Chinner 
348385843327SDave Chinner 		/*
348485843327SDave Chinner 		 * Compute the minlen+alignment for the next case.  Set slop so
348585843327SDave Chinner 		 * that the value of minlen+alignment+slop doesn't go up between
348685843327SDave Chinner 		 * the calls.
348785843327SDave Chinner 		 */
3488230e8fe8SDave Chinner 		args->alignment = 1;
348985843327SDave Chinner 		if (blen > stripe_align && blen <= args->maxlen)
349085843327SDave Chinner 			nextminlen = blen - stripe_align;
349185843327SDave Chinner 		else
349285843327SDave Chinner 			nextminlen = args->minlen;
349385843327SDave Chinner 		if (nextminlen + stripe_align > args->minlen + 1)
349485843327SDave Chinner 			args->minalignslop = nextminlen + stripe_align -
349585843327SDave Chinner 					args->minlen - 1;
349685843327SDave Chinner 		else
349785843327SDave Chinner 			args->minalignslop = 0;
349885843327SDave Chinner 
3499f8f1ed1aSDave Chinner 		if (!caller_pag)
35005f36b2ceSDave Chinner 			args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
35015f36b2ceSDave Chinner 		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3502b82a5c42SDarrick J. Wong 		if (!caller_pag) {
350385843327SDave Chinner 			xfs_perag_put(args->pag);
3504b82a5c42SDarrick J. Wong 			args->pag = NULL;
3505b82a5c42SDarrick J. Wong 		}
350685843327SDave Chinner 		if (error)
350785843327SDave Chinner 			return error;
350885843327SDave Chinner 
350985843327SDave Chinner 		if (args->fsbno != NULLFSBLOCK)
351085843327SDave Chinner 			return 0;
351185843327SDave Chinner 		/*
351285843327SDave Chinner 		 * Exact allocation failed. Reset to try an aligned allocation
351385843327SDave Chinner 		 * according to the original allocation specification.
351485843327SDave Chinner 		 */
351585843327SDave Chinner 		args->alignment = stripe_align;
351685843327SDave Chinner 		args->minlen = nextminlen;
351785843327SDave Chinner 		args->minalignslop = 0;
351885843327SDave Chinner 	} else {
351985843327SDave Chinner 		/*
352085843327SDave Chinner 		 * Adjust minlen to try and preserve alignment if we
352185843327SDave Chinner 		 * can't guarantee an aligned maxlen extent.
352285843327SDave Chinner 		 */
3523230e8fe8SDave Chinner 		args->alignment = stripe_align;
352485843327SDave Chinner 		if (blen > args->alignment &&
352585843327SDave Chinner 		    blen <= args->maxlen + args->alignment)
352685843327SDave Chinner 			args->minlen = blen - args->alignment;
352785843327SDave Chinner 		args->minalignslop = 0;
352885843327SDave Chinner 	}
352985843327SDave Chinner 
3530f8f1ed1aSDave Chinner 	if (ag_only) {
3531db4710fdSDave Chinner 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3532f8f1ed1aSDave Chinner 	} else {
3533f8f1ed1aSDave Chinner 		args->pag = NULL;
35342a7f6d41SDave Chinner 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3535f8f1ed1aSDave Chinner 		ASSERT(args->pag == NULL);
3536f8f1ed1aSDave Chinner 		args->pag = caller_pag;
3537f8f1ed1aSDave Chinner 	}
353885843327SDave Chinner 	if (error)
353985843327SDave Chinner 		return error;
354085843327SDave Chinner 
354185843327SDave Chinner 	if (args->fsbno != NULLFSBLOCK)
354285843327SDave Chinner 		return 0;
354385843327SDave Chinner 
354485843327SDave Chinner 	/*
354585843327SDave Chinner 	 * Allocation failed, so turn return the allocation args to their
354685843327SDave Chinner 	 * Allocation failed, so return the allocation args to their
354785843327SDave Chinner 	 * failure as if this function was never called.
354885843327SDave Chinner 	 */
354985843327SDave Chinner 	args->alignment = 1;
355085843327SDave Chinner 	return 0;
355185843327SDave Chinner }
355285843327SDave Chinner 
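
/*
 * Worked example (illustrative numbers) for the minalignslop logic above:
 * with stripe_align = 8, args->minlen = 4, args->maxlen = 16 and a longest
 * free extent of blen = 12, nextminlen = 12 - 8 = 4 and minalignslop =
 * 4 + 8 - 4 - 1 = 7, so the exact-block attempt keeps 7 spare blocks in
 * reserve and the aligned retry can never need more space than the first
 * attempt accounted for.
 */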
355389563e7dSDave Chinner /*
355489563e7dSDave Chinner  * We have failed multiple allocation attempts so now are in a low space
355589563e7dSDave Chinner  * allocation situation. Try a locality-first, full-filesystem, minimum-length
355689563e7dSDave Chinner  * allocation whilst still maintaining the necessary total block reservation
355789563e7dSDave Chinner  * requirements.
355889563e7dSDave Chinner  *
355989563e7dSDave Chinner  * If that fails, we are now critically low on space, so perform a last resort
356089563e7dSDave Chinner  * allocation attempt: no reserve, no locality, blocking, minimum length, full
356189563e7dSDave Chinner  * filesystem free space scan. We also indicate to future allocations in this
356289563e7dSDave Chinner  * transaction that we are critically low on space so they don't waste time on
356389563e7dSDave Chinner  * allocation modes that are unlikely to succeed.
356489563e7dSDave Chinner  */
35658f7747adSDave Chinner int
356689563e7dSDave Chinner xfs_bmap_btalloc_low_space(
356789563e7dSDave Chinner 	struct xfs_bmalloca	*ap,
356889563e7dSDave Chinner 	struct xfs_alloc_arg	*args)
356989563e7dSDave Chinner {
357089563e7dSDave Chinner 	int			error;
357189563e7dSDave Chinner 
357289563e7dSDave Chinner 	if (args->minlen > ap->minlen) {
357389563e7dSDave Chinner 		args->minlen = ap->minlen;
357489563e7dSDave Chinner 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
357589563e7dSDave Chinner 		if (error || args->fsbno != NULLFSBLOCK)
357689563e7dSDave Chinner 			return error;
357789563e7dSDave Chinner 	}
357889563e7dSDave Chinner 
357989563e7dSDave Chinner 	/* Last ditch attempt before failure is declared. */
358089563e7dSDave Chinner 	args->total = ap->minlen;
358189563e7dSDave Chinner 	error = xfs_alloc_vextent_first_ag(args, 0);
358289563e7dSDave Chinner 	if (error)
358389563e7dSDave Chinner 		return error;
358489563e7dSDave Chinner 	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
358589563e7dSDave Chinner 	return 0;
358689563e7dSDave Chinner }
358789563e7dSDave Chinner 
358889563e7dSDave Chinner static int
358989563e7dSDave Chinner xfs_bmap_btalloc_filestreams(
359085843327SDave Chinner 	struct xfs_bmalloca	*ap,
359185843327SDave Chinner 	struct xfs_alloc_arg	*args,
359285843327SDave Chinner 	int			stripe_align)
359385843327SDave Chinner {
359485843327SDave Chinner 	xfs_extlen_t		blen = 0;
3595f8f1ed1aSDave Chinner 	int			error = 0;
359685843327SDave Chinner 
35988f7747adSDave Chinner 	error = xfs_filestream_select_ag(ap, args, &blen);
359989563e7dSDave Chinner 	if (error)
360089563e7dSDave Chinner 		return error;
3601f8f1ed1aSDave Chinner 	ASSERT(args->pag);
360289563e7dSDave Chinner 
36038f7747adSDave Chinner 	/*
36048f7747adSDave Chinner 	 * If we are in low space mode, then optimal allocation will fail so
36058f7747adSDave Chinner 	 * prepare for minimal allocation and jump to the low space algorithm
36068f7747adSDave Chinner 	 * immediately.
36078f7747adSDave Chinner 	 */
36088f7747adSDave Chinner 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
36098f7747adSDave Chinner 		args->minlen = ap->minlen;
3610f8f1ed1aSDave Chinner 		ASSERT(args->fsbno == NULLFSBLOCK);
36118f7747adSDave Chinner 		goto out_low_space;
36128f7747adSDave Chinner 	}
36138f7747adSDave Chinner 
36148f7747adSDave Chinner 	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3615f8f1ed1aSDave Chinner 	if (ap->aeof)
361689563e7dSDave Chinner 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
361789563e7dSDave Chinner 				true);
361889563e7dSDave Chinner 
3619f8f1ed1aSDave Chinner 	if (!error && args->fsbno == NULLFSBLOCK)
362089563e7dSDave Chinner 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
362189563e7dSDave Chinner 
36228f7747adSDave Chinner out_low_space:
3623f8f1ed1aSDave Chinner 	/*
3624f8f1ed1aSDave Chinner 	 * We are now done with the perag reference for the filestreams
3625f8f1ed1aSDave Chinner 	 * association provided by xfs_filestream_select_ag(). Release it now as
3626f8f1ed1aSDave Chinner 	 * we've either succeeded, had a fatal error or we are out of space and
3627f8f1ed1aSDave Chinner 	 * we've either succeeded, had a fatal error, or we are out of space
3628f8f1ed1aSDave Chinner 	 * and need to do a full filesystem scan for free space, which will
3629f8f1ed1aSDave Chinner 	 * take its own references.
3630f8f1ed1aSDave Chinner 	xfs_perag_rele(args->pag);
3631f8f1ed1aSDave Chinner 	args->pag = NULL;
3632f8f1ed1aSDave Chinner 	if (error || args->fsbno != NULLFSBLOCK)
3633f8f1ed1aSDave Chinner 		return error;
3634f8f1ed1aSDave Chinner 
363589563e7dSDave Chinner 	return xfs_bmap_btalloc_low_space(ap, args);
363689563e7dSDave Chinner }
363789563e7dSDave Chinner 
363889563e7dSDave Chinner static int
363989563e7dSDave Chinner xfs_bmap_btalloc_best_length(
364089563e7dSDave Chinner 	struct xfs_bmalloca	*ap,
364189563e7dSDave Chinner 	struct xfs_alloc_arg	*args,
364289563e7dSDave Chinner 	int			stripe_align)
364389563e7dSDave Chinner {
364489563e7dSDave Chinner 	xfs_extlen_t		blen = 0;
364589563e7dSDave Chinner 	int			error;
364689563e7dSDave Chinner 
364789563e7dSDave Chinner 	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
364889563e7dSDave Chinner 	xfs_bmap_adjacent(ap);
364989563e7dSDave Chinner 
365089563e7dSDave Chinner 	/*
365189563e7dSDave Chinner 	 * Search for an allocation group with a single extent large enough for
365289563e7dSDave Chinner 	 * the request.  If one isn't found, then adjust the minimum allocation
365389563e7dSDave Chinner 	 * size to the largest space found.
365489563e7dSDave Chinner 	 */
365585843327SDave Chinner 	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
365685843327SDave Chinner 	if (error)
365785843327SDave Chinner 		return error;
365885843327SDave Chinner 
365985843327SDave Chinner 	/*
366085843327SDave Chinner 	 * Don't attempt optimal EOF allocation if previous allocations barely
366185843327SDave Chinner 	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
366285843327SDave Chinner 	 * optimal or even aligned allocations in this case, so don't waste time
366385843327SDave Chinner 	 * trying.
366485843327SDave Chinner 	 */
366585843327SDave Chinner 	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
36662a7f6d41SDave Chinner 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
366789563e7dSDave Chinner 				false);
366889563e7dSDave Chinner 		if (error || args->fsbno != NULLFSBLOCK)
366985843327SDave Chinner 			return error;
367085843327SDave Chinner 	}
367185843327SDave Chinner 
367289563e7dSDave Chinner 	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
367389563e7dSDave Chinner 	if (error || args->fsbno != NULLFSBLOCK)
367485843327SDave Chinner 		return error;
367589563e7dSDave Chinner 
367689563e7dSDave Chinner 	return xfs_bmap_btalloc_low_space(ap, args);
367785843327SDave Chinner }
367885843327SDave Chinner 
367985843327SDave Chinner static int
368030f712c9SDave Chinner xfs_bmap_btalloc(
368130151967SChandan Babu R 	struct xfs_bmalloca	*ap)
368230f712c9SDave Chinner {
368330151967SChandan Babu R 	struct xfs_mount	*mp = ap->ip->i_mount;
368485843327SDave Chinner 	struct xfs_alloc_arg	args = {
368585843327SDave Chinner 		.tp		= ap->tp,
368685843327SDave Chinner 		.mp		= mp,
368785843327SDave Chinner 		.fsbno		= NULLFSBLOCK,
368885843327SDave Chinner 		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
368985843327SDave Chinner 		.minleft	= ap->minleft,
369085843327SDave Chinner 		.wasdel		= ap->wasdel,
369185843327SDave Chinner 		.resv		= XFS_AG_RESV_NONE,
369285843327SDave Chinner 		.datatype	= ap->datatype,
369385843327SDave Chinner 		.alignment	= 1,
369485843327SDave Chinner 		.minalignslop	= 0,
369585843327SDave Chinner 	};
36966d8a45ceSDarrick J. Wong 	xfs_fileoff_t		orig_offset;
36976d8a45ceSDarrick J. Wong 	xfs_extlen_t		orig_length;
369830f712c9SDave Chinner 	int			error;
369930f712c9SDave Chinner 	int			stripe_align;
370030f712c9SDave Chinner 
370130f712c9SDave Chinner 	ASSERT(ap->length);
37026d8a45ceSDarrick J. Wong 	orig_offset = ap->offset;
37036d8a45ceSDarrick J. Wong 	orig_length = ap->length;
370430f712c9SDave Chinner 
37050961fddfSChandan Babu R 	stripe_align = xfs_bmap_compute_alignments(ap, &args);
370630f712c9SDave Chinner 
370730f712c9SDave Chinner 	/* Trim the allocation back to the maximum an AG can fit. */
37089bb54cb5SDave Chinner 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
370936b6ad2dSDave Chinner 
371089563e7dSDave Chinner 	if ((ap->datatype & XFS_ALLOC_USERDATA) &&
371189563e7dSDave Chinner 	    xfs_inode_is_filestream(ap->ip))
371289563e7dSDave Chinner 		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
371389563e7dSDave Chinner 	else
371485843327SDave Chinner 		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
371530f712c9SDave Chinner 	if (error)
371630f712c9SDave Chinner 		return error;
37170961fddfSChandan Babu R 
371807c72e55SChandan Babu R 	if (args.fsbno != NULLFSBLOCK) {
371907c72e55SChandan Babu R 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
372007c72e55SChandan Babu R 			orig_length);
372130f712c9SDave Chinner 	} else {
372230f712c9SDave Chinner 		ap->blkno = NULLFSBLOCK;
372330f712c9SDave Chinner 		ap->length = 0;
372430f712c9SDave Chinner 	}
372530f712c9SDave Chinner 	return 0;
372630f712c9SDave Chinner }
372730f712c9SDave Chinner 
37280a0af28cSDarrick J. Wong /* Trim extent to fit a logical block range. */
37290a0af28cSDarrick J. Wong void
37300a0af28cSDarrick J. Wong xfs_trim_extent(
37310a0af28cSDarrick J. Wong 	struct xfs_bmbt_irec	*irec,
37320a0af28cSDarrick J. Wong 	xfs_fileoff_t		bno,
37330a0af28cSDarrick J. Wong 	xfs_filblks_t		len)
37340a0af28cSDarrick J. Wong {
37350a0af28cSDarrick J. Wong 	xfs_fileoff_t		distance;
37360a0af28cSDarrick J. Wong 	xfs_fileoff_t		end = bno + len;
37370a0af28cSDarrick J. Wong 
37380a0af28cSDarrick J. Wong 	if (irec->br_startoff + irec->br_blockcount <= bno ||
37390a0af28cSDarrick J. Wong 	    irec->br_startoff >= end) {
37400a0af28cSDarrick J. Wong 		irec->br_blockcount = 0;
37410a0af28cSDarrick J. Wong 		return;
37420a0af28cSDarrick J. Wong 	}
37430a0af28cSDarrick J. Wong 
37440a0af28cSDarrick J. Wong 	if (irec->br_startoff < bno) {
37450a0af28cSDarrick J. Wong 		distance = bno - irec->br_startoff;
37460a0af28cSDarrick J. Wong 		if (isnullstartblock(irec->br_startblock))
37470a0af28cSDarrick J. Wong 			irec->br_startblock = DELAYSTARTBLOCK;
37480a0af28cSDarrick J. Wong 		if (irec->br_startblock != DELAYSTARTBLOCK &&
37490a0af28cSDarrick J. Wong 		    irec->br_startblock != HOLESTARTBLOCK)
37500a0af28cSDarrick J. Wong 			irec->br_startblock += distance;
37510a0af28cSDarrick J. Wong 		irec->br_startoff += distance;
37520a0af28cSDarrick J. Wong 		irec->br_blockcount -= distance;
37530a0af28cSDarrick J. Wong 	}
37540a0af28cSDarrick J. Wong 
37550a0af28cSDarrick J. Wong 	if (end < irec->br_startoff + irec->br_blockcount) {
37560a0af28cSDarrick J. Wong 		distance = irec->br_startoff + irec->br_blockcount - end;
37570a0af28cSDarrick J. Wong 		irec->br_blockcount -= distance;
37580a0af28cSDarrick J. Wong 	}
37590a0af28cSDarrick J. Wong }
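
/*
 * Worked example (illustrative numbers): trimming a mapping of 10 blocks at
 * file offset 5 backed by disk block 100 to the range [8, 12) first advances
 * the start by 3 (offset 8, block 103, count 7), then clips 3 blocks off the
 * tail, leaving a 4 block mapping at offset 8, block 103.
 */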
37600a0af28cSDarrick J. Wong 
376130f712c9SDave Chinner /*
376230f712c9SDave Chinner  * Trim the returned map to the required bounds
376330f712c9SDave Chinner  */
376430f712c9SDave Chinner STATIC void
376530f712c9SDave Chinner xfs_bmapi_trim_map(
376630f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,
376730f712c9SDave Chinner 	struct xfs_bmbt_irec	*got,
376830f712c9SDave Chinner 	xfs_fileoff_t		*bno,
376930f712c9SDave Chinner 	xfs_filblks_t		len,
377030f712c9SDave Chinner 	xfs_fileoff_t		obno,
377130f712c9SDave Chinner 	xfs_fileoff_t		end,
377230f712c9SDave Chinner 	int			n,
3773e7d410acSDave Chinner 	uint32_t		flags)
377430f712c9SDave Chinner {
377530f712c9SDave Chinner 	if ((flags & XFS_BMAPI_ENTIRE) ||
377630f712c9SDave Chinner 	    got->br_startoff + got->br_blockcount <= obno) {
377730f712c9SDave Chinner 		*mval = *got;
377830f712c9SDave Chinner 		if (isnullstartblock(got->br_startblock))
377930f712c9SDave Chinner 			mval->br_startblock = DELAYSTARTBLOCK;
378030f712c9SDave Chinner 		return;
378130f712c9SDave Chinner 	}
378230f712c9SDave Chinner 
378330f712c9SDave Chinner 	if (obno > *bno)
378430f712c9SDave Chinner 		*bno = obno;
378530f712c9SDave Chinner 	ASSERT((*bno >= obno) || (n == 0));
378630f712c9SDave Chinner 	ASSERT(*bno < end);
378730f712c9SDave Chinner 	mval->br_startoff = *bno;
378830f712c9SDave Chinner 	if (isnullstartblock(got->br_startblock))
378930f712c9SDave Chinner 		mval->br_startblock = DELAYSTARTBLOCK;
379030f712c9SDave Chinner 	else
379130f712c9SDave Chinner 		mval->br_startblock = got->br_startblock +
379230f712c9SDave Chinner 					(*bno - got->br_startoff);
379330f712c9SDave Chinner 	/*
379430f712c9SDave Chinner 	 * For the length, return the minimum of what we got and what we
379530f712c9SDave Chinner 	 * asked for.  We can use the len variable here because it is
379630f712c9SDave Chinner 	 * modified below and we could have been there before coming
379730f712c9SDave Chinner 	 * here if the first part of the allocation didn't overlap what
379830f712c9SDave Chinner 	 * was asked for.
379930f712c9SDave Chinner 	 */
380030f712c9SDave Chinner 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
380130f712c9SDave Chinner 			got->br_blockcount - (*bno - got->br_startoff));
380230f712c9SDave Chinner 	mval->br_state = got->br_state;
380330f712c9SDave Chinner 	ASSERT(mval->br_blockcount <= len);
380430f712c9SDave Chinner 	return;
380530f712c9SDave Chinner }
380630f712c9SDave Chinner 
380730f712c9SDave Chinner /*
380830f712c9SDave Chinner  * Update and validate the extent map to return
380930f712c9SDave Chinner  */
381030f712c9SDave Chinner STATIC void
381130f712c9SDave Chinner xfs_bmapi_update_map(
381230f712c9SDave Chinner 	struct xfs_bmbt_irec	**map,
381330f712c9SDave Chinner 	xfs_fileoff_t		*bno,
381430f712c9SDave Chinner 	xfs_filblks_t		*len,
381530f712c9SDave Chinner 	xfs_fileoff_t		obno,
381630f712c9SDave Chinner 	xfs_fileoff_t		end,
381730f712c9SDave Chinner 	int			*n,
3818e7d410acSDave Chinner 	uint32_t		flags)
381930f712c9SDave Chinner {
382030f712c9SDave Chinner 	xfs_bmbt_irec_t	*mval = *map;
382130f712c9SDave Chinner 
382230f712c9SDave Chinner 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
382330f712c9SDave Chinner 	       ((mval->br_startoff + mval->br_blockcount) <= end));
382430f712c9SDave Chinner 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
382530f712c9SDave Chinner 	       (mval->br_startoff < obno));
382630f712c9SDave Chinner 
382730f712c9SDave Chinner 	*bno = mval->br_startoff + mval->br_blockcount;
382830f712c9SDave Chinner 	*len = end - *bno;
382930f712c9SDave Chinner 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
383030f712c9SDave Chinner 		/* update previous map with new information */
383130f712c9SDave Chinner 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
383230f712c9SDave Chinner 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
383330f712c9SDave Chinner 		ASSERT(mval->br_state == mval[-1].br_state);
383430f712c9SDave Chinner 		mval[-1].br_blockcount = mval->br_blockcount;
383530f712c9SDave Chinner 		mval[-1].br_state = mval->br_state;
383630f712c9SDave Chinner 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
383730f712c9SDave Chinner 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
383830f712c9SDave Chinner 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
383930f712c9SDave Chinner 		   mval->br_startblock == mval[-1].br_startblock +
384030f712c9SDave Chinner 					  mval[-1].br_blockcount &&
3841c3a2f9ffSChristoph Hellwig 		   mval[-1].br_state == mval->br_state) {
384230f712c9SDave Chinner 		ASSERT(mval->br_startoff ==
384330f712c9SDave Chinner 		       mval[-1].br_startoff + mval[-1].br_blockcount);
384430f712c9SDave Chinner 		mval[-1].br_blockcount += mval->br_blockcount;
384530f712c9SDave Chinner 	} else if (*n > 0 &&
384630f712c9SDave Chinner 		   mval->br_startblock == DELAYSTARTBLOCK &&
384730f712c9SDave Chinner 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
384830f712c9SDave Chinner 		   mval->br_startoff ==
384930f712c9SDave Chinner 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
385030f712c9SDave Chinner 		mval[-1].br_blockcount += mval->br_blockcount;
385130f712c9SDave Chinner 		mval[-1].br_state = mval->br_state;
385230f712c9SDave Chinner 	} else if (!((*n == 0) &&
385330f712c9SDave Chinner 		     ((mval->br_startoff + mval->br_blockcount) <=
385430f712c9SDave Chinner 		      obno))) {
385530f712c9SDave Chinner 		mval++;
385630f712c9SDave Chinner 		(*n)++;
385730f712c9SDave Chinner 	}
385830f712c9SDave Chinner 	*map = mval;
385930f712c9SDave Chinner }
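
/*
 * For example (illustrative numbers): if the previous entry maps file offset
 * 50, 10 blocks starting at disk block 190, and the next mapping starts at
 * file offset 60, disk block 200, with the same extent state, the two are
 * merged into a single 20 block record and no extra map entry is consumed.
 */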
386030f712c9SDave Chinner 
386130f712c9SDave Chinner /*
386230f712c9SDave Chinner  * Map file blocks to filesystem blocks without allocation.
386330f712c9SDave Chinner  */
386430f712c9SDave Chinner int
386530f712c9SDave Chinner xfs_bmapi_read(
386630f712c9SDave Chinner 	struct xfs_inode	*ip,
386730f712c9SDave Chinner 	xfs_fileoff_t		bno,
386830f712c9SDave Chinner 	xfs_filblks_t		len,
386930f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,
387030f712c9SDave Chinner 	int			*nmap,
3871e7d410acSDave Chinner 	uint32_t		flags)
387230f712c9SDave Chinner {
387330f712c9SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
38744b516ff4SChristoph Hellwig 	int			whichfork = xfs_bmapi_whichfork(flags);
3875732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
387630f712c9SDave Chinner 	struct xfs_bmbt_irec	got;
387730f712c9SDave Chinner 	xfs_fileoff_t		obno;
387830f712c9SDave Chinner 	xfs_fileoff_t		end;
3879b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
388030f712c9SDave Chinner 	int			error;
3881334f3423SChristoph Hellwig 	bool			eof = false;
388230f712c9SDave Chinner 	int			n = 0;
388330f712c9SDave Chinner 
388430f712c9SDave Chinner 	ASSERT(*nmap >= 1);
38851a1c57b2SChristoph Hellwig 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
388630f712c9SDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
388730f712c9SDave Chinner 
38884b516ff4SChristoph Hellwig 	if (WARN_ON_ONCE(!ifp))
38894b516ff4SChristoph Hellwig 		return -EFSCORRUPTED;
38904b516ff4SChristoph Hellwig 
3891f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3892f7e67b20SChristoph Hellwig 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
38932451337dSDave Chinner 		return -EFSCORRUPTED;
389430f712c9SDave Chinner 
389575c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
38962451337dSDave Chinner 		return -EIO;
389730f712c9SDave Chinner 
3898ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_blk_mapr);
389930f712c9SDave Chinner 
390030f712c9SDave Chinner 	error = xfs_iread_extents(NULL, ip, whichfork);
390130f712c9SDave Chinner 	if (error)
390230f712c9SDave Chinner 		return error;
390330f712c9SDave Chinner 
3904b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3905334f3423SChristoph Hellwig 		eof = true;
390630f712c9SDave Chinner 	end = bno + len;
390730f712c9SDave Chinner 	obno = bno;
390830f712c9SDave Chinner 
390930f712c9SDave Chinner 	while (bno < end && n < *nmap) {
391030f712c9SDave Chinner 		/* Reading past eof, act as though there's a hole up to end. */
391130f712c9SDave Chinner 		if (eof)
391230f712c9SDave Chinner 			got.br_startoff = end;
391330f712c9SDave Chinner 		if (got.br_startoff > bno) {
391430f712c9SDave Chinner 			/* Reading in a hole.  */
391530f712c9SDave Chinner 			mval->br_startoff = bno;
391630f712c9SDave Chinner 			mval->br_startblock = HOLESTARTBLOCK;
391730f712c9SDave Chinner 			mval->br_blockcount =
391830f712c9SDave Chinner 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
391930f712c9SDave Chinner 			mval->br_state = XFS_EXT_NORM;
392030f712c9SDave Chinner 			bno += mval->br_blockcount;
392130f712c9SDave Chinner 			len -= mval->br_blockcount;
392230f712c9SDave Chinner 			mval++;
392330f712c9SDave Chinner 			n++;
392430f712c9SDave Chinner 			continue;
392530f712c9SDave Chinner 		}
392630f712c9SDave Chinner 
392730f712c9SDave Chinner 		/* set up the extent map to return. */
392830f712c9SDave Chinner 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
392930f712c9SDave Chinner 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
393030f712c9SDave Chinner 
393130f712c9SDave Chinner 		/* If we're done, stop now. */
393230f712c9SDave Chinner 		if (bno >= end || n >= *nmap)
393330f712c9SDave Chinner 			break;
393430f712c9SDave Chinner 
393530f712c9SDave Chinner 		/* Else go on to the next record. */
3936b2b1712aSChristoph Hellwig 		if (!xfs_iext_next_extent(ifp, &icur, &got))
3937334f3423SChristoph Hellwig 			eof = true;
393830f712c9SDave Chinner 	}
393930f712c9SDave Chinner 	*nmap = n;
394030f712c9SDave Chinner 	return 0;
394130f712c9SDave Chinner }
394230f712c9SDave Chinner 
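
/*
 * Typical caller pattern (a sketch, not from the original source; the
 * offset_fsb and count_fsb variables are illustrative).  The caller must
 * hold the ILOCK in at least shared mode:
 *
 *	struct xfs_bmbt_irec	map[2];
 *	int			nmap = 2;
 *	int			error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	error = xfs_bmapi_read(ip, offset_fsb, count_fsb, map, &nmap, 0);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * On success, map[0..nmap - 1] describe holes (HOLESTARTBLOCK), delayed
 * allocations (DELAYSTARTBLOCK) and real extents covering the range.
 */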
3943f65e6fadSBrian Foster /*
3944f65e6fadSBrian Foster  * Add a delayed allocation extent to an inode. Blocks are reserved from the
3945f65e6fadSBrian Foster  * global pool and the extent inserted into the inode in-core extent tree.
3946f65e6fadSBrian Foster  *
3947f65e6fadSBrian Foster  * On entry, got refers to the first extent beyond the offset of the extent to
3948f65e6fadSBrian Foster  * allocate or eof is specified if no such extent exists. On return, got refers
3949f65e6fadSBrian Foster  * to the extent record that was inserted to the inode fork.
3950f65e6fadSBrian Foster  *
3951f65e6fadSBrian Foster  * Note that the allocated extent may have been merged with contiguous extents
3952f65e6fadSBrian Foster  * during insertion into the inode fork. Thus, got does not reflect the current
3953f65e6fadSBrian Foster  * state of the inode fork on return. If necessary, the caller can use icur to
3954f65e6fadSBrian Foster  * look up the updated record in the inode fork.
3955f65e6fadSBrian Foster  */
395651446f5bSChristoph Hellwig int
395730f712c9SDave Chinner xfs_bmapi_reserve_delalloc(
395830f712c9SDave Chinner 	struct xfs_inode	*ip,
3959be51f811SDarrick J. Wong 	int			whichfork,
3960974ae922SBrian Foster 	xfs_fileoff_t		off,
396130f712c9SDave Chinner 	xfs_filblks_t		len,
3962974ae922SBrian Foster 	xfs_filblks_t		prealloc,
396330f712c9SDave Chinner 	struct xfs_bmbt_irec	*got,
3964b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
396530f712c9SDave Chinner 	int			eof)
396630f712c9SDave Chinner {
396730f712c9SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
3968732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
396930f712c9SDave Chinner 	xfs_extlen_t		alen;
397030f712c9SDave Chinner 	xfs_extlen_t		indlen;
397130f712c9SDave Chinner 	int			error;
3972974ae922SBrian Foster 	xfs_fileoff_t		aoff = off;
397330f712c9SDave Chinner 
3974974ae922SBrian Foster 	/*
3975974ae922SBrian Foster 	 * Cap the alloc length. Keep track of prealloc so we know whether to
3976974ae922SBrian Foster 	 * tag the inode before we return.
3977974ae922SBrian Foster 	 */
397895f0b95eSChandan Babu R 	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
397930f712c9SDave Chinner 	if (!eof)
398030f712c9SDave Chinner 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3981974ae922SBrian Foster 	if (prealloc && alen >= len)
3982974ae922SBrian Foster 		prealloc = alen - len;
398330f712c9SDave Chinner 
398430f712c9SDave Chinner 	/* Figure out the extent size, adjust alen */
39856ca30729SShan Hai 	if (whichfork == XFS_COW_FORK) {
398665c5f419SChristoph Hellwig 		struct xfs_bmbt_irec	prev;
39876ca30729SShan Hai 		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);
398865c5f419SChristoph Hellwig 
3989b2b1712aSChristoph Hellwig 		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
399065c5f419SChristoph Hellwig 			prev.br_startoff = NULLFILEOFF;
399165c5f419SChristoph Hellwig 
39926ca30729SShan Hai 		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
399330f712c9SDave Chinner 					       1, 0, &aoff, &alen);
399430f712c9SDave Chinner 		ASSERT(!error);
399530f712c9SDave Chinner 	}
399630f712c9SDave Chinner 
399730f712c9SDave Chinner 	/*
399830f712c9SDave Chinner 	 * Make a transaction-less quota reservation for delayed allocation
399930f712c9SDave Chinner 	 * blocks.  This number gets adjusted later.  If the reservation
400030f712c9SDave Chinner 	 * fails, we bail out before having allocated anything.
400130f712c9SDave Chinner 	 */
400285546500SDarrick J. Wong 	error = xfs_quota_reserve_blkres(ip, alen);
400330f712c9SDave Chinner 	if (error)
400430f712c9SDave Chinner 		return error;
400530f712c9SDave Chinner 
400630f712c9SDave Chinner 	/*
400730f712c9SDave Chinner 	 * Modify the superblock counters separately for alen and indlen
400830f712c9SDave Chinner 	 * since they could be coming from different places.
400930f712c9SDave Chinner 	 */
401030f712c9SDave Chinner 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
401130f712c9SDave Chinner 	ASSERT(indlen > 0);
401230f712c9SDave Chinner 
40130d485adaSDave Chinner 	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
401430f712c9SDave Chinner 	if (error)
401530f712c9SDave Chinner 		goto out_unreserve_quota;
401630f712c9SDave Chinner 
40170d485adaSDave Chinner 	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
401830f712c9SDave Chinner 	if (error)
401930f712c9SDave Chinner 		goto out_unreserve_blocks;
402030f712c9SDave Chinner 
402130f712c9SDave Chinner 
40239fe82b8cSDarrick J. Wong 	xfs_mod_delalloc(ip->i_mount, alen + indlen);
402430f712c9SDave Chinner 
402530f712c9SDave Chinner 	got->br_startoff = aoff;
402630f712c9SDave Chinner 	got->br_startblock = nullstartblock(indlen);
402730f712c9SDave Chinner 	got->br_blockcount = alen;
402830f712c9SDave Chinner 	got->br_state = XFS_EXT_NORM;
402930f712c9SDave Chinner 
4030b2b1712aSChristoph Hellwig 	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
403130f712c9SDave Chinner 
4032974ae922SBrian Foster 	/*
4033974ae922SBrian Foster 	 * Tag the inode if blocks were preallocated. Note that COW fork
4034974ae922SBrian Foster 	 * preallocation can occur at the start or end of the extent, even when
4035974ae922SBrian Foster 	 * prealloc == 0, so we must also check the aligned offset and length.
4036974ae922SBrian Foster 	 */
4037974ae922SBrian Foster 	if (whichfork == XFS_DATA_FORK && prealloc)
4038974ae922SBrian Foster 		xfs_inode_set_eofblocks_tag(ip);
4039974ae922SBrian Foster 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4040974ae922SBrian Foster 		xfs_inode_set_cowblocks_tag(ip);
4041974ae922SBrian Foster 
404230f712c9SDave Chinner 	return 0;
404330f712c9SDave Chinner 
404430f712c9SDave Chinner out_unreserve_blocks:
40450d485adaSDave Chinner 	xfs_mod_fdblocks(mp, alen, false);
404630f712c9SDave Chinner out_unreserve_quota:
404730f712c9SDave Chinner 	if (XFS_IS_QUOTA_ON(mp))
404885546500SDarrick J. Wong 		xfs_quota_unreserve_blkres(ip, alen);
404930f712c9SDave Chinner 	return error;
405030f712c9SDave Chinner }
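
/*
 * Note on the reservation split above (illustrative): a delalloc reservation
 * of alen data blocks also reserves indlen worst-case indirect blocks for
 * the bmap btree growth that converting the extent to real blocks may
 * require.  Both are taken from the free block counter without a
 * transaction, and any unused indirect reservation may be given back when
 * the extent is eventually allocated.
 */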
405130f712c9SDave Chinner 
40527f8a058fSDave Chinner static int
4053be6cacbeSChristoph Hellwig xfs_bmap_alloc_userdata(
4054be6cacbeSChristoph Hellwig 	struct xfs_bmalloca	*bma)
4055be6cacbeSChristoph Hellwig {
4056be6cacbeSChristoph Hellwig 	struct xfs_mount	*mp = bma->ip->i_mount;
4057be6cacbeSChristoph Hellwig 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4058be6cacbeSChristoph Hellwig 	int			error;
4059be6cacbeSChristoph Hellwig 
4060be6cacbeSChristoph Hellwig 	/*
4061be6cacbeSChristoph Hellwig 	 * Set the data type being allocated. For the data fork, the first data
4062be6cacbeSChristoph Hellwig 	 * in the file is treated differently to all other allocations. For the
4063be6cacbeSChristoph Hellwig 	 * in the file is treated differently from all other allocations. For the
4064be6cacbeSChristoph Hellwig 	 * the busy list.
4065be6cacbeSChristoph Hellwig 	 */
4066be6cacbeSChristoph Hellwig 	bma->datatype = XFS_ALLOC_NOBUSY;
4067ddfdd530SDarrick J. Wong 	if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4068c34d570dSChristoph Hellwig 		bma->datatype |= XFS_ALLOC_USERDATA;
4069be6cacbeSChristoph Hellwig 		if (bma->offset == 0)
4070be6cacbeSChristoph Hellwig 			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4071be6cacbeSChristoph Hellwig 
4072be6cacbeSChristoph Hellwig 		if (mp->m_dalign && bma->length >= mp->m_dalign) {
4073be6cacbeSChristoph Hellwig 			error = xfs_bmap_isaeof(bma, whichfork);
4074be6cacbeSChristoph Hellwig 			if (error)
4075be6cacbeSChristoph Hellwig 				return error;
4076be6cacbeSChristoph Hellwig 		}
4077be6cacbeSChristoph Hellwig 
4078be6cacbeSChristoph Hellwig 		if (XFS_IS_REALTIME_INODE(bma->ip))
4079be6cacbeSChristoph Hellwig 			return xfs_bmap_rtalloc(bma);
4080be6cacbeSChristoph Hellwig 	}
4081be6cacbeSChristoph Hellwig 
408230151967SChandan Babu R 	if (unlikely(XFS_TEST_ERROR(false, mp,
408330151967SChandan Babu R 			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
408430151967SChandan Babu R 		return xfs_bmap_exact_minlen_extent_alloc(bma);
408530151967SChandan Babu R 
4086be6cacbeSChristoph Hellwig 	return xfs_bmap_btalloc(bma);
4087be6cacbeSChristoph Hellwig }
4088be6cacbeSChristoph Hellwig 
4089be6cacbeSChristoph Hellwig static int
40907f8a058fSDave Chinner xfs_bmapi_allocate(
409130f712c9SDave Chinner 	struct xfs_bmalloca	*bma)
409230f712c9SDave Chinner {
409330f712c9SDave Chinner 	struct xfs_mount	*mp = bma->ip->i_mount;
409460b4984fSDarrick J. Wong 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4095732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
409630f712c9SDave Chinner 	int			tmp_logflags = 0;
409730f712c9SDave Chinner 	int			error;
409830f712c9SDave Chinner 
409930f712c9SDave Chinner 	ASSERT(bma->length > 0);
410030f712c9SDave Chinner 
410130f712c9SDave Chinner 	/*
410230f712c9SDave Chinner 	 * For the wasdelay case, we could just allocate the range asked for in
410330f712c9SDave Chinner 	 * this bmap call, but converting the whole delalloc extent is better.
410430f712c9SDave Chinner 	 */
410530f712c9SDave Chinner 	if (bma->wasdel) {
410630f712c9SDave Chinner 		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
410730f712c9SDave Chinner 		bma->offset = bma->got.br_startoff;
4108f5be0844SDarrick J. Wong 		if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
4109f5be0844SDarrick J. Wong 			bma->prev.br_startoff = NULLFILEOFF;
411030f712c9SDave Chinner 	} else {
411195f0b95eSChandan Babu R 		bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN);
411230f712c9SDave Chinner 		if (!bma->eof)
411330f712c9SDave Chinner 			bma->length = XFS_FILBLKS_MIN(bma->length,
411430f712c9SDave Chinner 					bma->got.br_startoff - bma->offset);
411530f712c9SDave Chinner 	}
411630f712c9SDave Chinner 
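	/*
	 * XFS_BMAPI_CONTIG callers need the whole range in a single extent,
	 * so the request must not be shortened; everyone else can make
	 * progress with as little as one block.
	 */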
4117be6cacbeSChristoph Hellwig 	if (bma->flags & XFS_BMAPI_CONTIG)
4118be6cacbeSChristoph Hellwig 		bma->minlen = bma->length;
4119ce840429SDarrick J. Wong 	else
4120be6cacbeSChristoph Hellwig 		bma->minlen = 1;
412130f712c9SDave Chinner 
412230151967SChandan Babu R 	if (bma->flags & XFS_BMAPI_METADATA) {
412330151967SChandan Babu R 		if (unlikely(XFS_TEST_ERROR(false, mp,
412430151967SChandan Babu R 				XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
412530151967SChandan Babu R 			error = xfs_bmap_exact_minlen_extent_alloc(bma);
4126be6cacbeSChristoph Hellwig 		else
412730151967SChandan Babu R 			error = xfs_bmap_btalloc(bma);
412830151967SChandan Babu R 	} else {
4129be6cacbeSChristoph Hellwig 		error = xfs_bmap_alloc_userdata(bma);
413030151967SChandan Babu R 	}
4131be6cacbeSChristoph Hellwig 	if (error || bma->blkno == NULLFSBLOCK)
413230f712c9SDave Chinner 		return error;
413330f712c9SDave Chinner 
4134fd638f1dSChristoph Hellwig 	if (bma->flags & XFS_BMAPI_ZERO) {
4135fd638f1dSChristoph Hellwig 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4136fd638f1dSChristoph Hellwig 		if (error)
4137fd638f1dSChristoph Hellwig 			return error;
4138fd638f1dSChristoph Hellwig 	}
4139fd638f1dSChristoph Hellwig 
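	/* Lazily create a bmbt cursor the first time a btree fork needs one. */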
4140ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
414130f712c9SDave Chinner 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
414230f712c9SDave Chinner 	/*
414330f712c9SDave Chinner 	 * Bump the number of extents we've allocated
414430f712c9SDave Chinner 	 * in this call.
414530f712c9SDave Chinner 	 */
414630f712c9SDave Chinner 	bma->nallocs++;
414730f712c9SDave Chinner 
414830f712c9SDave Chinner 	if (bma->cur)
414992219c29SDave Chinner 		bma->cur->bc_ino.flags =
41508ef54797SDave Chinner 			bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
415130f712c9SDave Chinner 
415230f712c9SDave Chinner 	bma->got.br_startoff = bma->offset;
415330f712c9SDave Chinner 	bma->got.br_startblock = bma->blkno;
415430f712c9SDave Chinner 	bma->got.br_blockcount = bma->length;
415530f712c9SDave Chinner 	bma->got.br_state = XFS_EXT_NORM;
415630f712c9SDave Chinner 
4157a5949d3fSDarrick J. Wong 	if (bma->flags & XFS_BMAPI_PREALLOC)
415830f712c9SDave Chinner 		bma->got.br_state = XFS_EXT_UNWRITTEN;
415930f712c9SDave Chinner 
416030f712c9SDave Chinner 	if (bma->wasdel)
416160b4984fSDarrick J. Wong 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
416230f712c9SDave Chinner 	else
41636d04558fSChristoph Hellwig 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4164b2b1712aSChristoph Hellwig 				whichfork, &bma->icur, &bma->cur, &bma->got,
416592f9da30SBrian Foster 				&bma->logflags, bma->flags);
416630f712c9SDave Chinner 
416730f712c9SDave Chinner 	bma->logflags |= tmp_logflags;
416830f712c9SDave Chinner 	if (error)
416930f712c9SDave Chinner 		return error;
417030f712c9SDave Chinner 
417130f712c9SDave Chinner 	/*
417230f712c9SDave Chinner 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
417330f712c9SDave Chinner 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
417430f712c9SDave Chinner 	 * the neighbouring ones.
417530f712c9SDave Chinner 	 */
4176b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
417730f712c9SDave Chinner 
417830f712c9SDave Chinner 	ASSERT(bma->got.br_startoff <= bma->offset);
417930f712c9SDave Chinner 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
418030f712c9SDave Chinner 	       bma->offset + bma->length);
418130f712c9SDave Chinner 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
418230f712c9SDave Chinner 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
418330f712c9SDave Chinner 	return 0;
418430f712c9SDave Chinner }
418530f712c9SDave Chinner 
418630f712c9SDave Chinner STATIC int
418730f712c9SDave Chinner xfs_bmapi_convert_unwritten(
418830f712c9SDave Chinner 	struct xfs_bmalloca	*bma,
418930f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,
419030f712c9SDave Chinner 	xfs_filblks_t		len,
4191e7d410acSDave Chinner 	uint32_t		flags)
419230f712c9SDave Chinner {
41933993baebSDarrick J. Wong 	int			whichfork = xfs_bmapi_whichfork(flags);
4194732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
419530f712c9SDave Chinner 	int			tmp_logflags = 0;
419630f712c9SDave Chinner 	int			error;
419730f712c9SDave Chinner 
419830f712c9SDave Chinner 	/* check if we need to do unwritten->real conversion */
419930f712c9SDave Chinner 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
420030f712c9SDave Chinner 	    (flags & XFS_BMAPI_PREALLOC))
420130f712c9SDave Chinner 		return 0;
420230f712c9SDave Chinner 
420330f712c9SDave Chinner 	/* check if we need to do real->unwritten conversion */
420430f712c9SDave Chinner 	if (mval->br_state == XFS_EXT_NORM &&
420530f712c9SDave Chinner 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
420630f712c9SDave Chinner 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
420730f712c9SDave Chinner 		return 0;
420830f712c9SDave Chinner 
420930f712c9SDave Chinner 	/*
421130f712c9SDave Chinner 	 * Flip the extent between the written and unwritten states.
421130f712c9SDave Chinner 	 */
421230f712c9SDave Chinner 	ASSERT(mval->br_blockcount <= len);
4213ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
421430f712c9SDave Chinner 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
421530f712c9SDave Chinner 					bma->ip, whichfork);
421630f712c9SDave Chinner 	}
421730f712c9SDave Chinner 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
421830f712c9SDave Chinner 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
421930f712c9SDave Chinner 
42203fbbbea3SDave Chinner 	/*
42213fbbbea3SDave Chinner 	 * Before insertion into the bmbt, zero the range being converted
42223fbbbea3SDave Chinner 	 * if required.
42233fbbbea3SDave Chinner 	 */
42243fbbbea3SDave Chinner 	if (flags & XFS_BMAPI_ZERO) {
42253fbbbea3SDave Chinner 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
42263fbbbea3SDave Chinner 					mval->br_blockcount);
42273fbbbea3SDave Chinner 		if (error)
42283fbbbea3SDave Chinner 			return error;
42293fbbbea3SDave Chinner 	}
42303fbbbea3SDave Chinner 
423105a630d7SDarrick J. Wong 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
423292f9da30SBrian Foster 			&bma->icur, &bma->cur, mval, &tmp_logflags);
42332e588a46SBrian Foster 	/*
42342e588a46SBrian Foster 	 * Log the inode core unconditionally in the unwritten extent conversion
42352e588a46SBrian Foster 	 * path because the conversion might not have done so (e.g., if the
42362e588a46SBrian Foster 	 * extent count hasn't changed). We need to make sure the inode is dirty
42372e588a46SBrian Foster 	 * in the transaction for the sake of fsync(), even if nothing has
42382e588a46SBrian Foster 	 * changed, because fsync() will not force the log for this transaction
42392e588a46SBrian Foster 	 * unless it sees the inode pinned.
424005a630d7SDarrick J. Wong 	 *
424105a630d7SDarrick J. Wong 	 * Note: If we're only converting cow fork extents, there aren't
424205a630d7SDarrick J. Wong 	 * any on-disk updates to make, so we don't need to log anything.
42432e588a46SBrian Foster 	 */
424405a630d7SDarrick J. Wong 	if (whichfork != XFS_COW_FORK)
42452e588a46SBrian Foster 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
424630f712c9SDave Chinner 	if (error)
424730f712c9SDave Chinner 		return error;
424830f712c9SDave Chinner 
424930f712c9SDave Chinner 	/*
425030f712c9SDave Chinner 	 * Update our extent pointer, given that
425130f712c9SDave Chinner 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
425230f712c9SDave Chinner 	 * of the neighbouring ones.
425330f712c9SDave Chinner 	 */
4254b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
425530f712c9SDave Chinner 
425630f712c9SDave Chinner 	/*
425730f712c9SDave Chinner 	 * We may have combined previously unwritten space with written space,
425830f712c9SDave Chinner 	 * so generate another request.
425930f712c9SDave Chinner 	 */
426030f712c9SDave Chinner 	if (mval->br_blockcount < len)
42612451337dSDave Chinner 		return -EAGAIN;
426230f712c9SDave Chinner 	return 0;
426330f712c9SDave Chinner }
426430f712c9SDave Chinner 
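/*
 * Compute the minimum number of blocks that must be kept free for a
 * subsequent bmbt update: none if this transaction is already committed to
 * a single AG, one block for an extent format fork, or enough blocks to
 * split the full height of the bmap btree otherwise.
 */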
4265d5753847SDave Chinner xfs_extlen_t
4266c8b54673SChristoph Hellwig xfs_bmapi_minleft(
4267c8b54673SChristoph Hellwig 	struct xfs_trans	*tp,
4268c8b54673SChristoph Hellwig 	struct xfs_inode	*ip,
4269c8b54673SChristoph Hellwig 	int			fork)
4270c8b54673SChristoph Hellwig {
4271732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);
4272f7e67b20SChristoph Hellwig 
4273692b6cddSDave Chinner 	if (tp && tp->t_highest_agno != NULLAGNUMBER)
4274c8b54673SChristoph Hellwig 		return 0;
4275f7e67b20SChristoph Hellwig 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4276c8b54673SChristoph Hellwig 		return 1;
4277f7e67b20SChristoph Hellwig 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4278c8b54673SChristoph Hellwig }
4279c8b54673SChristoph Hellwig 
4280c8b54673SChristoph Hellwig /*
4281c8b54673SChristoph Hellwig  * Log whatever the flags say, even on error.  Otherwise we might miss detecting
4282c8b54673SChristoph Hellwig  * a case where the data is changed, there's an error, and it's not logged, so we
4283c8b54673SChristoph Hellwig  * don't shut down when we should.  Don't bother logging extent/btree changes if
4284c8b54673SChristoph Hellwig  * we converted the fork to the other format.
4285c8b54673SChristoph Hellwig  */
4286c8b54673SChristoph Hellwig static void
4287c8b54673SChristoph Hellwig xfs_bmapi_finish(
4288c8b54673SChristoph Hellwig 	struct xfs_bmalloca	*bma,
4289c8b54673SChristoph Hellwig 	int			whichfork,
4290c8b54673SChristoph Hellwig 	int			error)
4291c8b54673SChristoph Hellwig {
4292732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4293f7e67b20SChristoph Hellwig 
4294c8b54673SChristoph Hellwig 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4295f7e67b20SChristoph Hellwig 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4296c8b54673SChristoph Hellwig 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4297c8b54673SChristoph Hellwig 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4298f7e67b20SChristoph Hellwig 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4299c8b54673SChristoph Hellwig 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4300c8b54673SChristoph Hellwig 
4301c8b54673SChristoph Hellwig 	if (bma->logflags)
4302c8b54673SChristoph Hellwig 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4303c8b54673SChristoph Hellwig 	if (bma->cur)
4304c8b54673SChristoph Hellwig 		xfs_btree_del_cursor(bma->cur, error);
4305c8b54673SChristoph Hellwig }
4306c8b54673SChristoph Hellwig 
430730f712c9SDave Chinner /*
430830f712c9SDave Chinner  * Map file blocks to filesystem blocks, and allocate blocks or convert the
430930f712c9SDave Chinner  * extent state if necessary.  Detailed behaviour is controlled by the flags
431030f712c9SDave Chinner  * parameter.  Only allocates blocks from a single allocation group, to avoid
431130f712c9SDave Chinner  * locking problems.
431230f712c9SDave Chinner  */
431330f712c9SDave Chinner int
431430f712c9SDave Chinner xfs_bmapi_write(
431530f712c9SDave Chinner 	struct xfs_trans	*tp,		/* transaction pointer */
431630f712c9SDave Chinner 	struct xfs_inode	*ip,		/* incore inode */
431730f712c9SDave Chinner 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
431830f712c9SDave Chinner 	xfs_filblks_t		len,		/* length to map in file */
4319e7d410acSDave Chinner 	uint32_t		flags,		/* XFS_BMAPI_... */
432030f712c9SDave Chinner 	xfs_extlen_t		total,		/* total blocks needed */
432130f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,		/* output: map values */
43226e702a5dSBrian Foster 	int			*nmap)		/* i/o: mval size/count */
432330f712c9SDave Chinner {
43244b0bce30SDarrick J. Wong 	struct xfs_bmalloca	bma = {
43254b0bce30SDarrick J. Wong 		.tp		= tp,
43264b0bce30SDarrick J. Wong 		.ip		= ip,
43274b0bce30SDarrick J. Wong 		.total		= total,
43284b0bce30SDarrick J. Wong 	};
432930f712c9SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
4330f7e67b20SChristoph Hellwig 	int			whichfork = xfs_bmapi_whichfork(flags);
4331732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
433230f712c9SDave Chinner 	xfs_fileoff_t		end;		/* end of mapped file region */
43332d58f6efSChristoph Hellwig 	bool			eof = false;	/* after the end of extents */
433430f712c9SDave Chinner 	int			error;		/* error return */
433530f712c9SDave Chinner 	int			n;		/* current extent index */
433630f712c9SDave Chinner 	xfs_fileoff_t		obno;		/* old block number (offset) */
433730f712c9SDave Chinner 
433830f712c9SDave Chinner #ifdef DEBUG
433930f712c9SDave Chinner 	xfs_fileoff_t		orig_bno;	/* original block number value */
434030f712c9SDave Chinner 	int			orig_flags;	/* original flags arg value */
434130f712c9SDave Chinner 	xfs_filblks_t		orig_len;	/* original value of len arg */
434230f712c9SDave Chinner 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
434330f712c9SDave Chinner 	int			orig_nmap;	/* original value of *nmap */
434430f712c9SDave Chinner 
434530f712c9SDave Chinner 	orig_bno = bno;
434630f712c9SDave Chinner 	orig_len = len;
434730f712c9SDave Chinner 	orig_flags = flags;
434830f712c9SDave Chinner 	orig_mval = mval;
434930f712c9SDave Chinner 	orig_nmap = *nmap;
435030f712c9SDave Chinner #endif
435130f712c9SDave Chinner 
435230f712c9SDave Chinner 	ASSERT(*nmap >= 1);
435330f712c9SDave Chinner 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
435426b91c72SChristoph Hellwig 	ASSERT(tp != NULL);
435530f712c9SDave Chinner 	ASSERT(len > 0);
4356f7e67b20SChristoph Hellwig 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
435730f712c9SDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
43586ebd5a44SChristoph Hellwig 	ASSERT(!(flags & XFS_BMAPI_REMAP));
435930f712c9SDave Chinner 
43603fbbbea3SDave Chinner 	/* zeroing is currently only supported for data extents, not metadata */
43613fbbbea3SDave Chinner 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
43623fbbbea3SDave Chinner 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
43633fbbbea3SDave Chinner 	/*
43643fbbbea3SDave Chinner 	 * We can allocate unwritten extents or pre-zero allocated blocks,
43653fbbbea3SDave Chinner 	 * but it makes no sense to do both at once: we would zero blocks
43663fbbbea3SDave Chinner 	 * that are still marked unwritten, so the pre-zeroing would be
43673fbbbea3SDave Chinner 	 * wasted work.
43683fbbbea3SDave Chinner 	 */
43693fbbbea3SDave Chinner 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
43703fbbbea3SDave Chinner 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
43713fbbbea3SDave Chinner 
4372f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4373a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
43742451337dSDave Chinner 		return -EFSCORRUPTED;
437530f712c9SDave Chinner 	}
437630f712c9SDave Chinner 
437775c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
43782451337dSDave Chinner 		return -EIO;
437930f712c9SDave Chinner 
4380ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_blk_mapw);
438130f712c9SDave Chinner 
438230f712c9SDave Chinner 	error = xfs_iread_extents(tp, ip, whichfork);
438330f712c9SDave Chinner 	if (error)
438430f712c9SDave Chinner 		goto error0;
438530f712c9SDave Chinner 
4386b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
43872d58f6efSChristoph Hellwig 		eof = true;
4388b2b1712aSChristoph Hellwig 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
43892d58f6efSChristoph Hellwig 		bma.prev.br_startoff = NULLFILEOFF;
4390c8b54673SChristoph Hellwig 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
439130f712c9SDave Chinner 
4392627209fbSBrian Foster 	n = 0;
4393627209fbSBrian Foster 	end = bno + len;
4394627209fbSBrian Foster 	obno = bno;
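	/*
	 * Walk the requested range one mapping at a time: allocate space for
	 * holes and delalloc reservations, trim the resulting extent to the
	 * request, and convert between written and unwritten state where the
	 * flags ask for it, until the range or the mval array is exhausted.
	 */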
439530f712c9SDave Chinner 	while (bno < end && n < *nmap) {
4396d2b3964aSChristoph Hellwig 		bool			need_alloc = false, wasdelay = false;
439730f712c9SDave Chinner 
4398be78ff0eSDarrick J. Wong 		/* in hole or beyond EOF? */
4399d2b3964aSChristoph Hellwig 		if (eof || bma.got.br_startoff > bno) {
4400be78ff0eSDarrick J. Wong 			/*
4401be78ff0eSDarrick J. Wong 			 * CoW fork conversions should /never/ hit EOF or
4402be78ff0eSDarrick J. Wong 			 * holes.  There should always be something for us
4403be78ff0eSDarrick J. Wong 			 * to work on.
4404be78ff0eSDarrick J. Wong 			 */
4405be78ff0eSDarrick J. Wong 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4406be78ff0eSDarrick J. Wong 			         (flags & XFS_BMAPI_COWFORK)));
4407be78ff0eSDarrick J. Wong 
4408d2b3964aSChristoph Hellwig 			need_alloc = true;
44096ebd5a44SChristoph Hellwig 		} else if (isnullstartblock(bma.got.br_startblock)) {
4410d2b3964aSChristoph Hellwig 			wasdelay = true;
4411d2b3964aSChristoph Hellwig 		}
4412f65306eaSDarrick J. Wong 
4413f65306eaSDarrick J. Wong 		/*
441430f712c9SDave Chinner 		 * First, deal with the hole before the allocated space
441530f712c9SDave Chinner 		 * that we found, if any.
441630f712c9SDave Chinner 		 */
441726b91c72SChristoph Hellwig 		if (need_alloc || wasdelay) {
441830f712c9SDave Chinner 			bma.eof = eof;
441930f712c9SDave Chinner 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
442030f712c9SDave Chinner 			bma.wasdel = wasdelay;
442130f712c9SDave Chinner 			bma.offset = bno;
442230f712c9SDave Chinner 			bma.flags = flags;
442330f712c9SDave Chinner 
442430f712c9SDave Chinner 			/*
442530f712c9SDave Chinner 			 * There's a 32/64 bit type mismatch between the
442630f712c9SDave Chinner 			 * allocation length request (which can be 64 bits in
442730f712c9SDave Chinner 			 * length) and the bma length request, which is
442830f712c9SDave Chinner 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
442930f712c9SDave Chinner 			 * check for 32-bit overflows and handle them here.
443030f712c9SDave Chinner 			 */
443195f0b95eSChandan Babu R 			if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN)
443295f0b95eSChandan Babu R 				bma.length = XFS_MAX_BMBT_EXTLEN;
443330f712c9SDave Chinner 			else
443430f712c9SDave Chinner 				bma.length = len;
443530f712c9SDave Chinner 
443630f712c9SDave Chinner 			ASSERT(len > 0);
443730f712c9SDave Chinner 			ASSERT(bma.length > 0);
443830f712c9SDave Chinner 			error = xfs_bmapi_allocate(&bma);
443930f712c9SDave Chinner 			if (error)
444030f712c9SDave Chinner 				goto error0;
444130f712c9SDave Chinner 			if (bma.blkno == NULLFSBLOCK)
444230f712c9SDave Chinner 				break;
4443174edb0eSDarrick J. Wong 
4444174edb0eSDarrick J. Wong 			/*
4445174edb0eSDarrick J. Wong 			 * If this is a CoW allocation, record the data in
4446174edb0eSDarrick J. Wong 			 * the refcount btree for orphan recovery.
4447174edb0eSDarrick J. Wong 			 */
444874b4c5d4SDarrick J. Wong 			if (whichfork == XFS_COW_FORK)
444974b4c5d4SDarrick J. Wong 				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
445074b4c5d4SDarrick J. Wong 						bma.length);
445130f712c9SDave Chinner 		}
445230f712c9SDave Chinner 
445330f712c9SDave Chinner 		/* Deal with the allocated space we found.  */
445430f712c9SDave Chinner 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
445530f712c9SDave Chinner 							end, n, flags);
445630f712c9SDave Chinner 
445730f712c9SDave Chinner 		/* Execute unwritten extent conversion if necessary */
445830f712c9SDave Chinner 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
44592451337dSDave Chinner 		if (error == -EAGAIN)
446030f712c9SDave Chinner 			continue;
446130f712c9SDave Chinner 		if (error)
446230f712c9SDave Chinner 			goto error0;
446330f712c9SDave Chinner 
446430f712c9SDave Chinner 		/* update the extent map to return */
446530f712c9SDave Chinner 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
446630f712c9SDave Chinner 
446730f712c9SDave Chinner 		/*
446830f712c9SDave Chinner 		 * If we're done, stop now.  Stop when we've allocated
446930f712c9SDave Chinner 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
447030f712c9SDave Chinner 		 * the transaction may get too big.
447130f712c9SDave Chinner 		 */
447230f712c9SDave Chinner 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
447330f712c9SDave Chinner 			break;
447430f712c9SDave Chinner 
447530f712c9SDave Chinner 		/* Else go on to the next record. */
447630f712c9SDave Chinner 		bma.prev = bma.got;
4477b2b1712aSChristoph Hellwig 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
44782d58f6efSChristoph Hellwig 			eof = true;
447930f712c9SDave Chinner 	}
448030f712c9SDave Chinner 	*nmap = n;
448130f712c9SDave Chinner 
4482b101e334SChristoph Hellwig 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4483b101e334SChristoph Hellwig 			whichfork);
448430f712c9SDave Chinner 	if (error)
448530f712c9SDave Chinner 		goto error0;
448630f712c9SDave Chinner 
4487f7e67b20SChristoph Hellwig 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4488daf83964SChristoph Hellwig 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4489c8b54673SChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, 0);
449030f712c9SDave Chinner 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
449130f712c9SDave Chinner 		orig_nmap, *nmap);
4492c8b54673SChristoph Hellwig 	return 0;
4493c8b54673SChristoph Hellwig error0:
4494c8b54673SChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, error);
449530f712c9SDave Chinner 	return error;
449630f712c9SDave Chinner }
449730f712c9SDave Chinner 
4498627209fbSBrian Foster /*
4499627209fbSBrian Foster  * Convert an existing delalloc extent to real blocks based on file offset. This
4500627209fbSBrian Foster  * attempts to allocate the entire delalloc extent and may require multiple
4501627209fbSBrian Foster  * invocations to cover the target offset if a large enough physical extent
4502627209fbSBrian Foster  * is not available.
4503627209fbSBrian Foster  */
4504627209fbSBrian Foster int
4505627209fbSBrian Foster xfs_bmapi_convert_delalloc(
4506627209fbSBrian Foster 	struct xfs_inode	*ip,
4507627209fbSBrian Foster 	int			whichfork,
45084e087a3bSChristoph Hellwig 	xfs_off_t		offset,
45094e087a3bSChristoph Hellwig 	struct iomap		*iomap,
4510491ce61eSChristoph Hellwig 	unsigned int		*seq)
4511627209fbSBrian Foster {
4512732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4513491ce61eSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
45144e087a3bSChristoph Hellwig 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4515d8ae82e3SChristoph Hellwig 	struct xfs_bmalloca	bma = { NULL };
4516af952aebSDarrick J. Wong 	uint16_t		flags = 0;
4517491ce61eSChristoph Hellwig 	struct xfs_trans	*tp;
4518627209fbSBrian Foster 	int			error;
4519627209fbSBrian Foster 
45204e087a3bSChristoph Hellwig 	if (whichfork == XFS_COW_FORK)
45214e087a3bSChristoph Hellwig 		flags |= IOMAP_F_SHARED;
45224e087a3bSChristoph Hellwig 
4523491ce61eSChristoph Hellwig 	/*
4524491ce61eSChristoph Hellwig 	 * Space for the extent and indirect blocks was reserved when the
4525491ce61eSChristoph Hellwig 	 * delalloc extent was created so there's no need to do so here.
4526491ce61eSChristoph Hellwig 	 */
4527491ce61eSChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4528491ce61eSChristoph Hellwig 				XFS_TRANS_RESERVE, &tp);
4529491ce61eSChristoph Hellwig 	if (error)
4530491ce61eSChristoph Hellwig 		return error;
4531491ce61eSChristoph Hellwig 
4532491ce61eSChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_EXCL);
45334f86bb4bSChandan Babu R 	xfs_trans_ijoin(tp, ip, 0);
4534727e1acdSChandan Babu R 
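	/*
	 * Make sure the inode can hold another extent record, upgrading to
	 * large extent counters if the current counter would overflow.
	 */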
4535727e1acdSChandan Babu R 	error = xfs_iext_count_may_overflow(ip, whichfork,
4536727e1acdSChandan Babu R 			XFS_IEXT_ADD_NOSPLIT_CNT);
45374f86bb4bSChandan Babu R 	if (error == -EFBIG)
45384f86bb4bSChandan Babu R 		error = xfs_iext_count_upgrade(tp, ip,
45394f86bb4bSChandan Babu R 				XFS_IEXT_ADD_NOSPLIT_CNT);
4540727e1acdSChandan Babu R 	if (error)
4541727e1acdSChandan Babu R 		goto out_trans_cancel;
4542727e1acdSChandan Babu R 
4543d8ae82e3SChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4544d8ae82e3SChristoph Hellwig 	    bma.got.br_startoff > offset_fsb) {
4545d8ae82e3SChristoph Hellwig 		/*
4546d8ae82e3SChristoph Hellwig 		 * No extent found in the range we are trying to convert.  This
4547d8ae82e3SChristoph Hellwig 		 * should only happen for the COW fork, where another thread
4548d8ae82e3SChristoph Hellwig 		 * might have moved the extent to the data fork in the meantime.
4549d8ae82e3SChristoph Hellwig 		 */
4550d8ae82e3SChristoph Hellwig 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4551491ce61eSChristoph Hellwig 		error = -EAGAIN;
4552491ce61eSChristoph Hellwig 		goto out_trans_cancel;
4553d8ae82e3SChristoph Hellwig 	}
4554627209fbSBrian Foster 
4555627209fbSBrian Foster 	/*
4556d8ae82e3SChristoph Hellwig 	 * If we find a real extent here we raced with another thread converting
4557d8ae82e3SChristoph Hellwig 	 * the extent.  Just return the real extent at this offset.
4558627209fbSBrian Foster 	 */
4559d8ae82e3SChristoph Hellwig 	if (!isnullstartblock(bma.got.br_startblock)) {
4560304a68b9SDave Chinner 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4561304a68b9SDave Chinner 				xfs_iomap_inode_sequence(ip, flags));
4562491ce61eSChristoph Hellwig 		*seq = READ_ONCE(ifp->if_seq);
4563491ce61eSChristoph Hellwig 		goto out_trans_cancel;
4564d8ae82e3SChristoph Hellwig 	}
4565d8ae82e3SChristoph Hellwig 
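	/*
	 * Set up a bmalloca for the conversion.  Because wasdel is set,
	 * xfs_bmapi_allocate() will resize the request to cover the whole
	 * delalloc extent backing this offset rather than the length hint
	 * given here.
	 */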
4566d8ae82e3SChristoph Hellwig 	bma.tp = tp;
4567d8ae82e3SChristoph Hellwig 	bma.ip = ip;
4568d8ae82e3SChristoph Hellwig 	bma.wasdel = true;
4569d8ae82e3SChristoph Hellwig 	bma.offset = bma.got.br_startoff;
457095f0b95eSChandan Babu R 	bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount,
457195f0b95eSChandan Babu R 			XFS_MAX_BMBT_EXTLEN);
4572d8ae82e3SChristoph Hellwig 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4573a5949d3fSDarrick J. Wong 
4574a5949d3fSDarrick J. Wong 	/*
4575a5949d3fSDarrick J. Wong 	 * When we're converting the delalloc reservations backing dirty pages
4576a5949d3fSDarrick J. Wong 	 * in the page cache, we must be careful about how we create the new
4577a5949d3fSDarrick J. Wong 	 * extents:
4578a5949d3fSDarrick J. Wong 	 *
4579a5949d3fSDarrick J. Wong 	 * New CoW fork extents are created unwritten, turned into real extents
4580a5949d3fSDarrick J. Wong 	 * when we're about to write the data to disk, and mapped into the data
4581a5949d3fSDarrick J. Wong 	 * fork after the write finishes.  End of story.
4582a5949d3fSDarrick J. Wong 	 *
4583a5949d3fSDarrick J. Wong 	 * New data fork extents must be mapped in as unwritten and converted
4584a5949d3fSDarrick J. Wong 	 * to real extents after the write succeeds to avoid exposing stale
4585a5949d3fSDarrick J. Wong 	 * disk contents if we crash.
4586a5949d3fSDarrick J. Wong 	 */
4587a5949d3fSDarrick J. Wong 	bma.flags = XFS_BMAPI_PREALLOC;
4588d8ae82e3SChristoph Hellwig 	if (whichfork == XFS_COW_FORK)
4589a5949d3fSDarrick J. Wong 		bma.flags |= XFS_BMAPI_COWFORK;
4590d8ae82e3SChristoph Hellwig 
4591d8ae82e3SChristoph Hellwig 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4592d8ae82e3SChristoph Hellwig 		bma.prev.br_startoff = NULLFILEOFF;
4593d8ae82e3SChristoph Hellwig 
4594d8ae82e3SChristoph Hellwig 	error = xfs_bmapi_allocate(&bma);
4595d8ae82e3SChristoph Hellwig 	if (error)
4596d8ae82e3SChristoph Hellwig 		goto out_finish;
4597d8ae82e3SChristoph Hellwig 
4598d8ae82e3SChristoph Hellwig 	error = -ENOSPC;
4599d8ae82e3SChristoph Hellwig 	if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4600d8ae82e3SChristoph Hellwig 		goto out_finish;
4601627209fbSBrian Foster 	error = -EFSCORRUPTED;
4602eb77b23bSChristoph Hellwig 	if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4603d8ae82e3SChristoph Hellwig 		goto out_finish;
4604d8ae82e3SChristoph Hellwig 
4605125851acSChristoph Hellwig 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4606125851acSChristoph Hellwig 	XFS_STATS_INC(mp, xs_xstrat_quick);
4607125851acSChristoph Hellwig 
4608d8ae82e3SChristoph Hellwig 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4609304a68b9SDave Chinner 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4610304a68b9SDave Chinner 				xfs_iomap_inode_sequence(ip, flags));
4611491ce61eSChristoph Hellwig 	*seq = READ_ONCE(ifp->if_seq);
4612d8ae82e3SChristoph Hellwig 
461374b4c5d4SDarrick J. Wong 	if (whichfork == XFS_COW_FORK)
461474b4c5d4SDarrick J. Wong 		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4615d8ae82e3SChristoph Hellwig 
4616d8ae82e3SChristoph Hellwig 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4617d8ae82e3SChristoph Hellwig 			whichfork);
4618491ce61eSChristoph Hellwig 	if (error)
4619491ce61eSChristoph Hellwig 		goto out_finish;
4620491ce61eSChristoph Hellwig 
4621491ce61eSChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, 0);
4622491ce61eSChristoph Hellwig 	error = xfs_trans_commit(tp);
4623491ce61eSChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4624491ce61eSChristoph Hellwig 	return error;
4625491ce61eSChristoph Hellwig 
4626d8ae82e3SChristoph Hellwig out_finish:
4627d8ae82e3SChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, error);
4628491ce61eSChristoph Hellwig out_trans_cancel:
4629491ce61eSChristoph Hellwig 	xfs_trans_cancel(tp);
4630491ce61eSChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4631627209fbSBrian Foster 	return error;
4632627209fbSBrian Foster }
4633627209fbSBrian Foster 
46347cf199baSDarrick J. Wong int
46356ebd5a44SChristoph Hellwig xfs_bmapi_remap(
46366ebd5a44SChristoph Hellwig 	struct xfs_trans	*tp,
46376ebd5a44SChristoph Hellwig 	struct xfs_inode	*ip,
46386ebd5a44SChristoph Hellwig 	xfs_fileoff_t		bno,
46396ebd5a44SChristoph Hellwig 	xfs_filblks_t		len,
46406ebd5a44SChristoph Hellwig 	xfs_fsblock_t		startblock,
4641e7d410acSDave Chinner 	uint32_t		flags)
46426ebd5a44SChristoph Hellwig {
46436ebd5a44SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
46447cf199baSDarrick J. Wong 	struct xfs_ifork	*ifp;
46456ebd5a44SChristoph Hellwig 	struct xfs_btree_cur	*cur = NULL;
46466ebd5a44SChristoph Hellwig 	struct xfs_bmbt_irec	got;
4647b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
46487cf199baSDarrick J. Wong 	int			whichfork = xfs_bmapi_whichfork(flags);
46496ebd5a44SChristoph Hellwig 	int			logflags = 0, error;
46506ebd5a44SChristoph Hellwig 
4651732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
46526ebd5a44SChristoph Hellwig 	ASSERT(len > 0);
465395f0b95eSChandan Babu R 	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
46546ebd5a44SChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
46557644bd98SDarrick J. Wong 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
46567644bd98SDarrick J. Wong 			   XFS_BMAPI_NORMAP)));
46577644bd98SDarrick J. Wong 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
46587644bd98SDarrick J. Wong 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
46596ebd5a44SChristoph Hellwig 
4660f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4661a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
46626ebd5a44SChristoph Hellwig 		return -EFSCORRUPTED;
46636ebd5a44SChristoph Hellwig 	}
46646ebd5a44SChristoph Hellwig 
466575c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
46666ebd5a44SChristoph Hellwig 		return -EIO;
46676ebd5a44SChristoph Hellwig 
46687cf199baSDarrick J. Wong 	error = xfs_iread_extents(tp, ip, whichfork);
46696ebd5a44SChristoph Hellwig 	if (error)
46706ebd5a44SChristoph Hellwig 		return error;
46716ebd5a44SChristoph Hellwig 
4672b2b1712aSChristoph Hellwig 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
46736ebd5a44SChristoph Hellwig 		/* make sure we only reflink into a hole. */
46746ebd5a44SChristoph Hellwig 		ASSERT(got.br_startoff > bno);
46756ebd5a44SChristoph Hellwig 		ASSERT(got.br_startoff - bno >= len);
46766ebd5a44SChristoph Hellwig 	}
46776ebd5a44SChristoph Hellwig 
46786e73a545SChristoph Hellwig 	ip->i_nblocks += len;
4679bf8eadbaSChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
46806ebd5a44SChristoph Hellwig 
4681ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
46827cf199baSDarrick J. Wong 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
468392219c29SDave Chinner 		cur->bc_ino.flags = 0;
46846ebd5a44SChristoph Hellwig 	}
46856ebd5a44SChristoph Hellwig 
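	/* Build the new mapping and insert it into the hole checked above. */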
46866ebd5a44SChristoph Hellwig 	got.br_startoff = bno;
46876ebd5a44SChristoph Hellwig 	got.br_startblock = startblock;
46886ebd5a44SChristoph Hellwig 	got.br_blockcount = len;
46897644bd98SDarrick J. Wong 	if (flags & XFS_BMAPI_PREALLOC)
46907644bd98SDarrick J. Wong 		got.br_state = XFS_EXT_UNWRITTEN;
46917644bd98SDarrick J. Wong 	else
46926ebd5a44SChristoph Hellwig 		got.br_state = XFS_EXT_NORM;
46936ebd5a44SChristoph Hellwig 
46947cf199baSDarrick J. Wong 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
469592f9da30SBrian Foster 			&cur, &got, &logflags, flags);
46966ebd5a44SChristoph Hellwig 	if (error)
46976ebd5a44SChristoph Hellwig 		goto error0;
46986ebd5a44SChristoph Hellwig 
4699b101e334SChristoph Hellwig 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
47006ebd5a44SChristoph Hellwig 
47016ebd5a44SChristoph Hellwig error0:
4702f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
47036ebd5a44SChristoph Hellwig 		logflags &= ~XFS_ILOG_DEXT;
4704f7e67b20SChristoph Hellwig 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
47056ebd5a44SChristoph Hellwig 		logflags &= ~XFS_ILOG_DBROOT;
47066ebd5a44SChristoph Hellwig 
47076ebd5a44SChristoph Hellwig 	if (logflags)
47086ebd5a44SChristoph Hellwig 		xfs_trans_log_inode(tp, ip, logflags);
47090b04b6b8SDarrick J. Wong 	if (cur)
47100b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
47116ebd5a44SChristoph Hellwig 	return error;
47126ebd5a44SChristoph Hellwig }
47136ebd5a44SChristoph Hellwig 
471430f712c9SDave Chinner /*
4715a9bd24acSBrian Foster  * When a delalloc extent is split (e.g., due to a hole punch), the original
4716a9bd24acSBrian Foster  * indlen reservation must be shared across the two new extents that are left
4717a9bd24acSBrian Foster  * behind.
4718a9bd24acSBrian Foster  *
4719a9bd24acSBrian Foster  * Given the original reservation and the worst case indlen for the two new
4720a9bd24acSBrian Foster  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4721d34999c9SBrian Foster  * reservation fairly across the two new extents. If necessary, steal available
4722d34999c9SBrian Foster  * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4723d34999c9SBrian Foster  * ores == 1). The number of stolen blocks is returned. The availability and
4724d34999c9SBrian Foster  * subsequent accounting of stolen blocks is the responsibility of the caller.
4725a9bd24acSBrian Foster  */
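/*
 * Worked example with hypothetical numbers: for ores = 10, *indlen1 = 8 and
 * *indlen2 = 4 (nres = 12) with no stealable blocks, resfactor works out to
 * 10 * 100 / 12 = 83, giving len1 = 6 and len2 = 3; the one block of the
 * original reservation still unassigned is then handed out by the loop
 * below, leaving the extents with 7 and 3 blocks of indlen respectively.
 */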
4726d34999c9SBrian Foster static xfs_filblks_t
4727a9bd24acSBrian Foster xfs_bmap_split_indlen(
4728a9bd24acSBrian Foster 	xfs_filblks_t			ores,		/* original res. */
4729a9bd24acSBrian Foster 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4730d34999c9SBrian Foster 	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
4731d34999c9SBrian Foster 	xfs_filblks_t			avail)		/* stealable blocks */
4732a9bd24acSBrian Foster {
4733a9bd24acSBrian Foster 	xfs_filblks_t			len1 = *indlen1;
4734a9bd24acSBrian Foster 	xfs_filblks_t			len2 = *indlen2;
4735a9bd24acSBrian Foster 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4736d34999c9SBrian Foster 	xfs_filblks_t			stolen = 0;
473775d65361SBrian Foster 	xfs_filblks_t			resfactor;
4738a9bd24acSBrian Foster 
4739a9bd24acSBrian Foster 	/*
4740d34999c9SBrian Foster 	 * Steal as many blocks as we can to try and satisfy the worst case
4741d34999c9SBrian Foster 	 * indlen for both new extents.
4742d34999c9SBrian Foster 	 */
474375d65361SBrian Foster 	if (ores < nres && avail)
474475d65361SBrian Foster 		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
474575d65361SBrian Foster 	ores += stolen;
474675d65361SBrian Foster 
474775d65361SBrian Foster 	 /* nothing else to do if we've satisfied the new reservation */
474875d65361SBrian Foster 	if (ores >= nres)
474975d65361SBrian Foster 		return stolen;
4750d34999c9SBrian Foster 
4751d34999c9SBrian Foster 	/*
475275d65361SBrian Foster 	 * We can't meet the total required reservation for the two extents.
475375d65361SBrian Foster 	 * Calculate the percent of the overall shortage between both extents
475475d65361SBrian Foster 	 * and apply this percentage to each of the requested indlen values.
475575d65361SBrian Foster 	 * This distributes the shortage fairly and reduces the chances that one
475675d65361SBrian Foster 	 * of the two extents is left with nothing when extents are repeatedly
475775d65361SBrian Foster 	 * split.
4758a9bd24acSBrian Foster 	 */
475975d65361SBrian Foster 	resfactor = (ores * 100);
476075d65361SBrian Foster 	do_div(resfactor, nres);
476175d65361SBrian Foster 	len1 *= resfactor;
476275d65361SBrian Foster 	do_div(len1, 100);
476375d65361SBrian Foster 	len2 *= resfactor;
476475d65361SBrian Foster 	do_div(len2, 100);
476575d65361SBrian Foster 	ASSERT(len1 + len2 <= ores);
476675d65361SBrian Foster 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
476775d65361SBrian Foster 
476875d65361SBrian Foster 	/*
476975d65361SBrian Foster 	 * Hand out the remainder to each extent. If one of the two reservations
477075d65361SBrian Foster 	 * is zero, we want to make sure that one gets a block first. The loop
477175d65361SBrian Foster 	 * below starts with len1, so hand len2 a block right off the bat if it
477275d65361SBrian Foster 	 * is zero.
477375d65361SBrian Foster 	 */
477475d65361SBrian Foster 	ores -= (len1 + len2);
477575d65361SBrian Foster 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
477675d65361SBrian Foster 	if (ores && !len2 && *indlen2) {
477775d65361SBrian Foster 		len2++;
477875d65361SBrian Foster 		ores--;
4779a9bd24acSBrian Foster 	}
478075d65361SBrian Foster 	while (ores) {
478175d65361SBrian Foster 		if (len1 < *indlen1) {
478275d65361SBrian Foster 			len1++;
478375d65361SBrian Foster 			ores--;
478475d65361SBrian Foster 		}
478575d65361SBrian Foster 		if (!ores)
4786a9bd24acSBrian Foster 			break;
478775d65361SBrian Foster 		if (len2 < *indlen2) {
478875d65361SBrian Foster 			len2++;
478975d65361SBrian Foster 			ores--;
4790a9bd24acSBrian Foster 		}
4791a9bd24acSBrian Foster 	}
4792a9bd24acSBrian Foster 
4793a9bd24acSBrian Foster 	*indlen1 = len1;
4794a9bd24acSBrian Foster 	*indlen2 = len2;
4795d34999c9SBrian Foster 
4796d34999c9SBrian Foster 	return stolen;
4797a9bd24acSBrian Foster }
4798a9bd24acSBrian Foster 
4799fa5c836cSChristoph Hellwig int
4800fa5c836cSChristoph Hellwig xfs_bmap_del_extent_delay(
4801fa5c836cSChristoph Hellwig 	struct xfs_inode	*ip,
4802fa5c836cSChristoph Hellwig 	int			whichfork,
4803b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
4804fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*got,
4805fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*del)
4806fa5c836cSChristoph Hellwig {
4807fa5c836cSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
4808732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4809fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	new;
4810fa5c836cSChristoph Hellwig 	int64_t			da_old, da_new, da_diff = 0;
4811fa5c836cSChristoph Hellwig 	xfs_fileoff_t		del_endoff, got_endoff;
4812fa5c836cSChristoph Hellwig 	xfs_filblks_t		got_indlen, new_indlen, stolen;
48130e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4814060ea65bSChristoph Hellwig 	int			error = 0;
4815fa5c836cSChristoph Hellwig 	bool			isrt;
4816fa5c836cSChristoph Hellwig 
4817fa5c836cSChristoph Hellwig 	XFS_STATS_INC(mp, xs_del_exlist);
4818fa5c836cSChristoph Hellwig 
4819fa5c836cSChristoph Hellwig 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4820fa5c836cSChristoph Hellwig 	del_endoff = del->br_startoff + del->br_blockcount;
4821fa5c836cSChristoph Hellwig 	got_endoff = got->br_startoff + got->br_blockcount;
4822fa5c836cSChristoph Hellwig 	da_old = startblockval(got->br_startblock);
4823fa5c836cSChristoph Hellwig 	da_new = 0;
4824fa5c836cSChristoph Hellwig 
4825fa5c836cSChristoph Hellwig 	ASSERT(del->br_blockcount > 0);
4826fa5c836cSChristoph Hellwig 	ASSERT(got->br_startoff <= del->br_startoff);
4827fa5c836cSChristoph Hellwig 	ASSERT(got_endoff >= del_endoff);
4828fa5c836cSChristoph Hellwig 
4829fa5c836cSChristoph Hellwig 	if (isrt) {
4830e3aca453SDarrick J. Wong 		uint64_t	rtexts = del->br_blockcount;
4831fa5c836cSChristoph Hellwig 
4832fa5c836cSChristoph Hellwig 		do_div(rtexts, mp->m_sb.sb_rextsize);
4833fa5c836cSChristoph Hellwig 		xfs_mod_frextents(mp, rtexts);
4834fa5c836cSChristoph Hellwig 	}
4835fa5c836cSChristoph Hellwig 
4836fa5c836cSChristoph Hellwig 	/*
4837fa5c836cSChristoph Hellwig 	 * Update the inode delalloc counter now and wait to update the
4838fa5c836cSChristoph Hellwig 	 * sb counters as we might have to borrow some blocks for the
4839fa5c836cSChristoph Hellwig 	 * indirect block accounting.
4840fa5c836cSChristoph Hellwig 	 */
484185546500SDarrick J. Wong 	ASSERT(!isrt);
484285546500SDarrick J. Wong 	error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
48434fd29ec4SDarrick J. Wong 	if (error)
48444fd29ec4SDarrick J. Wong 		return error;
4845fa5c836cSChristoph Hellwig 	ip->i_delayed_blks -= del->br_blockcount;
4846fa5c836cSChristoph Hellwig 
4847fa5c836cSChristoph Hellwig 	if (got->br_startoff == del->br_startoff)
48480173c689SChristoph Hellwig 		state |= BMAP_LEFT_FILLING;
4849fa5c836cSChristoph Hellwig 	if (got_endoff == del_endoff)
48500173c689SChristoph Hellwig 		state |= BMAP_RIGHT_FILLING;
4851fa5c836cSChristoph Hellwig 
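	/*
	 * Four cases, selected by the filling flags: the deletion covers the
	 * whole delalloc extent, trims its front, trims its back, or punches
	 * out the middle and splits the remainder into two extents.
	 */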
48520173c689SChristoph Hellwig 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
48530173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4854fa5c836cSChristoph Hellwig 		/*
4855fa5c836cSChristoph Hellwig 		 * Matches the whole extent.  Delete the entry.
4856fa5c836cSChristoph Hellwig 		 */
4857c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
4858b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
4859fa5c836cSChristoph Hellwig 		break;
48600173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING:
4861fa5c836cSChristoph Hellwig 		/*
4862fa5c836cSChristoph Hellwig 		 * Deleting the first part of the extent.
4863fa5c836cSChristoph Hellwig 		 */
4864fa5c836cSChristoph Hellwig 		got->br_startoff = del_endoff;
4865fa5c836cSChristoph Hellwig 		got->br_blockcount -= del->br_blockcount;
4866fa5c836cSChristoph Hellwig 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4867fa5c836cSChristoph Hellwig 				got->br_blockcount), da_old);
4868fa5c836cSChristoph Hellwig 		got->br_startblock = nullstartblock((int)da_new);
4869b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4870fa5c836cSChristoph Hellwig 		break;
48710173c689SChristoph Hellwig 	case BMAP_RIGHT_FILLING:
4872fa5c836cSChristoph Hellwig 		/*
4873fa5c836cSChristoph Hellwig 		 * Deleting the last part of the extent.
4874fa5c836cSChristoph Hellwig 		 */
4875fa5c836cSChristoph Hellwig 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
4876fa5c836cSChristoph Hellwig 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4877fa5c836cSChristoph Hellwig 				got->br_blockcount), da_old);
4878fa5c836cSChristoph Hellwig 		got->br_startblock = nullstartblock((int)da_new);
4879b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4880fa5c836cSChristoph Hellwig 		break;
4881fa5c836cSChristoph Hellwig 	case 0:
4882fa5c836cSChristoph Hellwig 		/*
4883fa5c836cSChristoph Hellwig 		 * Deleting the middle of the extent.
4884fa5c836cSChristoph Hellwig 		 *
4885fa5c836cSChristoph Hellwig 		 * Distribute the original indlen reservation across the two new
4886fa5c836cSChristoph Hellwig 		 * extents.  Steal blocks from the deleted extent if necessary.
4887fa5c836cSChristoph Hellwig 		 * Stealing blocks simply fudges the fdblocks accounting below.
4888fa5c836cSChristoph Hellwig 		 * Warn if either of the new indlen reservations is zero as this
4889fa5c836cSChristoph Hellwig 		 * can lead to delalloc problems.
4890fa5c836cSChristoph Hellwig 		 */
4891fa5c836cSChristoph Hellwig 		got->br_blockcount = del->br_startoff - got->br_startoff;
4892fa5c836cSChristoph Hellwig 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4893fa5c836cSChristoph Hellwig 
4894fa5c836cSChristoph Hellwig 		new.br_blockcount = got_endoff - del_endoff;
4895fa5c836cSChristoph Hellwig 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4896fa5c836cSChristoph Hellwig 
4897fa5c836cSChristoph Hellwig 		WARN_ON_ONCE(!got_indlen || !new_indlen);
4898fa5c836cSChristoph Hellwig 		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4899fa5c836cSChristoph Hellwig 						       del->br_blockcount);
4900fa5c836cSChristoph Hellwig 
4901fa5c836cSChristoph Hellwig 		got->br_startblock = nullstartblock((int)got_indlen);
4902fa5c836cSChristoph Hellwig 
4903fa5c836cSChristoph Hellwig 		new.br_startoff = del_endoff;
4904fa5c836cSChristoph Hellwig 		new.br_state = got->br_state;
4905fa5c836cSChristoph Hellwig 		new.br_startblock = nullstartblock((int)new_indlen);
4906fa5c836cSChristoph Hellwig 
4907b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4908b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
49090254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &new, state);
4910fa5c836cSChristoph Hellwig 
4911fa5c836cSChristoph Hellwig 		da_new = got_indlen + new_indlen - stolen;
4912fa5c836cSChristoph Hellwig 		del->br_blockcount -= stolen;
4913fa5c836cSChristoph Hellwig 		break;
4914fa5c836cSChristoph Hellwig 	}
4915fa5c836cSChristoph Hellwig 
4916fa5c836cSChristoph Hellwig 	ASSERT(da_old >= da_new);
4917fa5c836cSChristoph Hellwig 	da_diff = da_old - da_new;
4918fa5c836cSChristoph Hellwig 	if (!isrt)
4919fa5c836cSChristoph Hellwig 		da_diff += del->br_blockcount;
49209fe82b8cSDarrick J. Wong 	if (da_diff) {
4921fa5c836cSChristoph Hellwig 		xfs_mod_fdblocks(mp, da_diff, false);
49229fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(mp, -da_diff);
49239fe82b8cSDarrick J. Wong 	}
4924fa5c836cSChristoph Hellwig 	return error;
4925fa5c836cSChristoph Hellwig }
4926fa5c836cSChristoph Hellwig 
4927fa5c836cSChristoph Hellwig void
4928fa5c836cSChristoph Hellwig xfs_bmap_del_extent_cow(
4929fa5c836cSChristoph Hellwig 	struct xfs_inode	*ip,
4930b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
4931fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*got,
4932fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*del)
4933fa5c836cSChristoph Hellwig {
4934fa5c836cSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
4935732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
4936fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	new;
4937fa5c836cSChristoph Hellwig 	xfs_fileoff_t		del_endoff, got_endoff;
49380e5b8e45SDave Chinner 	uint32_t		state = BMAP_COWFORK;
4939fa5c836cSChristoph Hellwig 
4940fa5c836cSChristoph Hellwig 	XFS_STATS_INC(mp, xs_del_exlist);
4941fa5c836cSChristoph Hellwig 
4942fa5c836cSChristoph Hellwig 	del_endoff = del->br_startoff + del->br_blockcount;
4943fa5c836cSChristoph Hellwig 	got_endoff = got->br_startoff + got->br_blockcount;
4944fa5c836cSChristoph Hellwig 
4945fa5c836cSChristoph Hellwig 	ASSERT(del->br_blockcount > 0);
4946fa5c836cSChristoph Hellwig 	ASSERT(got->br_startoff <= del->br_startoff);
4947fa5c836cSChristoph Hellwig 	ASSERT(got_endoff >= del_endoff);
4948fa5c836cSChristoph Hellwig 	ASSERT(!isnullstartblock(got->br_startblock));
4949fa5c836cSChristoph Hellwig 
4950fa5c836cSChristoph Hellwig 	if (got->br_startoff == del->br_startoff)
49510173c689SChristoph Hellwig 		state |= BMAP_LEFT_FILLING;
4952fa5c836cSChristoph Hellwig 	if (got_endoff == del_endoff)
49530173c689SChristoph Hellwig 		state |= BMAP_RIGHT_FILLING;
4954fa5c836cSChristoph Hellwig 
49550173c689SChristoph Hellwig 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
49560173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4957fa5c836cSChristoph Hellwig 		/*
4958fa5c836cSChristoph Hellwig 		 * Matches the whole extent.  Delete the entry.
4959fa5c836cSChristoph Hellwig 		 */
4960c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
4961b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
4962fa5c836cSChristoph Hellwig 		break;
49630173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING:
4964fa5c836cSChristoph Hellwig 		/*
4965fa5c836cSChristoph Hellwig 		 * Deleting the first part of the extent.
4966fa5c836cSChristoph Hellwig 		 */
4967fa5c836cSChristoph Hellwig 		got->br_startoff = del_endoff;
4968fa5c836cSChristoph Hellwig 		got->br_blockcount -= del->br_blockcount;
4969fa5c836cSChristoph Hellwig 		got->br_startblock = del->br_startblock + del->br_blockcount;
4970b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4971fa5c836cSChristoph Hellwig 		break;
49720173c689SChristoph Hellwig 	case BMAP_RIGHT_FILLING:
4973fa5c836cSChristoph Hellwig 		/*
4974fa5c836cSChristoph Hellwig 		 * Deleting the last part of the extent.
4975fa5c836cSChristoph Hellwig 		 */
4976fa5c836cSChristoph Hellwig 		got->br_blockcount -= del->br_blockcount;
4977b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4978fa5c836cSChristoph Hellwig 		break;
4979fa5c836cSChristoph Hellwig 	case 0:
4980fa5c836cSChristoph Hellwig 		/*
4981fa5c836cSChristoph Hellwig 		 * Deleting the middle of the extent.
4982fa5c836cSChristoph Hellwig 		 */
4983fa5c836cSChristoph Hellwig 		got->br_blockcount = del->br_startoff - got->br_startoff;
4984fa5c836cSChristoph Hellwig 
4985fa5c836cSChristoph Hellwig 		new.br_startoff = del_endoff;
4986fa5c836cSChristoph Hellwig 		new.br_blockcount = got_endoff - del_endoff;
4987fa5c836cSChristoph Hellwig 		new.br_state = got->br_state;
4988fa5c836cSChristoph Hellwig 		new.br_startblock = del->br_startblock + del->br_blockcount;
4989fa5c836cSChristoph Hellwig 
4990b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4991b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
49920254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &new, state);
4993fa5c836cSChristoph Hellwig 		break;
4994fa5c836cSChristoph Hellwig 	}
49954b4c1326SDarrick J. Wong 	ip->i_delayed_blks -= del->br_blockcount;
4996fa5c836cSChristoph Hellwig }
4997fa5c836cSChristoph Hellwig 
4998a9bd24acSBrian Foster /*
499930f712c9SDave Chinner  * Called by xfs_bmapi to update file extent records and the btree
5000e1d7553fSChristoph Hellwig  * after removing space.
500130f712c9SDave Chinner  */
500230f712c9SDave Chinner STATIC int				/* error */
5003e1d7553fSChristoph Hellwig xfs_bmap_del_extent_real(
500430f712c9SDave Chinner 	xfs_inode_t		*ip,	/* incore inode pointer */
500530f712c9SDave Chinner 	xfs_trans_t		*tp,	/* current transaction pointer */
5006b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
5007ae127f08SDarrick J. Wong 	struct xfs_btree_cur	*cur,	/* if null, not a btree */
500830f712c9SDave Chinner 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
500930f712c9SDave Chinner 	int			*logflagsp, /* inode logging flags */
50104847acf8SDarrick J. Wong 	int			whichfork, /* data or attr fork */
5011e7d410acSDave Chinner 	uint32_t		bflags)	/* bmapi flags */
501230f712c9SDave Chinner {
501330f712c9SDave Chinner 	xfs_fsblock_t		del_endblock=0;	/* first block past del */
501430f712c9SDave Chinner 	xfs_fileoff_t		del_endoff;	/* first offset past del */
501530f712c9SDave Chinner 	int			do_fx;	/* free extent at end of routine */
501630f712c9SDave Chinner 	int			error;	/* error return value */
501748fd52b1SChristoph Hellwig 	struct xfs_bmbt_irec	got;	/* current extent entry */
501830f712c9SDave Chinner 	xfs_fileoff_t		got_endoff;	/* first offset past got */
501930f712c9SDave Chinner 	int			i;	/* temp state */
50203ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;	/* inode fork pointer */
502130f712c9SDave Chinner 	xfs_mount_t		*mp;	/* mount structure */
502230f712c9SDave Chinner 	xfs_filblks_t		nblks;	/* quota/sb block count */
502330f712c9SDave Chinner 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
502430f712c9SDave Chinner 	/* REFERENCED */
502530f712c9SDave Chinner 	uint			qfield;	/* quota field to update */
50260e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
502748fd52b1SChristoph Hellwig 	struct xfs_bmbt_irec	old;
502830f712c9SDave Chinner 
5029264e3509SJiachen Zhang 	*logflagsp = 0;
5030264e3509SJiachen Zhang 
5031ff6d6af2SBill O'Donnell 	mp = ip->i_mount;
5032ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_del_exlist);
503330f712c9SDave Chinner 
5034732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
503530f712c9SDave Chinner 	ASSERT(del->br_blockcount > 0);
5036b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, icur, &got);
503730f712c9SDave Chinner 	ASSERT(got.br_startoff <= del->br_startoff);
503830f712c9SDave Chinner 	del_endoff = del->br_startoff + del->br_blockcount;
503930f712c9SDave Chinner 	got_endoff = got.br_startoff + got.br_blockcount;
504030f712c9SDave Chinner 	ASSERT(got_endoff >= del_endoff);
5041e1d7553fSChristoph Hellwig 	ASSERT(!isnullstartblock(got.br_startblock));
504230f712c9SDave Chinner 	qfield = 0;
5043e1d7553fSChristoph Hellwig 
50441b24b633SChristoph Hellwig 	/*
50451b24b633SChristoph Hellwig 	 * If the directory code is running with no block
50461b24b633SChristoph Hellwig 	 * reservation, and the deleted block is in the middle of its extent,
50471b24b633SChristoph Hellwig 	 * and the resulting insert of an extent would cause transformation to
50481b24b633SChristoph Hellwig 	 * btree format, then reject it.  The calling code will then swap blocks
50491b24b633SChristoph Hellwig 	 * around instead.  We have to do this now, rather than waiting for the
50501b24b633SChristoph Hellwig 	 * conversion to btree format, since the transaction will be dirty then.
50511b24b633SChristoph Hellwig 	 */
50521b24b633SChristoph Hellwig 	if (tp->t_blk_res == 0 &&
5053f7e67b20SChristoph Hellwig 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5054daf83964SChristoph Hellwig 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
50551b24b633SChristoph Hellwig 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
50561b24b633SChristoph Hellwig 		return -ENOSPC;
50571b24b633SChristoph Hellwig 
5058264e3509SJiachen Zhang 	*logflagsp = XFS_ILOG_CORE;
505930f712c9SDave Chinner 	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
50608df0fa39SDarrick J. Wong 		if (!(bflags & XFS_BMAPI_REMAP)) {
5061e820b13bSDarrick J. Wong 			error = xfs_rtfree_blocks(tp, del->br_startblock,
5062e820b13bSDarrick J. Wong 					del->br_blockcount);
506330f712c9SDave Chinner 			if (error)
5064264e3509SJiachen Zhang 				return error;
50658df0fa39SDarrick J. Wong 		}
50668df0fa39SDarrick J. Wong 
506730f712c9SDave Chinner 		do_fx = 0;
506830f712c9SDave Chinner 		qfield = XFS_TRANS_DQ_RTBCOUNT;
5069e1d7553fSChristoph Hellwig 	} else {
507030f712c9SDave Chinner 		do_fx = 1;
507130f712c9SDave Chinner 		qfield = XFS_TRANS_DQ_BCOUNT;
507230f712c9SDave Chinner 	}
5073e820b13bSDarrick J. Wong 	nblks = del->br_blockcount;
5074e1d7553fSChristoph Hellwig 
507530f712c9SDave Chinner 	del_endblock = del->br_startblock + del->br_blockcount;
507630f712c9SDave Chinner 	if (cur) {
5077e16cf9b0SChristoph Hellwig 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5078e1d7553fSChristoph Hellwig 		if (error)
5079264e3509SJiachen Zhang 			return error;
5080264e3509SJiachen Zhang 		if (XFS_IS_CORRUPT(mp, i != 1))
5081264e3509SJiachen Zhang 			return -EFSCORRUPTED;
508230f712c9SDave Chinner 	}
5083340785ccSDarrick J. Wong 
5084491f6f8aSChristoph Hellwig 	if (got.br_startoff == del->br_startoff)
5085491f6f8aSChristoph Hellwig 		state |= BMAP_LEFT_FILLING;
5086491f6f8aSChristoph Hellwig 	if (got_endoff == del_endoff)
5087491f6f8aSChristoph Hellwig 		state |= BMAP_RIGHT_FILLING;
5088491f6f8aSChristoph Hellwig 
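	/*
	 * Four cases follow: the deleted range covers the whole extent,
	 * abuts its start, abuts its end, or sits in the middle, which
	 * splits the extent in two and may grow the btree.
	 */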
5089491f6f8aSChristoph Hellwig 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5090491f6f8aSChristoph Hellwig 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
509130f712c9SDave Chinner 		/*
509230f712c9SDave Chinner 		 * Matches the whole extent.  Delete the entry.
509330f712c9SDave Chinner 		 */
5094c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
5095b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
5096daf83964SChristoph Hellwig 		ifp->if_nextents--;
5097daf83964SChristoph Hellwig 
5098264e3509SJiachen Zhang 		*logflagsp |= XFS_ILOG_CORE;
509930f712c9SDave Chinner 		if (!cur) {
5100264e3509SJiachen Zhang 			*logflagsp |= xfs_ilog_fext(whichfork);
510130f712c9SDave Chinner 			break;
510230f712c9SDave Chinner 		}
510330f712c9SDave Chinner 		if ((error = xfs_btree_delete(cur, &i)))
5104264e3509SJiachen Zhang 			return error;
5105264e3509SJiachen Zhang 		if (XFS_IS_CORRUPT(mp, i != 1))
5106264e3509SJiachen Zhang 			return -EFSCORRUPTED;
510730f712c9SDave Chinner 		break;
5108491f6f8aSChristoph Hellwig 	case BMAP_LEFT_FILLING:
510930f712c9SDave Chinner 		/*
511030f712c9SDave Chinner 		 * Deleting the first part of the extent.
511130f712c9SDave Chinner 		 */
511248fd52b1SChristoph Hellwig 		got.br_startoff = del_endoff;
511348fd52b1SChristoph Hellwig 		got.br_startblock = del_endblock;
511448fd52b1SChristoph Hellwig 		got.br_blockcount -= del->br_blockcount;
5115b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &got);
511630f712c9SDave Chinner 		if (!cur) {
5117264e3509SJiachen Zhang 			*logflagsp |= xfs_ilog_fext(whichfork);
511830f712c9SDave Chinner 			break;
511930f712c9SDave Chinner 		}
5120a67d00a5SChristoph Hellwig 		error = xfs_bmbt_update(cur, &got);
512148fd52b1SChristoph Hellwig 		if (error)
5122264e3509SJiachen Zhang 			return error;
512330f712c9SDave Chinner 		break;
5124491f6f8aSChristoph Hellwig 	case BMAP_RIGHT_FILLING:
512530f712c9SDave Chinner 		/*
512630f712c9SDave Chinner 		 * Deleting the last part of the extent.
512730f712c9SDave Chinner 		 */
512848fd52b1SChristoph Hellwig 		got.br_blockcount -= del->br_blockcount;
5129b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &got);
513030f712c9SDave Chinner 		if (!cur) {
5131264e3509SJiachen Zhang 			*logflagsp |= xfs_ilog_fext(whichfork);
513230f712c9SDave Chinner 			break;
513330f712c9SDave Chinner 		}
5134a67d00a5SChristoph Hellwig 		error = xfs_bmbt_update(cur, &got);
513548fd52b1SChristoph Hellwig 		if (error)
5136264e3509SJiachen Zhang 			return error;
513730f712c9SDave Chinner 		break;
513830f712c9SDave Chinner 	case 0:
513930f712c9SDave Chinner 		/*
514030f712c9SDave Chinner 		 * Deleting the middle of the extent.
514130f712c9SDave Chinner 		 */
51420dbc5cb1SChandan Babu R 
514348fd52b1SChristoph Hellwig 		old = got;
5144ca5d8e5bSChristoph Hellwig 
514548fd52b1SChristoph Hellwig 		got.br_blockcount = del->br_startoff - got.br_startoff;
5146b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &got);
514748fd52b1SChristoph Hellwig 
514830f712c9SDave Chinner 		new.br_startoff = del_endoff;
514948fd52b1SChristoph Hellwig 		new.br_blockcount = got_endoff - del_endoff;
515030f712c9SDave Chinner 		new.br_state = got.br_state;
515130f712c9SDave Chinner 		new.br_startblock = del_endblock;
515248fd52b1SChristoph Hellwig 
5153264e3509SJiachen Zhang 		*logflagsp |= XFS_ILOG_CORE;
515430f712c9SDave Chinner 		if (cur) {
5155a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &got);
5156e1d7553fSChristoph Hellwig 			if (error)
5157264e3509SJiachen Zhang 				return error;
5158e1d7553fSChristoph Hellwig 			error = xfs_btree_increment(cur, 0, &i);
5159e1d7553fSChristoph Hellwig 			if (error)
5160264e3509SJiachen Zhang 				return error;
516130f712c9SDave Chinner 			cur->bc_rec.b = new;
516230f712c9SDave Chinner 			error = xfs_btree_insert(cur, &i);
51632451337dSDave Chinner 			if (error && error != -ENOSPC)
5164264e3509SJiachen Zhang 				return error;
516530f712c9SDave Chinner 			/*
5166e1d7553fSChristoph Hellwig 			 * If we get ENOSPC back from the btree insert, it
5167e1d7553fSChristoph Hellwig 			 * tried a split and we have a zero block reservation.
5168e1d7553fSChristoph Hellwig 			 * Fix up our state and return the error.
516930f712c9SDave Chinner 			 */
51702451337dSDave Chinner 			if (error == -ENOSPC) {
517130f712c9SDave Chinner 				/*
5172e1d7553fSChristoph Hellwig 				 * Reset the cursor; don't trust it after any
5173e1d7553fSChristoph Hellwig 				 * insert operation.
517430f712c9SDave Chinner 				 */
5175e16cf9b0SChristoph Hellwig 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5176e1d7553fSChristoph Hellwig 				if (error)
5177264e3509SJiachen Zhang 					return error;
5178264e3509SJiachen Zhang 				if (XFS_IS_CORRUPT(mp, i != 1))
5179264e3509SJiachen Zhang 					return -EFSCORRUPTED;
518030f712c9SDave Chinner 				/*
518130f712c9SDave Chinner 				 * Update the btree record back
518230f712c9SDave Chinner 				 * to the original value.
518330f712c9SDave Chinner 				 */
5184a67d00a5SChristoph Hellwig 				error = xfs_bmbt_update(cur, &old);
5185e1d7553fSChristoph Hellwig 				if (error)
5186264e3509SJiachen Zhang 					return error;
518730f712c9SDave Chinner 				/*
518830f712c9SDave Chinner 				 * Reset the extent record back
518930f712c9SDave Chinner 				 * to the original value.
519030f712c9SDave Chinner 				 */
5191b2b1712aSChristoph Hellwig 				xfs_iext_update_extent(ip, state, icur, &old);
5192264e3509SJiachen Zhang 				*logflagsp = 0;
5193264e3509SJiachen Zhang 				return -ENOSPC;
519430f712c9SDave Chinner 			}
5195264e3509SJiachen Zhang 			if (XFS_IS_CORRUPT(mp, i != 1))
5196264e3509SJiachen Zhang 				return -EFSCORRUPTED;
519730f712c9SDave Chinner 		} else
5198264e3509SJiachen Zhang 			*logflagsp |= xfs_ilog_fext(whichfork);
5199daf83964SChristoph Hellwig 
5200daf83964SChristoph Hellwig 		ifp->if_nextents++;
5201b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
52020254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &new, state);
520330f712c9SDave Chinner 		break;
520430f712c9SDave Chinner 	}
52059c194644SDarrick J. Wong 
52069c194644SDarrick J. Wong 	/* remove reverse mapping */
5207bc46ac64SDarrick J. Wong 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
52089c194644SDarrick J. Wong 
520930f712c9SDave Chinner 	/*
521030f712c9SDave Chinner 	 * If we need to, add to list of extents to delete.
521130f712c9SDave Chinner 	 */
52124847acf8SDarrick J. Wong 	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
521362aab20fSDarrick J. Wong 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
521474b4c5d4SDarrick J. Wong 			xfs_refcount_decrease_extent(tp, del);
5215fcb762f5SBrian Foster 		} else {
52167dfee17bSDave Chinner 			error = __xfs_free_extent_later(tp, del->br_startblock,
52174e529339SBrian Foster 					del->br_blockcount, NULL,
5218b742d7b4SDave Chinner 					XFS_AG_RESV_NONE,
5219b742d7b4SDave Chinner 					((bflags & XFS_BMAPI_NODISCARD) ||
5220b742d7b4SDave Chinner 					del->br_state == XFS_EXT_UNWRITTEN));
52217dfee17bSDave Chinner 			if (error)
5222264e3509SJiachen Zhang 				return error;
5223fcb762f5SBrian Foster 		}
5224fcb762f5SBrian Foster 	}
522562aab20fSDarrick J. Wong 
522630f712c9SDave Chinner 	/*
522730f712c9SDave Chinner 	 * Adjust inode # blocks in the file.
522830f712c9SDave Chinner 	 */
522930f712c9SDave Chinner 	if (nblks)
52306e73a545SChristoph Hellwig 		ip->i_nblocks -= nblks;
523130f712c9SDave Chinner 	/*
523230f712c9SDave Chinner 	 * Adjust quota data.
523330f712c9SDave Chinner 	 */
52344847acf8SDarrick J. Wong 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
523530f712c9SDave Chinner 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
523630f712c9SDave Chinner 
5237264e3509SJiachen Zhang 	return 0;
523830f712c9SDave Chinner }
523930f712c9SDave Chinner 
524030f712c9SDave Chinner /*
524130f712c9SDave Chinner  * Unmap (remove) blocks from a file.
524230f712c9SDave Chinner  * If nexts is nonzero then the number of extents to remove is limited to
524330f712c9SDave Chinner  * that value.  If not all extents in the block range can be removed then
524430f712c9SDave Chinner  * *rlen is set to the length still remaining to be unmapped.
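 *
 * A minimal caller sketch via the xfs_bunmapi wrapper (hypothetical
 * variables; transaction setup, inode joining and the per-iteration
 * transaction rolling and deferred-op finishing that real callers do are
 * elided for brevity):
 *
 *	int done = 0;
 *
 *	while (!done) {
 *		error = xfs_bunmapi(tp, ip, start_fsb, len_fsb, 0, 1, &done);
 *		if (error)
 *			break;
 *	}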
524530f712c9SDave Chinner  */
524630f712c9SDave Chinner int						/* error */
__xfs_bunmapi(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t start,xfs_filblks_t * rlen,uint32_t flags,xfs_extnum_t nexts)52474453593bSDarrick J. Wong __xfs_bunmapi(
5248ccd9d911SBrian Foster 	struct xfs_trans	*tp,		/* transaction pointer */
524930f712c9SDave Chinner 	struct xfs_inode	*ip,		/* incore inode */
52508280f6edSChristoph Hellwig 	xfs_fileoff_t		start,		/* first file offset deleted */
52514453593bSDarrick J. Wong 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5252e7d410acSDave Chinner 	uint32_t		flags,		/* misc flags */
52532af52842SBrian Foster 	xfs_extnum_t		nexts)		/* number of extents max */
525430f712c9SDave Chinner {
5255ccd9d911SBrian Foster 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5256ccd9d911SBrian Foster 	struct xfs_bmbt_irec	del;		/* extent being deleted */
525730f712c9SDave Chinner 	int			error;		/* error return value */
525830f712c9SDave Chinner 	xfs_extnum_t		extno;		/* extent number in list */
5259ccd9d911SBrian Foster 	struct xfs_bmbt_irec	got;		/* current extent record */
52603ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;		/* inode fork pointer */
526130f712c9SDave Chinner 	int			isrt;		/* freeing in rt area */
526230f712c9SDave Chinner 	int			logflags;	/* transaction logging flags */
526330f712c9SDave Chinner 	xfs_extlen_t		mod;		/* rt extent offset */
5264a71895c5SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
526530f712c9SDave Chinner 	int			tmp_logflags;	/* partial logging flags */
526630f712c9SDave Chinner 	int			wasdel;		/* was a delayed alloc extent */
526730f712c9SDave Chinner 	int			whichfork;	/* data or attribute fork */
526830f712c9SDave Chinner 	xfs_fsblock_t		sum;
52694453593bSDarrick J. Wong 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
52708280f6edSChristoph Hellwig 	xfs_fileoff_t		end;
5271b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
5272b2b1712aSChristoph Hellwig 	bool			done = false;
527330f712c9SDave Chinner 
52748280f6edSChristoph Hellwig 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
527530f712c9SDave Chinner 
52763993baebSDarrick J. Wong 	whichfork = xfs_bmapi_whichfork(flags);
52773993baebSDarrick J. Wong 	ASSERT(whichfork != XFS_COW_FORK);
5278732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
5279f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
52802451337dSDave Chinner 		return -EFSCORRUPTED;
528175c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
52822451337dSDave Chinner 		return -EIO;
528330f712c9SDave Chinner 
528430f712c9SDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
528530f712c9SDave Chinner 	ASSERT(len > 0);
528630f712c9SDave Chinner 	ASSERT(nexts >= 0);
528730f712c9SDave Chinner 
5288862a804aSChristoph Hellwig 	error = xfs_iread_extents(tp, ip, whichfork);
5289862a804aSChristoph Hellwig 	if (error)
529030f712c9SDave Chinner 		return error;
5291862a804aSChristoph Hellwig 
52925d829300SEric Sandeen 	if (xfs_iext_count(ifp) == 0) {
52934453593bSDarrick J. Wong 		*rlen = 0;
529430f712c9SDave Chinner 		return 0;
529530f712c9SDave Chinner 	}
5296ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_blk_unmap);
529730f712c9SDave Chinner 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5298dc56015fSChristoph Hellwig 	end = start + len;
529930f712c9SDave Chinner 
5300b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5301dc56015fSChristoph Hellwig 		*rlen = 0;
5302dc56015fSChristoph Hellwig 		return 0;
530330f712c9SDave Chinner 	}
5304dc56015fSChristoph Hellwig 	end--;
53057efc7945SChristoph Hellwig 
530630f712c9SDave Chinner 	logflags = 0;
5307ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
530930f712c9SDave Chinner 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
531092219c29SDave Chinner 		cur->bc_ino.flags = 0;
531130f712c9SDave Chinner 	} else
531230f712c9SDave Chinner 		cur = NULL;
531330f712c9SDave Chinner 
531430f712c9SDave Chinner 	if (isrt) {
531530f712c9SDave Chinner 		/*
531630f712c9SDave Chinner 		 * Synchronize by locking the bitmap inode.
531730f712c9SDave Chinner 		 */
5318f4a0660dSDarrick J. Wong 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
531930f712c9SDave Chinner 		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5320f4a0660dSDarrick J. Wong 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5321f4a0660dSDarrick J. Wong 		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
532230f712c9SDave Chinner 	}
532330f712c9SDave Chinner 
532430f712c9SDave Chinner 	extno = 0;
5325b2b1712aSChristoph Hellwig 	while (end != (xfs_fileoff_t)-1 && end >= start &&
53264ed6435cSDarrick J. Wong 	       (nexts == 0 || extno < nexts)) {
532730f712c9SDave Chinner 		/*
53288280f6edSChristoph Hellwig 		 * Does end fall in a hole before the found extent?
532930f712c9SDave Chinner 		 * If so, just back up to the previous extent.
533030f712c9SDave Chinner 		 */
5331b2b1712aSChristoph Hellwig 		if (got.br_startoff > end &&
5332b2b1712aSChristoph Hellwig 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5333b2b1712aSChristoph Hellwig 			done = true;
533430f712c9SDave Chinner 			break;
533530f712c9SDave Chinner 		}
533630f712c9SDave Chinner 		/*
533730f712c9SDave Chinner 		 * Is the last block of this extent before the range
533830f712c9SDave Chinner 		 * we're supposed to delete?  If so, we're done.
533930f712c9SDave Chinner 		 */
53408280f6edSChristoph Hellwig 		end = XFS_FILEOFF_MIN(end,
534130f712c9SDave Chinner 			got.br_startoff + got.br_blockcount - 1);
53428280f6edSChristoph Hellwig 		if (end < start)
534330f712c9SDave Chinner 			break;
534430f712c9SDave Chinner 		/*
534530f712c9SDave Chinner 		 * Then deal with the (possibly delayed) allocated space
534630f712c9SDave Chinner 		 * we found.
534730f712c9SDave Chinner 		 */
534830f712c9SDave Chinner 		del = got;
534930f712c9SDave Chinner 		wasdel = isnullstartblock(del.br_startblock);
53505b094d6dSChristoph Hellwig 
535130f712c9SDave Chinner 		if (got.br_startoff < start) {
535230f712c9SDave Chinner 			del.br_startoff = start;
535330f712c9SDave Chinner 			del.br_blockcount -= start - got.br_startoff;
535430f712c9SDave Chinner 			if (!wasdel)
535530f712c9SDave Chinner 				del.br_startblock += start - got.br_startoff;
535630f712c9SDave Chinner 		}
53578280f6edSChristoph Hellwig 		if (del.br_startoff + del.br_blockcount > end + 1)
53588280f6edSChristoph Hellwig 			del.br_blockcount = end + 1 - del.br_startoff;
5359e1a4e37cSDarrick J. Wong 
53600703a8e1SDave Chinner 		if (!isrt)
53610703a8e1SDave Chinner 			goto delete;
53620703a8e1SDave Chinner 
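		/*
		 * See how far the end of the deleted range pokes into a
		 * realtime extent; a nonzero remainder means the end is not
		 * aligned to an rt extent boundary.
		 */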
536330f712c9SDave Chinner 		sum = del.br_startblock + del.br_blockcount;
53640703a8e1SDave Chinner 		div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
53650703a8e1SDave Chinner 		if (mod) {
536630f712c9SDave Chinner 			/*
536730f712c9SDave Chinner 			 * Realtime extent not lined up at the end.
536830f712c9SDave Chinner 			 * The extent could have been split into written
536930f712c9SDave Chinner 			 * and unwritten pieces, or we could just be
537030f712c9SDave Chinner 			 * unmapping part of it.  But we can't really
537130f712c9SDave Chinner 			 * get rid of part of a realtime extent.
537230f712c9SDave Chinner 			 */
5373daa79baeSChristoph Hellwig 			if (del.br_state == XFS_EXT_UNWRITTEN) {
537430f712c9SDave Chinner 				/*
537530f712c9SDave Chinner 				 * This piece is unwritten, or we're not
537630f712c9SDave Chinner 				 * using unwritten extents.  Skip over it.
537730f712c9SDave Chinner 				 */
53788280f6edSChristoph Hellwig 				ASSERT(end >= mod);
53798280f6edSChristoph Hellwig 				end -= mod > del.br_blockcount ?
538030f712c9SDave Chinner 					del.br_blockcount : mod;
5381b2b1712aSChristoph Hellwig 				if (end < got.br_startoff &&
5382b2b1712aSChristoph Hellwig 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5383b2b1712aSChristoph Hellwig 					done = true;
5384b2b1712aSChristoph Hellwig 					break;
538530f712c9SDave Chinner 				}
538630f712c9SDave Chinner 				continue;
538730f712c9SDave Chinner 			}
538830f712c9SDave Chinner 			/*
538930f712c9SDave Chinner 			 * It's written, turn it unwritten.
539030f712c9SDave Chinner 			 * This is better than zeroing it.
539130f712c9SDave Chinner 			 */
539230f712c9SDave Chinner 			ASSERT(del.br_state == XFS_EXT_NORM);
5393a7e5d03bSChristoph Hellwig 			ASSERT(tp->t_blk_res > 0);
539430f712c9SDave Chinner 			/*
539530f712c9SDave Chinner 			 * If this spans a realtime extent boundary,
539630f712c9SDave Chinner 			 * chop it back to the start of the one we end at.
539730f712c9SDave Chinner 			 */
539830f712c9SDave Chinner 			if (del.br_blockcount > mod) {
539930f712c9SDave Chinner 				del.br_startoff += del.br_blockcount - mod;
540030f712c9SDave Chinner 				del.br_startblock += del.br_blockcount - mod;
540130f712c9SDave Chinner 				del.br_blockcount = mod;
540230f712c9SDave Chinner 			}
540330f712c9SDave Chinner 			del.br_state = XFS_EXT_UNWRITTEN;
540430f712c9SDave Chinner 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5405b2b1712aSChristoph Hellwig 					whichfork, &icur, &cur, &del,
540692f9da30SBrian Foster 					&logflags);
540730f712c9SDave Chinner 			if (error)
540830f712c9SDave Chinner 				goto error0;
540930f712c9SDave Chinner 			goto nodelete;
541030f712c9SDave Chinner 		}
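		/*
		 * Now check how far the start of the deleted range sits
		 * inside a realtime extent; a nonzero remainder means the
		 * front is not rt extent aligned.
		 */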
54110703a8e1SDave Chinner 		div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
54120703a8e1SDave Chinner 		if (mod) {
54130c4da70cSOmar Sandoval 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
54140c4da70cSOmar Sandoval 
541530f712c9SDave Chinner 			/*
541630f712c9SDave Chinner 			 * Realtime extent is lined up at the end but not
541730f712c9SDave Chinner 			 * at the front.  We'll get rid of full extents if
541830f712c9SDave Chinner 			 * we can.
541930f712c9SDave Chinner 			 */
54200c4da70cSOmar Sandoval 			if (del.br_blockcount > off) {
54210c4da70cSOmar Sandoval 				del.br_blockcount -= off;
54220c4da70cSOmar Sandoval 				del.br_startoff += off;
54230c4da70cSOmar Sandoval 				del.br_startblock += off;
5424daa79baeSChristoph Hellwig 			} else if (del.br_startoff == start &&
542530f712c9SDave Chinner 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5426daa79baeSChristoph Hellwig 				    tp->t_blk_res == 0)) {
542730f712c9SDave Chinner 				/*
542830f712c9SDave Chinner 				 * Can't make it unwritten.  There isn't
542930f712c9SDave Chinner 				 * a full extent here so just skip it.
543030f712c9SDave Chinner 				 */
54318280f6edSChristoph Hellwig 				ASSERT(end >= del.br_blockcount);
54328280f6edSChristoph Hellwig 				end -= del.br_blockcount;
5433b2b1712aSChristoph Hellwig 				if (got.br_startoff > end &&
5434b2b1712aSChristoph Hellwig 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5435b2b1712aSChristoph Hellwig 					done = true;
5436b2b1712aSChristoph Hellwig 					break;
5437b2b1712aSChristoph Hellwig 				}
543830f712c9SDave Chinner 				continue;
543930f712c9SDave Chinner 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
54407efc7945SChristoph Hellwig 				struct xfs_bmbt_irec	prev;
54410c4da70cSOmar Sandoval 				xfs_fileoff_t		unwrite_start;
54427efc7945SChristoph Hellwig 
544330f712c9SDave Chinner 				/*
544430f712c9SDave Chinner 				 * This one is already unwritten.
544530f712c9SDave Chinner 				 * It must have a written left neighbor.
544630f712c9SDave Chinner 				 * Unwrite the killed part of that one and
544730f712c9SDave Chinner 				 * try again.
544830f712c9SDave Chinner 				 */
5449b2b1712aSChristoph Hellwig 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5450b2b1712aSChristoph Hellwig 					ASSERT(0);
545130f712c9SDave Chinner 				ASSERT(prev.br_state == XFS_EXT_NORM);
545230f712c9SDave Chinner 				ASSERT(!isnullstartblock(prev.br_startblock));
545330f712c9SDave Chinner 				ASSERT(del.br_startblock ==
545430f712c9SDave Chinner 				       prev.br_startblock + prev.br_blockcount);
54550c4da70cSOmar Sandoval 				unwrite_start = max3(start,
54560c4da70cSOmar Sandoval 						     del.br_startoff - mod,
54570c4da70cSOmar Sandoval 						     prev.br_startoff);
54580c4da70cSOmar Sandoval 				mod = unwrite_start - prev.br_startoff;
54590c4da70cSOmar Sandoval 				prev.br_startoff = unwrite_start;
546030f712c9SDave Chinner 				prev.br_startblock += mod;
54610c4da70cSOmar Sandoval 				prev.br_blockcount -= mod;
546230f712c9SDave Chinner 				prev.br_state = XFS_EXT_UNWRITTEN;
546330f712c9SDave Chinner 				error = xfs_bmap_add_extent_unwritten_real(tp,
5464b2b1712aSChristoph Hellwig 						ip, whichfork, &icur, &cur,
546592f9da30SBrian Foster 						&prev, &logflags);
546630f712c9SDave Chinner 				if (error)
546730f712c9SDave Chinner 					goto error0;
546830f712c9SDave Chinner 				goto nodelete;
546930f712c9SDave Chinner 			} else {
547030f712c9SDave Chinner 				ASSERT(del.br_state == XFS_EXT_NORM);
547130f712c9SDave Chinner 				del.br_state = XFS_EXT_UNWRITTEN;
547230f712c9SDave Chinner 				error = xfs_bmap_add_extent_unwritten_real(tp,
5473b2b1712aSChristoph Hellwig 						ip, whichfork, &icur, &cur,
547492f9da30SBrian Foster 						&del, &logflags);
547530f712c9SDave Chinner 				if (error)
547630f712c9SDave Chinner 					goto error0;
547730f712c9SDave Chinner 				goto nodelete;
547830f712c9SDave Chinner 			}
547930f712c9SDave Chinner 		}
548030f712c9SDave Chinner 
54810703a8e1SDave Chinner delete:
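		/*
		 * Delayed allocations are simply cancelled; real extents are
		 * removed from the fork and, unless this is a remap
		 * operation, queued for freeing.
		 */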
5482e1d7553fSChristoph Hellwig 		if (wasdel) {
5483b2b1712aSChristoph Hellwig 			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5484e1d7553fSChristoph Hellwig 					&got, &del);
5485e1d7553fSChristoph Hellwig 		} else {
548681ba8f3eSBrian Foster 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
548781ba8f3eSBrian Foster 					&del, &tmp_logflags, whichfork,
5488e1d7553fSChristoph Hellwig 					flags);
548930f712c9SDave Chinner 			logflags |= tmp_logflags;
5490e1d7553fSChristoph Hellwig 		}
5491e1d7553fSChristoph Hellwig 
549230f712c9SDave Chinner 		if (error)
549330f712c9SDave Chinner 			goto error0;
5494b2706a05SBrian Foster 
54958280f6edSChristoph Hellwig 		end = del.br_startoff - 1;
549630f712c9SDave Chinner nodelete:
549730f712c9SDave Chinner 		/*
549830f712c9SDave Chinner 		 * If not done, go on to the next (previous) record.
549930f712c9SDave Chinner 		 */
55008280f6edSChristoph Hellwig 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5501b2b1712aSChristoph Hellwig 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5502b2b1712aSChristoph Hellwig 			    (got.br_startoff > end &&
5503b2b1712aSChristoph Hellwig 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5504b2b1712aSChristoph Hellwig 				done = true;
5505b2b1712aSChristoph Hellwig 				break;
550630f712c9SDave Chinner 			}
550730f712c9SDave Chinner 			extno++;
550830f712c9SDave Chinner 		}
550930f712c9SDave Chinner 	}
5510b2b1712aSChristoph Hellwig 	if (done || end == (xfs_fileoff_t)-1 || end < start)
55114453593bSDarrick J. Wong 		*rlen = 0;
55124453593bSDarrick J. Wong 	else
55138280f6edSChristoph Hellwig 		*rlen = end - start + 1;
551430f712c9SDave Chinner 
551530f712c9SDave Chinner 	/*
551630f712c9SDave Chinner 	 * Convert to a btree if necessary.
551730f712c9SDave Chinner 	 */
551830f712c9SDave Chinner 	if (xfs_bmap_needs_btree(ip, whichfork)) {
551930f712c9SDave Chinner 		ASSERT(cur == NULL);
5520280253d2SBrian Foster 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5521280253d2SBrian Foster 				&tmp_logflags, whichfork);
552230f712c9SDave Chinner 		logflags |= tmp_logflags;
5523b101e334SChristoph Hellwig 	} else {
5524b101e334SChristoph Hellwig 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
552530f712c9SDave Chinner 			whichfork);
552630f712c9SDave Chinner 	}
5527b101e334SChristoph Hellwig 
552830f712c9SDave Chinner error0:
552930f712c9SDave Chinner 	/*
553030f712c9SDave Chinner 	 * Log everything.  Do this after the conversion; there's no point in
553130f712c9SDave Chinner 	 * logging the extent records if we've converted to btree format.
553230f712c9SDave Chinner 	 */
553330f712c9SDave Chinner 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5534f7e67b20SChristoph Hellwig 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
553530f712c9SDave Chinner 		logflags &= ~xfs_ilog_fext(whichfork);
553630f712c9SDave Chinner 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5537f7e67b20SChristoph Hellwig 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
553830f712c9SDave Chinner 		logflags &= ~xfs_ilog_fbroot(whichfork);
553930f712c9SDave Chinner 	/*
554030f712c9SDave Chinner 	 * Log the inode even in the error case; if the transaction
554130f712c9SDave Chinner 	 * is dirty we'll need to shut down the filesystem.
554230f712c9SDave Chinner 	 */
554330f712c9SDave Chinner 	if (logflags)
554430f712c9SDave Chinner 		xfs_trans_log_inode(tp, ip, logflags);
554530f712c9SDave Chinner 	if (cur) {
5546cf612de7SBrian Foster 		if (!error)
554792219c29SDave Chinner 			cur->bc_ino.allocated = 0;
55480b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
554930f712c9SDave Chinner 	}
555030f712c9SDave Chinner 	return error;
555130f712c9SDave Chinner }
555230f712c9SDave Chinner 
55534453593bSDarrick J. Wong /* Unmap a range of a file. */
55544453593bSDarrick J. Wong int
xfs_bunmapi(xfs_trans_t * tp,struct xfs_inode * ip,xfs_fileoff_t bno,xfs_filblks_t len,uint32_t flags,xfs_extnum_t nexts,int * done)55554453593bSDarrick J. Wong xfs_bunmapi(
55564453593bSDarrick J. Wong 	xfs_trans_t		*tp,
55574453593bSDarrick J. Wong 	struct xfs_inode	*ip,
55584453593bSDarrick J. Wong 	xfs_fileoff_t		bno,
55594453593bSDarrick J. Wong 	xfs_filblks_t		len,
5560e7d410acSDave Chinner 	uint32_t		flags,
55614453593bSDarrick J. Wong 	xfs_extnum_t		nexts,
55624453593bSDarrick J. Wong 	int			*done)
55634453593bSDarrick J. Wong {
55644453593bSDarrick J. Wong 	int			error;
55654453593bSDarrick J. Wong 
55662af52842SBrian Foster 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
55674453593bSDarrick J. Wong 	*done = (len == 0);
55684453593bSDarrick J. Wong 	return error;
55694453593bSDarrick J. Wong }
55704453593bSDarrick J. Wong 
557130f712c9SDave Chinner /*
5572ddb19e31SBrian Foster  * Determine whether an extent shift can be accomplished by a merge with the
5573ddb19e31SBrian Foster  * extent that precedes the target hole of the shift.
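 *
 * For example (hypothetical numbers): with left = [0, +10), got = [15, +5)
 * and shift = 5, the shifted start offset is 10, which abuts left in-file;
 * the merge additionally requires on-disk contiguity and matching state.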
5574ddb19e31SBrian Foster  */
5575ddb19e31SBrian Foster STATIC bool
xfs_bmse_can_merge(struct xfs_bmbt_irec * left,struct xfs_bmbt_irec * got,xfs_fileoff_t shift)5576ddb19e31SBrian Foster xfs_bmse_can_merge(
5577ddb19e31SBrian Foster 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5578ddb19e31SBrian Foster 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5579ddb19e31SBrian Foster 	xfs_fileoff_t		shift)	/* shift fsb */
5580ddb19e31SBrian Foster {
5581ddb19e31SBrian Foster 	xfs_fileoff_t		startoff;
5582ddb19e31SBrian Foster 
5583ddb19e31SBrian Foster 	startoff = got->br_startoff - shift;
5584ddb19e31SBrian Foster 
5585ddb19e31SBrian Foster 	/*
5586ddb19e31SBrian Foster 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5587ddb19e31SBrian Foster 	 * the preceding extent.
5588ddb19e31SBrian Foster 	 */
5589ddb19e31SBrian Foster 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5590ddb19e31SBrian Foster 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5591ddb19e31SBrian Foster 	    (left->br_state != got->br_state) ||
559295f0b95eSChandan Babu R 	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
5593ddb19e31SBrian Foster 		return false;
5594ddb19e31SBrian Foster 
5595ddb19e31SBrian Foster 	return true;
5596ddb19e31SBrian Foster }
5597ddb19e31SBrian Foster 
5598ddb19e31SBrian Foster /*
5599ddb19e31SBrian Foster  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5600ddb19e31SBrian Foster  * hole in the file. If an extent shift would result in the extent being fully
5601ddb19e31SBrian Foster  * adjacent to the extent that currently precedes the hole, we can merge with
5602ddb19e31SBrian Foster  * the preceding extent rather than do the shift.
5603ddb19e31SBrian Foster  *
5604ddb19e31SBrian Foster  * This function assumes the caller has verified a shift-by-merge is possible
5605ddb19e31SBrian Foster  * with the provided extents via xfs_bmse_can_merge().
5606ddb19e31SBrian Foster  */
5607ddb19e31SBrian Foster STATIC int
xfs_bmse_merge(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,xfs_fileoff_t shift,struct xfs_iext_cursor * icur,struct xfs_bmbt_irec * got,struct xfs_bmbt_irec * left,struct xfs_btree_cur * cur,int * logflags)5608ddb19e31SBrian Foster xfs_bmse_merge(
56090f37d178SBrian Foster 	struct xfs_trans		*tp,
5610ddb19e31SBrian Foster 	struct xfs_inode		*ip,
5611ddb19e31SBrian Foster 	int				whichfork,
5612ddb19e31SBrian Foster 	xfs_fileoff_t			shift,		/* shift fsb */
5613b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor		*icur,
56144da6b514SChristoph Hellwig 	struct xfs_bmbt_irec		*got,		/* extent to shift */
56154da6b514SChristoph Hellwig 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5616ddb19e31SBrian Foster 	struct xfs_btree_cur		*cur,
56170f37d178SBrian Foster 	int				*logflags)	/* output */
5618ddb19e31SBrian Foster {
5619732436efSDarrick J. Wong 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
56204da6b514SChristoph Hellwig 	struct xfs_bmbt_irec		new;
5621ddb19e31SBrian Foster 	xfs_filblks_t			blockcount;
5622ddb19e31SBrian Foster 	int				error, i;
56235fb5aeeeSEric Sandeen 	struct xfs_mount		*mp = ip->i_mount;
5624ddb19e31SBrian Foster 
56254da6b514SChristoph Hellwig 	blockcount = left->br_blockcount + got->br_blockcount;
5626ddb19e31SBrian Foster 
5627ddb19e31SBrian Foster 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5628ddb19e31SBrian Foster 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
56294da6b514SChristoph Hellwig 	ASSERT(xfs_bmse_can_merge(left, got, shift));
5630ddb19e31SBrian Foster 
56314da6b514SChristoph Hellwig 	new = *left;
56324da6b514SChristoph Hellwig 	new.br_blockcount = blockcount;
5633ddb19e31SBrian Foster 
5634ddb19e31SBrian Foster 	/*
5635ddb19e31SBrian Foster 	 * Update the on-disk extent count, update the btree if necessary,
5636ddb19e31SBrian Foster 	 * and log the inode.
5637ddb19e31SBrian Foster 	 */
5638daf83964SChristoph Hellwig 	ifp->if_nextents--;
5639ddb19e31SBrian Foster 	*logflags |= XFS_ILOG_CORE;
5640ddb19e31SBrian Foster 	if (!cur) {
5641ddb19e31SBrian Foster 		*logflags |= XFS_ILOG_DEXT;
56424da6b514SChristoph Hellwig 		goto done;
5643ddb19e31SBrian Foster 	}
5644ddb19e31SBrian Foster 
5645ddb19e31SBrian Foster 	/* lookup and remove the extent to merge */
5646e16cf9b0SChristoph Hellwig 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5647ddb19e31SBrian Foster 	if (error)
56484db431f5SDave Chinner 		return error;
5649f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, i != 1))
5650f9e03706SDarrick J. Wong 		return -EFSCORRUPTED;
5651ddb19e31SBrian Foster 
5652ddb19e31SBrian Foster 	error = xfs_btree_delete(cur, &i);
5653ddb19e31SBrian Foster 	if (error)
56544db431f5SDave Chinner 		return error;
5655f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, i != 1))
5656f9e03706SDarrick J. Wong 		return -EFSCORRUPTED;
5657ddb19e31SBrian Foster 
5658ddb19e31SBrian Foster 	/* lookup and update size of the previous extent */
5659e16cf9b0SChristoph Hellwig 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5660ddb19e31SBrian Foster 	if (error)
56614db431f5SDave Chinner 		return error;
5662f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, i != 1))
5663f9e03706SDarrick J. Wong 		return -EFSCORRUPTED;
5664ddb19e31SBrian Foster 
5665a67d00a5SChristoph Hellwig 	error = xfs_bmbt_update(cur, &new);
56664da6b514SChristoph Hellwig 	if (error)
56674da6b514SChristoph Hellwig 		return error;
5668ddb19e31SBrian Foster 
5669e20e174cSBrian Foster 	/* change to extent format if required after extent removal */
5670e20e174cSBrian Foster 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5671e20e174cSBrian Foster 	if (error)
5672e20e174cSBrian Foster 		return error;
5673e20e174cSBrian Foster 
56744da6b514SChristoph Hellwig done:
5675c38ccf59SChristoph Hellwig 	xfs_iext_remove(ip, icur, 0);
5676daf83964SChristoph Hellwig 	xfs_iext_prev(ifp, icur);
5677b2b1712aSChristoph Hellwig 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5678b2b1712aSChristoph Hellwig 			&new);
56794da6b514SChristoph Hellwig 
56804cc1ee5eSDarrick J. Wong 	/* update reverse mapping. rmap functions merge the rmaps for us */
5681bc46ac64SDarrick J. Wong 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
56824cc1ee5eSDarrick J. Wong 	memcpy(&new, got, sizeof(new));
56834cc1ee5eSDarrick J. Wong 	new.br_startoff = left->br_startoff + left->br_blockcount;
5684bc46ac64SDarrick J. Wong 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5685bc46ac64SDarrick J. Wong 	return 0;
5686ddb19e31SBrian Foster }
5687ddb19e31SBrian Foster 
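/*
 * Move the file offset of the extent in @got to @startoff, updating the
 * incore extent list, the bmap btree record (if a cursor is supplied) and
 * the reverse mapping.
 */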
5688bf806280SChristoph Hellwig static int
xfs_bmap_shift_update_extent(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,struct xfs_iext_cursor * icur,struct xfs_bmbt_irec * got,struct xfs_btree_cur * cur,int * logflags,xfs_fileoff_t startoff)5689bf806280SChristoph Hellwig xfs_bmap_shift_update_extent(
56900f37d178SBrian Foster 	struct xfs_trans	*tp,
5691a979bdfeSBrian Foster 	struct xfs_inode	*ip,
5692a979bdfeSBrian Foster 	int			whichfork,
5693b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
56944da6b514SChristoph Hellwig 	struct xfs_bmbt_irec	*got,
5695a979bdfeSBrian Foster 	struct xfs_btree_cur	*cur,
5696a904b1caSNamjae Jeon 	int			*logflags,
5697bf806280SChristoph Hellwig 	xfs_fileoff_t		startoff)
5698a979bdfeSBrian Foster {
5699bf806280SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
570011f75b3bSChristoph Hellwig 	struct xfs_bmbt_irec	prev = *got;
5701bf806280SChristoph Hellwig 	int			error, i;
5702a979bdfeSBrian Foster 
5703a979bdfeSBrian Foster 	*logflags |= XFS_ILOG_CORE;
5704a979bdfeSBrian Foster 
570511f75b3bSChristoph Hellwig 	got->br_startoff = startoff;
57064da6b514SChristoph Hellwig 
57074da6b514SChristoph Hellwig 	if (cur) {
570811f75b3bSChristoph Hellwig 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5709a979bdfeSBrian Foster 		if (error)
5710a979bdfeSBrian Foster 			return error;
5711f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, i != 1))
5712f9e03706SDarrick J. Wong 			return -EFSCORRUPTED;
5713a979bdfeSBrian Foster 
571411f75b3bSChristoph Hellwig 		error = xfs_bmbt_update(cur, got);
57159c194644SDarrick J. Wong 		if (error)
57169c194644SDarrick J. Wong 			return error;
57174da6b514SChristoph Hellwig 	} else {
57184da6b514SChristoph Hellwig 		*logflags |= XFS_ILOG_DEXT;
57194da6b514SChristoph Hellwig 	}
57209c194644SDarrick J. Wong 
5721b2b1712aSChristoph Hellwig 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5722b2b1712aSChristoph Hellwig 			got);
57234da6b514SChristoph Hellwig 
57249c194644SDarrick J. Wong 	/* update reverse mapping */
5725bc46ac64SDarrick J. Wong 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5726bc46ac64SDarrick J. Wong 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5727bc46ac64SDarrick J. Wong 	return 0;
5728a979bdfeSBrian Foster }
5729a979bdfeSBrian Foster 
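/*
 * Shift the extent at *next_fsb one step left by offset_shift_fsb, merging
 * it into the preceding extent if the two become contiguous.  On success,
 * *next_fsb is advanced to the next candidate extent; *done is set once
 * there are no more extents to shift.
 */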
573030f712c9SDave Chinner int
xfs_bmap_collapse_extents(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t * next_fsb,xfs_fileoff_t offset_shift_fsb,bool * done)5731ecfea3f0SChristoph Hellwig xfs_bmap_collapse_extents(
573230f712c9SDave Chinner 	struct xfs_trans	*tp,
573330f712c9SDave Chinner 	struct xfs_inode	*ip,
5734a904b1caSNamjae Jeon 	xfs_fileoff_t		*next_fsb,
573530f712c9SDave Chinner 	xfs_fileoff_t		offset_shift_fsb,
5736333f950cSBrian Foster 	bool			*done)
573730f712c9SDave Chinner {
5738ecfea3f0SChristoph Hellwig 	int			whichfork = XFS_DATA_FORK;
5739ecfea3f0SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
5740732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5741ca446d88SBrian Foster 	struct xfs_btree_cur	*cur = NULL;
5742bf806280SChristoph Hellwig 	struct xfs_bmbt_irec	got, prev;
5743b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
5744bf806280SChristoph Hellwig 	xfs_fileoff_t		new_startoff;
574530f712c9SDave Chinner 	int			error = 0;
5746ca446d88SBrian Foster 	int			logflags = 0;
574730f712c9SDave Chinner 
5748f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5749a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
57502451337dSDave Chinner 		return -EFSCORRUPTED;
575130f712c9SDave Chinner 	}
575230f712c9SDave Chinner 
575375c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
57542451337dSDave Chinner 		return -EIO;
575530f712c9SDave Chinner 
5756ecfea3f0SChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
575730f712c9SDave Chinner 
575830f712c9SDave Chinner 	error = xfs_iread_extents(tp, ip, whichfork);
575930f712c9SDave Chinner 	if (error)
576030f712c9SDave Chinner 		return error;
576130f712c9SDave Chinner 
5762ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5763ddb19e31SBrian Foster 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
576492219c29SDave Chinner 		cur->bc_ino.flags = 0;
5765ddb19e31SBrian Foster 	}
5766ddb19e31SBrian Foster 
5767b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5768ecfea3f0SChristoph Hellwig 		*done = true;
5769ecfea3f0SChristoph Hellwig 		goto del_cursor;
5770ecfea3f0SChristoph Hellwig 	}
5771f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5772f9e03706SDarrick J. Wong 		error = -EFSCORRUPTED;
5773f9e03706SDarrick J. Wong 		goto del_cursor;
5774f9e03706SDarrick J. Wong 	}
5775ecfea3f0SChristoph Hellwig 
5776bf806280SChristoph Hellwig 	new_startoff = got.br_startoff - offset_shift_fsb;
5777b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5778bf806280SChristoph Hellwig 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5779bf806280SChristoph Hellwig 			error = -EINVAL;
5780bf806280SChristoph Hellwig 			goto del_cursor;
5781bf806280SChristoph Hellwig 		}
5782bf806280SChristoph Hellwig 
5783bf806280SChristoph Hellwig 		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
57840f37d178SBrian Foster 			error = xfs_bmse_merge(tp, ip, whichfork,
57850f37d178SBrian Foster 					offset_shift_fsb, &icur, &got, &prev,
57860f37d178SBrian Foster 					cur, &logflags);
5787ecfea3f0SChristoph Hellwig 			if (error)
5788ecfea3f0SChristoph Hellwig 				goto del_cursor;
5789bf806280SChristoph Hellwig 			goto done;
5790bf806280SChristoph Hellwig 		}
5791bf806280SChristoph Hellwig 	} else {
5792bf806280SChristoph Hellwig 		if (got.br_startoff < offset_shift_fsb) {
5793bf806280SChristoph Hellwig 			error = -EINVAL;
5794bf806280SChristoph Hellwig 			goto del_cursor;
5795bf806280SChristoph Hellwig 		}
5796bf806280SChristoph Hellwig 	}
5797bf806280SChristoph Hellwig 
57980f37d178SBrian Foster 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
57990f37d178SBrian Foster 			cur, &logflags, new_startoff);
5800bf806280SChristoph Hellwig 	if (error)
5801bf806280SChristoph Hellwig 		goto del_cursor;
580240591bdbSChristoph Hellwig 
580342630361SChristoph Hellwig done:
5804b2b1712aSChristoph Hellwig 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5805ecfea3f0SChristoph Hellwig 		*done = true;
5806ecfea3f0SChristoph Hellwig 		goto del_cursor;
5807ecfea3f0SChristoph Hellwig 	}
5808ecfea3f0SChristoph Hellwig 
5809ecfea3f0SChristoph Hellwig 	*next_fsb = got.br_startoff;
5810ecfea3f0SChristoph Hellwig del_cursor:
5811ecfea3f0SChristoph Hellwig 	if (cur)
58120b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
5813ecfea3f0SChristoph Hellwig 	if (logflags)
5814ecfea3f0SChristoph Hellwig 		xfs_trans_log_inode(tp, ip, logflags);
5815ecfea3f0SChristoph Hellwig 	return error;
5816ecfea3f0SChristoph Hellwig }
5817ecfea3f0SChristoph Hellwig 
5818f62cb48eSDarrick J. Wong /* Make sure we won't be right-shifting an extent past the maximum bound. */
5819f62cb48eSDarrick J. Wong int
xfs_bmap_can_insert_extents(struct xfs_inode * ip,xfs_fileoff_t off,xfs_fileoff_t shift)5820f62cb48eSDarrick J. Wong xfs_bmap_can_insert_extents(
5821f62cb48eSDarrick J. Wong 	struct xfs_inode	*ip,
5822f62cb48eSDarrick J. Wong 	xfs_fileoff_t		off,
5823f62cb48eSDarrick J. Wong 	xfs_fileoff_t		shift)
5824f62cb48eSDarrick J. Wong {
5825f62cb48eSDarrick J. Wong 	struct xfs_bmbt_irec	got;
5826f62cb48eSDarrick J. Wong 	int			is_empty;
5827f62cb48eSDarrick J. Wong 	int			error = 0;
5828f62cb48eSDarrick J. Wong 
5829f62cb48eSDarrick J. Wong 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5830f62cb48eSDarrick J. Wong 
583175c8c50fSDave Chinner 	if (xfs_is_shutdown(ip->i_mount))
5832f62cb48eSDarrick J. Wong 		return -EIO;
5833f62cb48eSDarrick J. Wong 
5834f62cb48eSDarrick J. Wong 	xfs_ilock(ip, XFS_ILOCK_EXCL);
5835f62cb48eSDarrick J. Wong 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5836f62cb48eSDarrick J. Wong 	if (!error && !is_empty && got.br_startoff >= off &&
5837f62cb48eSDarrick J. Wong 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5838f62cb48eSDarrick J. Wong 		error = -EINVAL;
5839f62cb48eSDarrick J. Wong 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
5840f62cb48eSDarrick J. Wong 
5841f62cb48eSDarrick J. Wong 	return error;
5842f62cb48eSDarrick J. Wong }
5843f62cb48eSDarrick J. Wong 
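/*
 * Shift the extent at *next_fsb (or the rightmost extent, when *next_fsb is
 * NULLFSBLOCK) one step right by offset_shift_fsb.  Right shifts never
 * merge; the walk proceeds towards lower file offsets and *done is set once
 * extents at or below stop_fsb are reached.
 */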
5844ecfea3f0SChristoph Hellwig int
xfs_bmap_insert_extents(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t * next_fsb,xfs_fileoff_t offset_shift_fsb,bool * done,xfs_fileoff_t stop_fsb)5845ecfea3f0SChristoph Hellwig xfs_bmap_insert_extents(
5846ecfea3f0SChristoph Hellwig 	struct xfs_trans	*tp,
5847ecfea3f0SChristoph Hellwig 	struct xfs_inode	*ip,
5848ecfea3f0SChristoph Hellwig 	xfs_fileoff_t		*next_fsb,
5849ecfea3f0SChristoph Hellwig 	xfs_fileoff_t		offset_shift_fsb,
5850ecfea3f0SChristoph Hellwig 	bool			*done,
5851333f950cSBrian Foster 	xfs_fileoff_t		stop_fsb)
5852ecfea3f0SChristoph Hellwig {
5853ecfea3f0SChristoph Hellwig 	int			whichfork = XFS_DATA_FORK;
5854ecfea3f0SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
5855732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5856ecfea3f0SChristoph Hellwig 	struct xfs_btree_cur	*cur = NULL;
58575936dc54SChristoph Hellwig 	struct xfs_bmbt_irec	got, next;
5858b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
5859bf806280SChristoph Hellwig 	xfs_fileoff_t		new_startoff;
5860ecfea3f0SChristoph Hellwig 	int			error = 0;
5861ecfea3f0SChristoph Hellwig 	int			logflags = 0;
5862ecfea3f0SChristoph Hellwig 
5863f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5864a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5865ecfea3f0SChristoph Hellwig 		return -EFSCORRUPTED;
5866ecfea3f0SChristoph Hellwig 	}
5867ecfea3f0SChristoph Hellwig 
586875c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
5869ecfea3f0SChristoph Hellwig 		return -EIO;
5870ecfea3f0SChristoph Hellwig 
5871ecfea3f0SChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5872ecfea3f0SChristoph Hellwig 
5873ecfea3f0SChristoph Hellwig 	error = xfs_iread_extents(tp, ip, whichfork);
5874ecfea3f0SChristoph Hellwig 	if (error)
5875ecfea3f0SChristoph Hellwig 		return error;
5876ecfea3f0SChristoph Hellwig 
5877ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5878ecfea3f0SChristoph Hellwig 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
587992219c29SDave Chinner 		cur->bc_ino.flags = 0;
5880ecfea3f0SChristoph Hellwig 	}
5881ecfea3f0SChristoph Hellwig 
5882a904b1caSNamjae Jeon 	if (*next_fsb == NULLFSBLOCK) {
5883b2b1712aSChristoph Hellwig 		xfs_iext_last(ifp, &icur);
5884b2b1712aSChristoph Hellwig 		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
58855936dc54SChristoph Hellwig 		    stop_fsb > got.br_startoff) {
5886ecfea3f0SChristoph Hellwig 			*done = true;
5887a904b1caSNamjae Jeon 			goto del_cursor;
5888a904b1caSNamjae Jeon 		}
588905b7c8abSChristoph Hellwig 	} else {
5890b2b1712aSChristoph Hellwig 		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5891ecfea3f0SChristoph Hellwig 			*done = true;
5892a904b1caSNamjae Jeon 			goto del_cursor;
5893a904b1caSNamjae Jeon 		}
589405b7c8abSChristoph Hellwig 	}
5895f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5896f9e03706SDarrick J. Wong 		error = -EFSCORRUPTED;
5897f9e03706SDarrick J. Wong 		goto del_cursor;
5898f9e03706SDarrick J. Wong 	}
5899a904b1caSNamjae Jeon 
5900d0c22041SBrian Foster 	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
5901c2414ad6SDarrick J. Wong 		error = -EFSCORRUPTED;
5902a904b1caSNamjae Jeon 		goto del_cursor;
5903a904b1caSNamjae Jeon 	}
5904a904b1caSNamjae Jeon 
5905bf806280SChristoph Hellwig 	new_startoff = got.br_startoff + offset_shift_fsb;
5906b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5907bf806280SChristoph Hellwig 		if (new_startoff + got.br_blockcount > next.br_startoff) {
5908bf806280SChristoph Hellwig 			error = -EINVAL;
5909bf806280SChristoph Hellwig 			goto del_cursor;
5910bf806280SChristoph Hellwig 		}
5911bf806280SChristoph Hellwig 
5912bf806280SChristoph Hellwig 		/*
5913bf806280SChristoph Hellwig 		 * Unlike a left shift (which involves a hole punch), a right
5914bf806280SChristoph Hellwig 		 * shift does not modify extent neighbors in any way.  We should
5915bf806280SChristoph Hellwig 		 * never find mergeable extents in this scenario.  Check anyway
5916bf806280SChristoph Hellwig 		 * and warn if we encounter two extents that could be one.
5917bf806280SChristoph Hellwig 		 */
5918bf806280SChristoph Hellwig 		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5919bf806280SChristoph Hellwig 			WARN_ON_ONCE(1);
5920bf806280SChristoph Hellwig 	}
5921bf806280SChristoph Hellwig 
59220f37d178SBrian Foster 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
59230f37d178SBrian Foster 			cur, &logflags, new_startoff);
5924ddb19e31SBrian Foster 	if (error)
5925ddb19e31SBrian Foster 		goto del_cursor;
59265936dc54SChristoph Hellwig 
5927b2b1712aSChristoph Hellwig 	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
59285936dc54SChristoph Hellwig 	    stop_fsb >= got.br_startoff + got.br_blockcount) {
5929ecfea3f0SChristoph Hellwig 		*done = true;
59306b18af0dSChristoph Hellwig 		goto del_cursor;
5931a904b1caSNamjae Jeon 	}
593230f712c9SDave Chinner 
59332c845f5aSBrian Foster 	*next_fsb = got.br_startoff;
593430f712c9SDave Chinner del_cursor:
593530f712c9SDave Chinner 	if (cur)
59360b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
5937ca446d88SBrian Foster 	if (logflags)
593830f712c9SDave Chinner 		xfs_trans_log_inode(tp, ip, logflags);
593930f712c9SDave Chinner 	return error;
594030f712c9SDave Chinner }
5941a904b1caSNamjae Jeon 
5942a904b1caSNamjae Jeon /*
5943b2b1712aSChristoph Hellwig  * Split an extent into two extents at split_fsb, so that split_fsb becomes
5944b2b1712aSChristoph Hellwig  * the first block of the new extent.  If split_fsb lies in a hole or is
5945b2b1712aSChristoph Hellwig  * already the first block of an extent, there is nothing to do and we
5946b2b1712aSChristoph Hellwig  * return 0.
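 *
 * For example (hypothetical numbers): splitting the extent [10, +20) at
 * split_fsb = 15 leaves [10, +5) in place and inserts a new extent
 * [15, +15).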
5947a904b1caSNamjae Jeon  */
5948b73df17eSBrian Foster int
xfs_bmap_split_extent(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t split_fsb)5949b73df17eSBrian Foster xfs_bmap_split_extent(
5950a904b1caSNamjae Jeon 	struct xfs_trans	*tp,
5951a904b1caSNamjae Jeon 	struct xfs_inode	*ip,
59524b77a088SBrian Foster 	xfs_fileoff_t		split_fsb)
5953a904b1caSNamjae Jeon {
5954a904b1caSNamjae Jeon 	int				whichfork = XFS_DATA_FORK;
5955732436efSDarrick J. Wong 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
5956a904b1caSNamjae Jeon 	struct xfs_btree_cur		*cur = NULL;
5957a904b1caSNamjae Jeon 	struct xfs_bmbt_irec		got;
5958a904b1caSNamjae Jeon 	struct xfs_bmbt_irec		new; /* split extent */
5959a904b1caSNamjae Jeon 	struct xfs_mount		*mp = ip->i_mount;
5960a904b1caSNamjae Jeon 	xfs_fsblock_t			gotblkcnt; /* new block count for got */
5961b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor		icur;
5962a904b1caSNamjae Jeon 	int				error = 0;
5963a904b1caSNamjae Jeon 	int				logflags = 0;
5964a904b1caSNamjae Jeon 	int				i = 0;
5965a904b1caSNamjae Jeon 
5966f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5967a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5968a904b1caSNamjae Jeon 		return -EFSCORRUPTED;
5969a904b1caSNamjae Jeon 	}
5970a904b1caSNamjae Jeon 
597175c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
5972a904b1caSNamjae Jeon 		return -EIO;
5973a904b1caSNamjae Jeon 
5974a904b1caSNamjae Jeon 	/* Read in all the extents */
5975a904b1caSNamjae Jeon 	error = xfs_iread_extents(tp, ip, whichfork);
5976a904b1caSNamjae Jeon 	if (error)
5977a904b1caSNamjae Jeon 		return error;
5978a904b1caSNamjae Jeon 
5979a904b1caSNamjae Jeon 	/*
59804c35445bSChristoph Hellwig 	 * If there are no extents, or split_fsb lies in a hole, we are done.
5981a904b1caSNamjae Jeon 	 */
5982b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
59834c35445bSChristoph Hellwig 	    got.br_startoff >= split_fsb)
5984a904b1caSNamjae Jeon 		return 0;
5985a904b1caSNamjae Jeon 
5986a904b1caSNamjae Jeon 	gotblkcnt = split_fsb - got.br_startoff;
5987a904b1caSNamjae Jeon 	new.br_startoff = split_fsb;
5988a904b1caSNamjae Jeon 	new.br_startblock = got.br_startblock + gotblkcnt;
5989a904b1caSNamjae Jeon 	new.br_blockcount = got.br_blockcount - gotblkcnt;
5990a904b1caSNamjae Jeon 	new.br_state = got.br_state;
5991a904b1caSNamjae Jeon 
5992ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5993a904b1caSNamjae Jeon 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
599492219c29SDave Chinner 		cur->bc_ino.flags = 0;
5995e16cf9b0SChristoph Hellwig 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5996a904b1caSNamjae Jeon 		if (error)
5997a904b1caSNamjae Jeon 			goto del_cursor;
5998f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5999f9e03706SDarrick J. Wong 			error = -EFSCORRUPTED;
6000f9e03706SDarrick J. Wong 			goto del_cursor;
6001f9e03706SDarrick J. Wong 		}
6002a904b1caSNamjae Jeon 	}
6003a904b1caSNamjae Jeon 
6004a904b1caSNamjae Jeon 	got.br_blockcount = gotblkcnt;
6005b2b1712aSChristoph Hellwig 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6006b2b1712aSChristoph Hellwig 			&got);
6007a904b1caSNamjae Jeon 
6008a904b1caSNamjae Jeon 	logflags = XFS_ILOG_CORE;
6009a904b1caSNamjae Jeon 	if (cur) {
6010a67d00a5SChristoph Hellwig 		error = xfs_bmbt_update(cur, &got);
6011a904b1caSNamjae Jeon 		if (error)
6012a904b1caSNamjae Jeon 			goto del_cursor;
6013a904b1caSNamjae Jeon 	} else
6014a904b1caSNamjae Jeon 		logflags |= XFS_ILOG_DEXT;
6015a904b1caSNamjae Jeon 
6016a904b1caSNamjae Jeon 	/* Add new extent */
6017b2b1712aSChristoph Hellwig 	xfs_iext_next(ifp, &icur);
60180254c2f2SChristoph Hellwig 	xfs_iext_insert(ip, &icur, &new, 0);
6019daf83964SChristoph Hellwig 	ifp->if_nextents++;
6020a904b1caSNamjae Jeon 
6021a904b1caSNamjae Jeon 	if (cur) {
6022e16cf9b0SChristoph Hellwig 		error = xfs_bmbt_lookup_eq(cur, &new, &i);
6023a904b1caSNamjae Jeon 		if (error)
6024a904b1caSNamjae Jeon 			goto del_cursor;
6025f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, i != 0)) {
6026f9e03706SDarrick J. Wong 			error = -EFSCORRUPTED;
6027f9e03706SDarrick J. Wong 			goto del_cursor;
6028f9e03706SDarrick J. Wong 		}
6029a904b1caSNamjae Jeon 		error = xfs_btree_insert(cur, &i);
6030a904b1caSNamjae Jeon 		if (error)
6031a904b1caSNamjae Jeon 			goto del_cursor;
6032f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6033f9e03706SDarrick J. Wong 			error = -EFSCORRUPTED;
6034f9e03706SDarrick J. Wong 			goto del_cursor;
6035f9e03706SDarrick J. Wong 		}
6036a904b1caSNamjae Jeon 	}
6037a904b1caSNamjae Jeon 
6038a904b1caSNamjae Jeon 	/*
6039a904b1caSNamjae Jeon 	 * Convert to a btree if necessary.
6040a904b1caSNamjae Jeon 	 */
6041a904b1caSNamjae Jeon 	if (xfs_bmap_needs_btree(ip, whichfork)) {
6042a904b1caSNamjae Jeon 		int tmp_logflags; /* partial log flag return val */
6043a904b1caSNamjae Jeon 
6044a904b1caSNamjae Jeon 		ASSERT(cur == NULL);
6045280253d2SBrian Foster 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6046280253d2SBrian Foster 				&tmp_logflags, whichfork);
6047a904b1caSNamjae Jeon 		logflags |= tmp_logflags;
6048a904b1caSNamjae Jeon 	}
6049a904b1caSNamjae Jeon 
6050a904b1caSNamjae Jeon del_cursor:
6051a904b1caSNamjae Jeon 	if (cur) {
605292219c29SDave Chinner 		cur->bc_ino.allocated = 0;
60530b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
6054a904b1caSNamjae Jeon 	}
6055a904b1caSNamjae Jeon 
6056a904b1caSNamjae Jeon 	if (logflags)
6057a904b1caSNamjae Jeon 		xfs_trans_log_inode(tp, ip, logflags);
6058a904b1caSNamjae Jeon 	return error;
6059a904b1caSNamjae Jeon }
6060a904b1caSNamjae Jeon 
60619f3afb57SDarrick J. Wong /* Deferred mapping is only for real extents in the data fork. */
60629f3afb57SDarrick J. Wong static bool
xfs_bmap_is_update_needed(struct xfs_bmbt_irec * bmap)60639f3afb57SDarrick J. Wong xfs_bmap_is_update_needed(
60649f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec	*bmap)
60659f3afb57SDarrick J. Wong {
60669f3afb57SDarrick J. Wong 	return  bmap->br_startblock != HOLESTARTBLOCK &&
60679f3afb57SDarrick J. Wong 		bmap->br_startblock != DELAYSTARTBLOCK;
60689f3afb57SDarrick J. Wong }
60699f3afb57SDarrick J. Wong 
60709f3afb57SDarrick J. Wong /* Record a bmap intent. */
60719f3afb57SDarrick J. Wong static int
__xfs_bmap_add(struct xfs_trans * tp,enum xfs_bmap_intent_type type,struct xfs_inode * ip,int whichfork,struct xfs_bmbt_irec * bmap)60729f3afb57SDarrick J. Wong __xfs_bmap_add(
60730f37d178SBrian Foster 	struct xfs_trans		*tp,
60749f3afb57SDarrick J. Wong 	enum xfs_bmap_intent_type	type,
60759f3afb57SDarrick J. Wong 	struct xfs_inode		*ip,
60769f3afb57SDarrick J. Wong 	int				whichfork,
60779f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec		*bmap)
60789f3afb57SDarrick J. Wong {
60799f3afb57SDarrick J. Wong 	struct xfs_bmap_intent		*bi;
60809f3afb57SDarrick J. Wong 
60810f37d178SBrian Foster 	trace_xfs_bmap_defer(tp->t_mountp,
60820f37d178SBrian Foster 			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
60839f3afb57SDarrick J. Wong 			type,
60840f37d178SBrian Foster 			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
60859f3afb57SDarrick J. Wong 			ip->i_ino, whichfork,
60869f3afb57SDarrick J. Wong 			bmap->br_startoff,
60879f3afb57SDarrick J. Wong 			bmap->br_blockcount,
60889f3afb57SDarrick J. Wong 			bmap->br_state);
60899f3afb57SDarrick J. Wong 
6090f3c799c2SDarrick J. Wong 	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
60919f3afb57SDarrick J. Wong 	INIT_LIST_HEAD(&bi->bi_list);
60929f3afb57SDarrick J. Wong 	bi->bi_type = type;
60939f3afb57SDarrick J. Wong 	bi->bi_owner = ip;
60949f3afb57SDarrick J. Wong 	bi->bi_whichfork = whichfork;
60959f3afb57SDarrick J. Wong 	bi->bi_bmap = *bmap;
60969f3afb57SDarrick J. Wong 
6097774a99b4SDarrick J. Wong 	xfs_bmap_update_get_group(tp->t_mountp, bi);
60980f37d178SBrian Foster 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
60999f3afb57SDarrick J. Wong 	return 0;
61009f3afb57SDarrick J. Wong }
61019f3afb57SDarrick J. Wong 
61029f3afb57SDarrick J. Wong /* Map an extent into a file. */
61033e08f42aSDarrick J. Wong void
xfs_bmap_map_extent(struct xfs_trans * tp,struct xfs_inode * ip,struct xfs_bmbt_irec * PREV)61049f3afb57SDarrick J. Wong xfs_bmap_map_extent(
61050f37d178SBrian Foster 	struct xfs_trans	*tp,
61069f3afb57SDarrick J. Wong 	struct xfs_inode	*ip,
61079f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec	*PREV)
61089f3afb57SDarrick J. Wong {
61099f3afb57SDarrick J. Wong 	if (!xfs_bmap_is_update_needed(PREV))
61103e08f42aSDarrick J. Wong 		return;
61119f3afb57SDarrick J. Wong 
61123e08f42aSDarrick J. Wong 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
61139f3afb57SDarrick J. Wong }
61149f3afb57SDarrick J. Wong 
61159f3afb57SDarrick J. Wong /* Unmap an extent out of a file. */
61163e08f42aSDarrick J. Wong void
xfs_bmap_unmap_extent(struct xfs_trans * tp,struct xfs_inode * ip,struct xfs_bmbt_irec * PREV)61179f3afb57SDarrick J. Wong xfs_bmap_unmap_extent(
61180f37d178SBrian Foster 	struct xfs_trans	*tp,
61199f3afb57SDarrick J. Wong 	struct xfs_inode	*ip,
61209f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec	*PREV)
61219f3afb57SDarrick J. Wong {
61229f3afb57SDarrick J. Wong 	if (!xfs_bmap_is_update_needed(PREV))
61233e08f42aSDarrick J. Wong 		return;
61249f3afb57SDarrick J. Wong 
61253e08f42aSDarrick J. Wong 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
61269f3afb57SDarrick J. Wong }
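
/*
 * Illustrative sketch, not part of this file: a reflink-style caller
 * could queue both halves of a remap with the two helpers above.  The
 * function name is hypothetical, and real callers also handle quota,
 * rmap and refcount bookkeeping.  Assumes both inodes are ILOCKed and
 * joined to @tp, and that @irec describes a real (non-hole,
 * non-delalloc) data fork mapping.
 */
static void
example_defer_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*src,
	struct xfs_inode	*dst,
	struct xfs_bmbt_irec	*irec)
{
	/* Queue removal of the mapping from the source file... */
	xfs_bmap_unmap_extent(tp, src, irec);

	/* ...then queue insertion of the same blocks into the dest. */
	xfs_bmap_map_extent(tp, dst, irec);
}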
61279f3afb57SDarrick J. Wong 
61289f3afb57SDarrick J. Wong /*
61299f3afb57SDarrick J. Wong  * Process one of the deferred bmap operations.  The mapping to act on
61309f3afb57SDarrick J. Wong  * is carried in the intent item; no btree cursor is passed between calls.
61319f3afb57SDarrick J. Wong  */
61329f3afb57SDarrick J. Wong int
xfs_bmap_finish_one(struct xfs_trans * tp,struct xfs_bmap_intent * bi)61339f3afb57SDarrick J. Wong xfs_bmap_finish_one(
61349f3afb57SDarrick J. Wong 	struct xfs_trans		*tp,
6135ddccb81bSDarrick J. Wong 	struct xfs_bmap_intent		*bi)
61369f3afb57SDarrick J. Wong {
6137ddccb81bSDarrick J. Wong 	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
6138e1a4e37cSDarrick J. Wong 	int				error = 0;
61399f3afb57SDarrick J. Wong 
6140692b6cddSDave Chinner 	ASSERT(tp->t_highest_agno == NULLAGNUMBER);
61414c1a67bdSDarrick J. Wong 
61429f3afb57SDarrick J. Wong 	trace_xfs_bmap_deferred(tp->t_mountp,
6143ddccb81bSDarrick J. Wong 			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6144ddccb81bSDarrick J. Wong 			bi->bi_type,
6145ddccb81bSDarrick J. Wong 			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6146ddccb81bSDarrick J. Wong 			bi->bi_owner->i_ino, bi->bi_whichfork,
6147ddccb81bSDarrick J. Wong 			bmap->br_startoff, bmap->br_blockcount,
6148ddccb81bSDarrick J. Wong 			bmap->br_state);
61499f3afb57SDarrick J. Wong 
6150ddccb81bSDarrick J. Wong 	if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK))
61519f3afb57SDarrick J. Wong 		return -EFSCORRUPTED;
61529f3afb57SDarrick J. Wong 
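	/*
	 * Error injection point: with XFS_ERRTAG_BMAP_FINISH_ONE set,
	 * fail here to exercise recovery of deferred bmap intents.
	 */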
61539f3afb57SDarrick J. Wong 	if (XFS_TEST_ERROR(false, tp->t_mountp,
61549e24cfd0SDarrick J. Wong 			XFS_ERRTAG_BMAP_FINISH_ONE))
61559f3afb57SDarrick J. Wong 		return -EIO;
61569f3afb57SDarrick J. Wong 
6157ddccb81bSDarrick J. Wong 	switch (bi->bi_type) {
61589f3afb57SDarrick J. Wong 	case XFS_BMAP_MAP:
6159ddccb81bSDarrick J. Wong 		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6160ddccb81bSDarrick J. Wong 				bmap->br_blockcount, bmap->br_startblock, 0);
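		/* A map completes in one shot; nothing left to requeue. */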
6161ddccb81bSDarrick J. Wong 		bmap->br_blockcount = 0;
61629f3afb57SDarrick J. Wong 		break;
61639f3afb57SDarrick J. Wong 	case XFS_BMAP_UNMAP:
6164ddccb81bSDarrick J. Wong 		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6165ddccb81bSDarrick J. Wong 				&bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
61669f3afb57SDarrick J. Wong 		break;
61679f3afb57SDarrick J. Wong 	default:
61689f3afb57SDarrick J. Wong 		ASSERT(0);
61699f3afb57SDarrick J. Wong 		error = -EFSCORRUPTED;
61709f3afb57SDarrick J. Wong 	}
61719f3afb57SDarrick J. Wong 
61729f3afb57SDarrick J. Wong 	return error;
61739f3afb57SDarrick J. Wong }
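
/*
 * Illustrative sketch, not part of this file: how a caller of
 * xfs_bmap_finish_one() might interpret the residual block count.  For
 * XFS_BMAP_UNMAP, __xfs_bunmapi() writes back how many blocks are
 * still mapped; a nonzero residual means the intent is not finished
 * and must be requeued (the real defer-ops code signals this with
 * -EAGAIN from its ->finish_item callback).  The function name is
 * hypothetical.
 */
static int
example_finish_intent(
	struct xfs_trans	*tp,
	struct xfs_bmap_intent	*bi)
{
	int			error;

	error = xfs_bmap_finish_one(tp, bi);
	if (error)
		return error;

	/* Partially finished unmap: ask the caller to requeue it. */
	if (bi->bi_type == XFS_BMAP_UNMAP && bi->bi_bmap.br_blockcount > 0)
		return -EAGAIN;
	return 0;
}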
617430b0984dSDarrick J. Wong 
617530b0984dSDarrick J. Wong /* Check that an inode's extent does not have invalid flags or bad ranges. */
617630b0984dSDarrick J. Wong xfs_failaddr_t
xfs_bmap_validate_extent(struct xfs_inode * ip,int whichfork,struct xfs_bmbt_irec * irec)617730b0984dSDarrick J. Wong xfs_bmap_validate_extent(
617830b0984dSDarrick J. Wong 	struct xfs_inode	*ip,
617930b0984dSDarrick J. Wong 	int			whichfork,
618030b0984dSDarrick J. Wong 	struct xfs_bmbt_irec	*irec)
618130b0984dSDarrick J. Wong {
618230b0984dSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
618330b0984dSDarrick J. Wong 
618433005fd0SDarrick J. Wong 	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6185acf104c2SDarrick J. Wong 		return __this_address;
6186acf104c2SDarrick J. Wong 
618718695ad4SDarrick J. Wong 	if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
618818695ad4SDarrick J. Wong 		if (!xfs_verify_rtext(mp, irec->br_startblock,
618918695ad4SDarrick J. Wong 					  irec->br_blockcount))
619030b0984dSDarrick J. Wong 			return __this_address;
619130b0984dSDarrick J. Wong 	} else {
619267457eb0SDarrick J. Wong 		if (!xfs_verify_fsbext(mp, irec->br_startblock,
619367457eb0SDarrick J. Wong 					   irec->br_blockcount))
619430b0984dSDarrick J. Wong 			return __this_address;
619530b0984dSDarrick J. Wong 	}
6196daa79baeSChristoph Hellwig 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
619730b0984dSDarrick J. Wong 		return __this_address;
619830b0984dSDarrick J. Wong 	return NULL;
619930b0984dSDarrick J. Wong }
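
/*
 * Illustrative sketch, not part of this file: a verifier-style caller
 * treats a non-NULL xfs_failaddr_t from xfs_bmap_validate_extent() as
 * evidence of corruption.  The function name is hypothetical; real
 * callers usually report the failure address for diagnostics first.
 */
static int
example_check_irec(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*irec)
{
	xfs_failaddr_t		fa;

	fa = xfs_bmap_validate_extent(ip, whichfork, irec);
	if (fa != NULL)
		return -EFSCORRUPTED;	/* bad flags or out-of-range extent */
	return 0;
}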
6200f3c799c2SDarrick J. Wong 
6201f3c799c2SDarrick J. Wong int __init
xfs_bmap_intent_init_cache(void)6202f3c799c2SDarrick J. Wong xfs_bmap_intent_init_cache(void)
6203f3c799c2SDarrick J. Wong {
6204f3c799c2SDarrick J. Wong 	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6205f3c799c2SDarrick J. Wong 			sizeof(struct xfs_bmap_intent),
6206f3c799c2SDarrick J. Wong 			0, 0, NULL);
6207f3c799c2SDarrick J. Wong 
6208f3c799c2SDarrick J. Wong 	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6209f3c799c2SDarrick J. Wong }
6210f3c799c2SDarrick J. Wong 
6211f3c799c2SDarrick J. Wong void
xfs_bmap_intent_destroy_cache(void)6212f3c799c2SDarrick J. Wong xfs_bmap_intent_destroy_cache(void)
6213f3c799c2SDarrick J. Wong {
6214f3c799c2SDarrick J. Wong 	kmem_cache_destroy(xfs_bmap_intent_cache);
6215f3c799c2SDarrick J. Wong 	xfs_bmap_intent_cache = NULL;
6216f3c799c2SDarrick J. Wong }
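
/*
 * Illustrative sketch, not part of this file: the constructor and
 * destructor above are meant to be paired at module init/exit time.
 * Since the destructor NULLs the pointer and kmem_cache_destroy()
 * ignores a NULL cache, the destructor is also safe to call from a
 * partial-unwind path.  The function name is hypothetical.
 */
static int __init
example_setup_caches(void)
{
	int	error;

	error = xfs_bmap_intent_init_cache();
	if (error)
		return error;

	/* ... create any other caches here, unwinding on failure ... */
	return 0;
}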