xref: /openbmc/linux/fs/xfs/libxfs/xfs_bmap.c (revision 2a7f6d41)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
230f712c9SDave Chinner /*
330f712c9SDave Chinner  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
430f712c9SDave Chinner  * All Rights Reserved.
530f712c9SDave Chinner  */
630f712c9SDave Chinner #include "xfs.h"
730f712c9SDave Chinner #include "xfs_fs.h"
830f712c9SDave Chinner #include "xfs_shared.h"
930f712c9SDave Chinner #include "xfs_format.h"
1030f712c9SDave Chinner #include "xfs_log_format.h"
1130f712c9SDave Chinner #include "xfs_trans_resv.h"
1230f712c9SDave Chinner #include "xfs_bit.h"
1330f712c9SDave Chinner #include "xfs_sb.h"
1430f712c9SDave Chinner #include "xfs_mount.h"
153ab78df2SDarrick J. Wong #include "xfs_defer.h"
1630f712c9SDave Chinner #include "xfs_dir2.h"
1730f712c9SDave Chinner #include "xfs_inode.h"
1830f712c9SDave Chinner #include "xfs_btree.h"
1930f712c9SDave Chinner #include "xfs_trans.h"
2030f712c9SDave Chinner #include "xfs_alloc.h"
2130f712c9SDave Chinner #include "xfs_bmap.h"
2230f712c9SDave Chinner #include "xfs_bmap_util.h"
2330f712c9SDave Chinner #include "xfs_bmap_btree.h"
2430f712c9SDave Chinner #include "xfs_rtalloc.h"
25e9e899a2SDarrick J. Wong #include "xfs_errortag.h"
2630f712c9SDave Chinner #include "xfs_error.h"
2730f712c9SDave Chinner #include "xfs_quota.h"
2830f712c9SDave Chinner #include "xfs_trans_space.h"
2930f712c9SDave Chinner #include "xfs_buf_item.h"
3030f712c9SDave Chinner #include "xfs_trace.h"
3130f712c9SDave Chinner #include "xfs_attr_leaf.h"
3230f712c9SDave Chinner #include "xfs_filestream.h"
33340785ccSDarrick J. Wong #include "xfs_rmap.h"
349bbafc71SDave Chinner #include "xfs_ag.h"
353fd129b6SDarrick J. Wong #include "xfs_ag_resv.h"
3662aab20fSDarrick J. Wong #include "xfs_refcount.h"
37974ae922SBrian Foster #include "xfs_icache.h"
384e087a3bSChristoph Hellwig #include "xfs_iomap.h"
3930f712c9SDave Chinner 
40f3c799c2SDarrick J. Wong struct kmem_cache		*xfs_bmap_intent_cache;
4130f712c9SDave Chinner 
4230f712c9SDave Chinner /*
4330f712c9SDave Chinner  * Miscellaneous helper functions
4430f712c9SDave Chinner  */
4530f712c9SDave Chinner 
4630f712c9SDave Chinner /*
4730f712c9SDave Chinner  * Compute and fill in the value of the maximum depth of a bmap btree
4830f712c9SDave Chinner  * in this filesystem.  Done once, during mount.
4930f712c9SDave Chinner  */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	uint64_t	maxblocks;	/* max blocks at this level */
	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
	int		level;		/* btree level */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a fork, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
	 */
	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
				whichfork);
	if (whichfork == XFS_DATA_FORK)
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	else
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);

	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	/*
	 * Work upwards from the leaf level, assuming minimally-filled blocks
	 * (the worst case): each iteration computes how many blocks the next
	 * level up needs to reference everything below it.  The loop stops
	 * once everything fits in a single block.
	 */
	maxblocks = howmany_64(maxleafents, minleafrecs);
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;	/* remaining blocks fit in the root */
		else
			maxblocks = howmany_64(maxblocks, minnoderecs);
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}
9530f712c9SDave Chinner 
96b2941046SDave Chinner unsigned int
97b2941046SDave Chinner xfs_bmap_compute_attr_offset(
98b2941046SDave Chinner 	struct xfs_mount	*mp)
99b2941046SDave Chinner {
100b2941046SDave Chinner 	if (mp->m_sb.sb_inodesize == 256)
101b2941046SDave Chinner 		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
102b2941046SDave Chinner 	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
103b2941046SDave Chinner }
104b2941046SDave Chinner 
/*
 * Look up the extent record exactly matching *irec in the bmap btree.
 * On return *stat is 1 if a matching record was found, 0 otherwise.
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	/* Stash the search key in the cursor for the generic lookup. */
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
11430f712c9SDave Chinner 
11530f712c9SDave Chinner STATIC int				/* error */
116b5cfbc22SChristoph Hellwig xfs_bmbt_lookup_first(
11730f712c9SDave Chinner 	struct xfs_btree_cur	*cur,
11830f712c9SDave Chinner 	int			*stat)	/* success/failure */
11930f712c9SDave Chinner {
120b5cfbc22SChristoph Hellwig 	cur->bc_rec.b.br_startoff = 0;
121b5cfbc22SChristoph Hellwig 	cur->bc_rec.b.br_startblock = 0;
122b5cfbc22SChristoph Hellwig 	cur->bc_rec.b.br_blockcount = 0;
12330f712c9SDave Chinner 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
12430f712c9SDave Chinner }
12530f712c9SDave Chinner 
12630f712c9SDave Chinner /*
12730f712c9SDave Chinner  * Check if the inode needs to be converted to btree format.
12830f712c9SDave Chinner  */
12930f712c9SDave Chinner static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
13030f712c9SDave Chinner {
131732436efSDarrick J. Wong 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
132daf83964SChristoph Hellwig 
13360b4984fSDarrick J. Wong 	return whichfork != XFS_COW_FORK &&
134f7e67b20SChristoph Hellwig 		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
135daf83964SChristoph Hellwig 		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
13630f712c9SDave Chinner }
13730f712c9SDave Chinner 
13830f712c9SDave Chinner /*
13930f712c9SDave Chinner  * Check if the inode should be converted to extent format.
14030f712c9SDave Chinner  */
14130f712c9SDave Chinner static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
14230f712c9SDave Chinner {
143732436efSDarrick J. Wong 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
144daf83964SChristoph Hellwig 
14560b4984fSDarrick J. Wong 	return whichfork != XFS_COW_FORK &&
146f7e67b20SChristoph Hellwig 		ifp->if_format == XFS_DINODE_FMT_BTREE &&
147daf83964SChristoph Hellwig 		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
14830f712c9SDave Chinner }
14930f712c9SDave Chinner 
/*
 * Update the record referred to by cur to the value given by irec
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	/* Pack the incore extent into on-disk record format first. */
	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}
16430f712c9SDave Chinner 
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		/* Blocks needed at this level, rounding the division up. */
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		/*
		 * Once a single block suffices at this level, each remaining
		 * level up to the maximum needs exactly one more block; add
		 * them all at once and return.
		 */
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		/* Above the leaves, interior node record limits apply. */
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
19530f712c9SDave Chinner 
19630f712c9SDave Chinner /*
19730f712c9SDave Chinner  * Calculate the default attribute fork offset for newly created inodes.
19830f712c9SDave Chinner  */
19930f712c9SDave Chinner uint
20030f712c9SDave Chinner xfs_default_attroffset(
20130f712c9SDave Chinner 	struct xfs_inode	*ip)
20230f712c9SDave Chinner {
203683ec9baSDave Chinner 	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
204683ec9baSDave Chinner 		return roundup(sizeof(xfs_dev_t), 8);
205b2941046SDave Chinner 	return M_IGEO(ip->i_mount)->attr_fork_offset;
20630f712c9SDave Chinner }
20730f712c9SDave Chinner 
20830f712c9SDave Chinner /*
2097821ea30SChristoph Hellwig  * Helper routine to reset inode i_forkoff field when switching attribute fork
2107821ea30SChristoph Hellwig  * from local to extent format - we reset it where possible to make space
2117821ea30SChristoph Hellwig  * available for inline data fork extents.
21230f712c9SDave Chinner  */
21330f712c9SDave Chinner STATIC void
21430f712c9SDave Chinner xfs_bmap_forkoff_reset(
21530f712c9SDave Chinner 	xfs_inode_t	*ip,
21630f712c9SDave Chinner 	int		whichfork)
21730f712c9SDave Chinner {
21830f712c9SDave Chinner 	if (whichfork == XFS_ATTR_FORK &&
219f7e67b20SChristoph Hellwig 	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
220f7e67b20SChristoph Hellwig 	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
22130f712c9SDave Chinner 		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;
22230f712c9SDave Chinner 
2237821ea30SChristoph Hellwig 		if (dfl_forkoff > ip->i_forkoff)
2247821ea30SChristoph Hellwig 			ip->i_forkoff = dfl_forkoff;
22530f712c9SDave Chinner 	}
22630f712c9SDave Chinner }
22730f712c9SDave Chinner 
22830f712c9SDave Chinner #ifdef DEBUG
/*
 * DEBUG helper: find the buffer for disk address "bno" if it is already
 * attached to the btree cursor path or logged in the cursor's transaction;
 * return NULL if it is not held anywhere.
 *
 * NOTE(review): the parameter is typed xfs_fsblock_t but is compared
 * against xfs_buf_daddr() values, and callers in this file pass
 * XFS_FSB_TO_DADDR(...) - so it is really a daddr despite the type.
 */
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	/* First check the buffers pinned at each level of the cursor. */
	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
25830f712c9SDave Chinner 
/*
 * DEBUG helper: sanity-check one interior bmbt block.  Keys must be in
 * strictly increasing startoff order, and no two child block pointers may
 * be equal.  Duplicate pointers force a filesystem shutdown since the
 * incore tree is corrupt.
 */
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,	/* nonzero: block is the incore root */
	short			sz)	/* root block size, for ptr addressing */
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	/* Only interior (non-leaf) blocks carry keys and pointers. */
	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		/* Keys must be strictly increasing across the block. */
		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		/* O(n^2) scan over the remaining pointers - DEBUG only. */
		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
30730f712c9SDave Chinner 
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;	/* we read bp; release on done */

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			/* Not held by the cursor/transaction; read it in. */
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			/* Extents must not overlap across leaf boundaries. */
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		/* Remember the last extent to validate the next block. */
		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
47530f712c9SDave Chinner 
47630f712c9SDave Chinner /*
47730f712c9SDave Chinner  * Validate that the bmbt_irecs being returned from bmapi are valid
47830f712c9SDave Chinner  * given the caller's original parameters.  Specifically check the
47930f712c9SDave Chinner  * ranges of the returned irecs to ensure that they only extend beyond
48030f712c9SDave Chinner  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
48130f712c9SDave Chinner  */
48230f712c9SDave Chinner STATIC void
48330f712c9SDave Chinner xfs_bmap_validate_ret(
48430f712c9SDave Chinner 	xfs_fileoff_t		bno,
48530f712c9SDave Chinner 	xfs_filblks_t		len,
486e7d410acSDave Chinner 	uint32_t		flags,
48730f712c9SDave Chinner 	xfs_bmbt_irec_t		*mval,
48830f712c9SDave Chinner 	int			nmap,
48930f712c9SDave Chinner 	int			ret_nmap)
49030f712c9SDave Chinner {
49130f712c9SDave Chinner 	int			i;		/* index to map values */
49230f712c9SDave Chinner 
49330f712c9SDave Chinner 	ASSERT(ret_nmap <= nmap);
49430f712c9SDave Chinner 
49530f712c9SDave Chinner 	for (i = 0; i < ret_nmap; i++) {
49630f712c9SDave Chinner 		ASSERT(mval[i].br_blockcount > 0);
49730f712c9SDave Chinner 		if (!(flags & XFS_BMAPI_ENTIRE)) {
49830f712c9SDave Chinner 			ASSERT(mval[i].br_startoff >= bno);
49930f712c9SDave Chinner 			ASSERT(mval[i].br_blockcount <= len);
50030f712c9SDave Chinner 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
50130f712c9SDave Chinner 			       bno + len);
50230f712c9SDave Chinner 		} else {
50330f712c9SDave Chinner 			ASSERT(mval[i].br_startoff < bno + len);
50430f712c9SDave Chinner 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
50530f712c9SDave Chinner 			       bno);
50630f712c9SDave Chinner 		}
50730f712c9SDave Chinner 		ASSERT(i == 0 ||
50830f712c9SDave Chinner 		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
50930f712c9SDave Chinner 		       mval[i].br_startoff);
51030f712c9SDave Chinner 		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
51130f712c9SDave Chinner 		       mval[i].br_startblock != HOLESTARTBLOCK);
51230f712c9SDave Chinner 		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
51330f712c9SDave Chinner 		       mval[i].br_state == XFS_EXT_UNWRITTEN);
51430f712c9SDave Chinner 	}
51530f712c9SDave Chinner }
51630f712c9SDave Chinner 
51730f712c9SDave Chinner #else
51830f712c9SDave Chinner #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
5197bf7a193SDarrick J. Wong #define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
52030f712c9SDave Chinner #endif /* DEBUG */
52130f712c9SDave Chinner 
52230f712c9SDave Chinner /*
52330f712c9SDave Chinner  * Inode fork format manipulation functions
52430f712c9SDave Chinner  */
52530f712c9SDave Chinner 
/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* Conversion only applies to a height-1 tree with a lone child. */
	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	/* Grab the only child block pointer out of the incore root. */
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	/* Schedule the child block for freeing and account for it. */
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	/* Drop the cursor's reference to the now-invalidated buffer. */
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	/* Give back the root block space and switch the fork format. */
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
58830f712c9SDave Chinner 
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;		/* incore extent tree cursor */
	struct xfs_bmbt_irec	rec;		/* current extent record */
	xfs_extnum_t		cnt = 0;	/* count of real extents copied */

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	/* Allocate the single child block, starting the search near the inode. */
	error = xfs_alloc_vextent_start_ag(&args,
				XFS_INO_TO_FSB(mp, ip->i_ino));
	if (error)
		goto out_root_realloc;

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	/* Copy only real extents; delalloc extents have no disk blocks yet. */
	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	/* Undo the incore root expansion and revert the fork to extents format. */
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
72430f712c9SDave Chinner 
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	/* Only an empty local-format fork can switch without moving data. */
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	/* An empty incore extent tree is a NULL root with zero height. */
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
75030f712c9SDave Chinner 
75130f712c9SDave Chinner 
75230f712c9SDave Chinner STATIC int				/* error */
75330f712c9SDave Chinner xfs_bmap_local_to_extents(
75430f712c9SDave Chinner 	xfs_trans_t	*tp,		/* transaction pointer */
75530f712c9SDave Chinner 	xfs_inode_t	*ip,		/* incore inode pointer */
75630f712c9SDave Chinner 	xfs_extlen_t	total,		/* total blocks needed by transaction */
75730f712c9SDave Chinner 	int		*logflagsp,	/* inode logging flags */
75830f712c9SDave Chinner 	int		whichfork,
75930f712c9SDave Chinner 	void		(*init_fn)(struct xfs_trans *tp,
76030f712c9SDave Chinner 				   struct xfs_buf *bp,
76130f712c9SDave Chinner 				   struct xfs_inode *ip,
76230f712c9SDave Chinner 				   struct xfs_ifork *ifp))
76330f712c9SDave Chinner {
76430f712c9SDave Chinner 	int		error = 0;
76530f712c9SDave Chinner 	int		flags;		/* logging flags returned */
7663ba738dfSChristoph Hellwig 	struct xfs_ifork *ifp;		/* inode fork pointer */
76730f712c9SDave Chinner 	xfs_alloc_arg_t	args;		/* allocation arguments */
768e8222613SDave Chinner 	struct xfs_buf	*bp;		/* buffer for extent block */
76950bb44c2SChristoph Hellwig 	struct xfs_bmbt_irec rec;
770b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor icur;
77130f712c9SDave Chinner 
77230f712c9SDave Chinner 	/*
77330f712c9SDave Chinner 	 * We don't want to deal with the case of keeping inode data inline yet.
77430f712c9SDave Chinner 	 * So sending the data fork of a regular inode is invalid.
77530f712c9SDave Chinner 	 */
776c19b3b05SDave Chinner 	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
777732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
778f7e67b20SChristoph Hellwig 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
77930f712c9SDave Chinner 
78030f712c9SDave Chinner 	if (!ifp->if_bytes) {
781aeea4b75SBrian Foster 		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
78230f712c9SDave Chinner 		flags = XFS_ILOG_CORE;
78330f712c9SDave Chinner 		goto done;
78430f712c9SDave Chinner 	}
78530f712c9SDave Chinner 
78630f712c9SDave Chinner 	flags = 0;
78730f712c9SDave Chinner 	error = 0;
78830f712c9SDave Chinner 	memset(&args, 0, sizeof(args));
78930f712c9SDave Chinner 	args.tp = tp;
79030f712c9SDave Chinner 	args.mp = ip->i_mount;
79174c36a86SDave Chinner 	args.total = total;
79274c36a86SDave Chinner 	args.minlen = args.maxlen = args.prod = 1;
793340785ccSDarrick J. Wong 	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
794*2a7f6d41SDave Chinner 
79530f712c9SDave Chinner 	/*
79630f712c9SDave Chinner 	 * Allocate a block.  We know we need only one, since the
79730f712c9SDave Chinner 	 * file currently fits in an inode.
79830f712c9SDave Chinner 	 */
79930f712c9SDave Chinner 	args.total = total;
80030f712c9SDave Chinner 	args.minlen = args.maxlen = args.prod = 1;
801*2a7f6d41SDave Chinner 	error = xfs_alloc_vextent_start_ag(&args,
802*2a7f6d41SDave Chinner 			XFS_INO_TO_FSB(args.mp, ip->i_ino));
80330f712c9SDave Chinner 	if (error)
80430f712c9SDave Chinner 		goto done;
80530f712c9SDave Chinner 
80630f712c9SDave Chinner 	/* Can't fail, the space was reserved. */
80730f712c9SDave Chinner 	ASSERT(args.fsbno != NULLFSBLOCK);
80830f712c9SDave Chinner 	ASSERT(args.len == 1);
809ee647f85SDarrick J. Wong 	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
810ee647f85SDarrick J. Wong 			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
811ee647f85SDarrick J. Wong 			args.mp->m_bsize, 0, &bp);
812ee647f85SDarrick J. Wong 	if (error)
813ee647f85SDarrick J. Wong 		goto done;
81430f712c9SDave Chinner 
815fe22d552SDave Chinner 	/*
816b7cdc66bSBrian Foster 	 * Initialize the block, copy the data and log the remote buffer.
817fe22d552SDave Chinner 	 *
818b7cdc66bSBrian Foster 	 * The callout is responsible for logging because the remote format
819b7cdc66bSBrian Foster 	 * might differ from the local format and thus we don't know how much to
820b7cdc66bSBrian Foster 	 * log here. Note that init_fn must also set the buffer log item type
821b7cdc66bSBrian Foster 	 * correctly.
822fe22d552SDave Chinner 	 */
82330f712c9SDave Chinner 	init_fn(tp, bp, ip, ifp);
82430f712c9SDave Chinner 
825b7cdc66bSBrian Foster 	/* account for the change in fork size */
82630f712c9SDave Chinner 	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
827aeea4b75SBrian Foster 	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
82830f712c9SDave Chinner 	flags |= XFS_ILOG_CORE;
82930f712c9SDave Chinner 
8306bdcf26aSChristoph Hellwig 	ifp->if_u1.if_root = NULL;
8316bdcf26aSChristoph Hellwig 	ifp->if_height = 0;
8326bdcf26aSChristoph Hellwig 
83350bb44c2SChristoph Hellwig 	rec.br_startoff = 0;
83450bb44c2SChristoph Hellwig 	rec.br_startblock = args.fsbno;
83550bb44c2SChristoph Hellwig 	rec.br_blockcount = 1;
83650bb44c2SChristoph Hellwig 	rec.br_state = XFS_EXT_NORM;
837b2b1712aSChristoph Hellwig 	xfs_iext_first(ifp, &icur);
8380254c2f2SChristoph Hellwig 	xfs_iext_insert(ip, &icur, &rec, 0);
83950bb44c2SChristoph Hellwig 
840daf83964SChristoph Hellwig 	ifp->if_nextents = 1;
8416e73a545SChristoph Hellwig 	ip->i_nblocks = 1;
84236b6ad2dSDave Chinner 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
84330f712c9SDave Chinner 	flags |= xfs_ilog_fext(whichfork);
84430f712c9SDave Chinner 
84530f712c9SDave Chinner done:
84630f712c9SDave Chinner 	*logflagsp = flags;
84730f712c9SDave Chinner 	return error;
84830f712c9SDave Chinner }
84930f712c9SDave Chinner 
85030f712c9SDave Chinner /*
85130f712c9SDave Chinner  * Called from xfs_bmap_add_attrfork to handle btree format files.
85230f712c9SDave Chinner  */
85330f712c9SDave Chinner STATIC int					/* error */
85430f712c9SDave Chinner xfs_bmap_add_attrfork_btree(
85530f712c9SDave Chinner 	xfs_trans_t		*tp,		/* transaction pointer */
85630f712c9SDave Chinner 	xfs_inode_t		*ip,		/* incore inode pointer */
85730f712c9SDave Chinner 	int			*flags)		/* inode logging flags */
85830f712c9SDave Chinner {
859b6785e27SChandan Babu R 	struct xfs_btree_block	*block = ip->i_df.if_broot;
860ae127f08SDarrick J. Wong 	struct xfs_btree_cur	*cur;		/* btree cursor */
86130f712c9SDave Chinner 	int			error;		/* error return value */
86230f712c9SDave Chinner 	xfs_mount_t		*mp;		/* file system mount struct */
86330f712c9SDave Chinner 	int			stat;		/* newroot status */
86430f712c9SDave Chinner 
86530f712c9SDave Chinner 	mp = ip->i_mount;
866b6785e27SChandan Babu R 
867c01147d9SDarrick J. Wong 	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
86830f712c9SDave Chinner 		*flags |= XFS_ILOG_DBROOT;
86930f712c9SDave Chinner 	else {
87030f712c9SDave Chinner 		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
871b5cfbc22SChristoph Hellwig 		error = xfs_bmbt_lookup_first(cur, &stat);
872b5cfbc22SChristoph Hellwig 		if (error)
87330f712c9SDave Chinner 			goto error0;
87430f712c9SDave Chinner 		/* must be at least one entry */
875f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, stat != 1)) {
876f9e03706SDarrick J. Wong 			error = -EFSCORRUPTED;
877f9e03706SDarrick J. Wong 			goto error0;
878f9e03706SDarrick J. Wong 		}
87930f712c9SDave Chinner 		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
88030f712c9SDave Chinner 			goto error0;
88130f712c9SDave Chinner 		if (stat == 0) {
88230f712c9SDave Chinner 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
8832451337dSDave Chinner 			return -ENOSPC;
88430f712c9SDave Chinner 		}
88592219c29SDave Chinner 		cur->bc_ino.allocated = 0;
88630f712c9SDave Chinner 		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
88730f712c9SDave Chinner 	}
88830f712c9SDave Chinner 	return 0;
88930f712c9SDave Chinner error0:
89030f712c9SDave Chinner 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
89130f712c9SDave Chinner 	return error;
89230f712c9SDave Chinner }
89330f712c9SDave Chinner 
89430f712c9SDave Chinner /*
89530f712c9SDave Chinner  * Called from xfs_bmap_add_attrfork to handle extents format files.
89630f712c9SDave Chinner  */
89730f712c9SDave Chinner STATIC int					/* error */
89830f712c9SDave Chinner xfs_bmap_add_attrfork_extents(
89981ba8f3eSBrian Foster 	struct xfs_trans	*tp,		/* transaction pointer */
90081ba8f3eSBrian Foster 	struct xfs_inode	*ip,		/* incore inode pointer */
90130f712c9SDave Chinner 	int			*flags)		/* inode logging flags */
90230f712c9SDave Chinner {
903ae127f08SDarrick J. Wong 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
90430f712c9SDave Chinner 	int			error;		/* error return value */
90530f712c9SDave Chinner 
906daf83964SChristoph Hellwig 	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
907c01147d9SDarrick J. Wong 	    xfs_inode_data_fork_size(ip))
90830f712c9SDave Chinner 		return 0;
90930f712c9SDave Chinner 	cur = NULL;
910280253d2SBrian Foster 	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
911280253d2SBrian Foster 					  XFS_DATA_FORK);
91230f712c9SDave Chinner 	if (cur) {
91392219c29SDave Chinner 		cur->bc_ino.allocated = 0;
9140b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
91530f712c9SDave Chinner 	}
91630f712c9SDave Chinner 	return error;
91730f712c9SDave Chinner }
91830f712c9SDave Chinner 
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formating, others (directories) are so specialised they
 * handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	/* Data still fits inline after making room for the attr fork. */
	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
		return 0;

	/* Directories do the whole conversion themselves. */
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	/* Symlinks use the generic path with a block initialisation callout. */
	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
96030f712c9SDave Chinner 
961e6a688c3SDave Chinner /*
962e6a688c3SDave Chinner  * Set an inode attr fork offset based on the format of the data fork.
963e6a688c3SDave Chinner  */
9645a981e4eSChristoph Hellwig static int
9652f3cd809SAllison Henderson xfs_bmap_set_attrforkoff(
9662f3cd809SAllison Henderson 	struct xfs_inode	*ip,
9672f3cd809SAllison Henderson 	int			size,
9682f3cd809SAllison Henderson 	int			*version)
9692f3cd809SAllison Henderson {
970683ec9baSDave Chinner 	int			default_size = xfs_default_attroffset(ip) >> 3;
971683ec9baSDave Chinner 
972f7e67b20SChristoph Hellwig 	switch (ip->i_df.if_format) {
9732f3cd809SAllison Henderson 	case XFS_DINODE_FMT_DEV:
974683ec9baSDave Chinner 		ip->i_forkoff = default_size;
9752f3cd809SAllison Henderson 		break;
9762f3cd809SAllison Henderson 	case XFS_DINODE_FMT_LOCAL:
9772f3cd809SAllison Henderson 	case XFS_DINODE_FMT_EXTENTS:
9782f3cd809SAllison Henderson 	case XFS_DINODE_FMT_BTREE:
9797821ea30SChristoph Hellwig 		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
9807821ea30SChristoph Hellwig 		if (!ip->i_forkoff)
981683ec9baSDave Chinner 			ip->i_forkoff = default_size;
9820560f31aSDave Chinner 		else if (xfs_has_attr2(ip->i_mount) && version)
9832f3cd809SAllison Henderson 			*version = 2;
9842f3cd809SAllison Henderson 		break;
9852f3cd809SAllison Henderson 	default:
9862f3cd809SAllison Henderson 		ASSERT(0);
9872f3cd809SAllison Henderson 		return -EINVAL;
9882f3cd809SAllison Henderson 	}
9892f3cd809SAllison Henderson 
9902f3cd809SAllison Henderson 	return 0;
9912f3cd809SAllison Henderson }
9922f3cd809SAllison Henderson 
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(xfs_inode_has_attr_fork(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	/* Another thread may have added the attr fork while we slept. */
	if (xfs_inode_has_attr_fork(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;

	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	/* Shrinking the data fork may require converting its format. */
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	/* Turn on the attr/attr2 superblock feature bits if not yet set. */
	if (!xfs_has_attr(mp) ||
	   (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
107630f712c9SDave Chinner 
107730f712c9SDave Chinner /*
107830f712c9SDave Chinner  * Internal and external extent tree search functions.
107930f712c9SDave Chinner  */
108030f712c9SDave Chinner 
/* State tracked while loading bmbt records into the incore extent tree. */
struct xfs_iread_state {
	struct xfs_iext_cursor	icur;	/* insertion point in the incore tree */
	xfs_extnum_t		loaded;	/* number of records loaded so far */
};
1085e992ae8aSDarrick J. Wong 
/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		/* Validate each extent before caching it. */
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return -EFSCORRUPTED;
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}
1139e992ae8aSDarrick J. Wong 
/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	/* Nothing to do if the extents are already incore. */
	if (!xfs_need_iread_extents(ifp))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/* Walk every leaf record of the bmbt into the incore extent tree. */
	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	/* The loaded count must match what the inode core claims. */
	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	return 0;
out:
	/* Tear down the partially populated tree on failure. */
	xfs_iext_destroy(ifp);
	return error;
}
117930f712c9SDave Chinner 
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free.  This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork.  Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	/* Local-format forks have no extent records; report offset zero. */
	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/* Scan upward from *first_unused for a gap of at least len blocks. */
	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
122730f712c9SDave Chinner 
122830f712c9SDave Chinner /*
122930f712c9SDave Chinner  * Returns the file-relative block number of the last block - 1 before
123030f712c9SDave Chinner  * last_block (input value) in the file.
123130f712c9SDave Chinner  * This is not based on i_size, it is based on the extent records.
123230f712c9SDave Chinner  * Returns 0 for local files, as they do not have extent records.
123330f712c9SDave Chinner  */
123430f712c9SDave Chinner int						/* error */
123530f712c9SDave Chinner xfs_bmap_last_before(
123686685f7bSChristoph Hellwig 	struct xfs_trans	*tp,		/* transaction pointer */
123786685f7bSChristoph Hellwig 	struct xfs_inode	*ip,		/* incore inode */
123830f712c9SDave Chinner 	xfs_fileoff_t		*last_block,	/* last block */
123930f712c9SDave Chinner 	int			whichfork)	/* data or attr fork */
124030f712c9SDave Chinner {
1241732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
124286685f7bSChristoph Hellwig 	struct xfs_bmbt_irec	got;
1243b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
124486685f7bSChristoph Hellwig 	int			error;
124530f712c9SDave Chinner 
1246f7e67b20SChristoph Hellwig 	switch (ifp->if_format) {
124786685f7bSChristoph Hellwig 	case XFS_DINODE_FMT_LOCAL:
124830f712c9SDave Chinner 		*last_block = 0;
124930f712c9SDave Chinner 		return 0;
125086685f7bSChristoph Hellwig 	case XFS_DINODE_FMT_BTREE:
125186685f7bSChristoph Hellwig 	case XFS_DINODE_FMT_EXTENTS:
125286685f7bSChristoph Hellwig 		break;
125386685f7bSChristoph Hellwig 	default:
1254a5155b87SDarrick J. Wong 		ASSERT(0);
1255c2414ad6SDarrick J. Wong 		return -EFSCORRUPTED;
125630f712c9SDave Chinner 	}
125786685f7bSChristoph Hellwig 
125886685f7bSChristoph Hellwig 	error = xfs_iread_extents(tp, ip, whichfork);
125986685f7bSChristoph Hellwig 	if (error)
126030f712c9SDave Chinner 		return error;
126186685f7bSChristoph Hellwig 
1262b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
126386685f7bSChristoph Hellwig 		*last_block = 0;
126430f712c9SDave Chinner 	return 0;
126530f712c9SDave Chinner }
126630f712c9SDave Chinner 
126730f712c9SDave Chinner int
126830f712c9SDave Chinner xfs_bmap_last_extent(
126930f712c9SDave Chinner 	struct xfs_trans	*tp,
127030f712c9SDave Chinner 	struct xfs_inode	*ip,
127130f712c9SDave Chinner 	int			whichfork,
127230f712c9SDave Chinner 	struct xfs_bmbt_irec	*rec,
127330f712c9SDave Chinner 	int			*is_empty)
127430f712c9SDave Chinner {
1275732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1276b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
127730f712c9SDave Chinner 	int			error;
127830f712c9SDave Chinner 
127930f712c9SDave Chinner 	error = xfs_iread_extents(tp, ip, whichfork);
128030f712c9SDave Chinner 	if (error)
128130f712c9SDave Chinner 		return error;
128230f712c9SDave Chinner 
1283b2b1712aSChristoph Hellwig 	xfs_iext_last(ifp, &icur);
1284b2b1712aSChristoph Hellwig 	if (!xfs_iext_get_extent(ifp, &icur, rec))
128530f712c9SDave Chinner 		*is_empty = 1;
1286b2b1712aSChristoph Hellwig 	else
128730f712c9SDave Chinner 		*is_empty = 0;
128830f712c9SDave Chinner 	return 0;
128930f712c9SDave Chinner }
129030f712c9SDave Chinner 
129130f712c9SDave Chinner /*
129230f712c9SDave Chinner  * Check the last inode extent to determine whether this allocation will result
129330f712c9SDave Chinner  * in blocks being allocated at the end of the file. When we allocate new data
129430f712c9SDave Chinner  * blocks at the end of the file which do not start at the previous data block,
129530f712c9SDave Chinner  * we will try to align the new blocks at stripe unit boundaries.
129630f712c9SDave Chinner  *
129730f712c9SDave Chinner  * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
129830f712c9SDave Chinner  * at, or past the EOF.
129930f712c9SDave Chinner  */
130030f712c9SDave Chinner STATIC int
130130f712c9SDave Chinner xfs_bmap_isaeof(
130230f712c9SDave Chinner 	struct xfs_bmalloca	*bma,
130330f712c9SDave Chinner 	int			whichfork)
130430f712c9SDave Chinner {
130530f712c9SDave Chinner 	struct xfs_bmbt_irec	rec;
130630f712c9SDave Chinner 	int			is_empty;
130730f712c9SDave Chinner 	int			error;
130830f712c9SDave Chinner 
1309749f24f3SThomas Meyer 	bma->aeof = false;
131030f712c9SDave Chinner 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
131130f712c9SDave Chinner 				     &is_empty);
131230f712c9SDave Chinner 	if (error)
131330f712c9SDave Chinner 		return error;
131430f712c9SDave Chinner 
131530f712c9SDave Chinner 	if (is_empty) {
1316749f24f3SThomas Meyer 		bma->aeof = true;
131730f712c9SDave Chinner 		return 0;
131830f712c9SDave Chinner 	}
131930f712c9SDave Chinner 
132030f712c9SDave Chinner 	/*
132130f712c9SDave Chinner 	 * Check if we are allocation or past the last extent, or at least into
132230f712c9SDave Chinner 	 * the last delayed allocated extent.
132330f712c9SDave Chinner 	 */
132430f712c9SDave Chinner 	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
132530f712c9SDave Chinner 		(bma->offset >= rec.br_startoff &&
132630f712c9SDave Chinner 		 isnullstartblock(rec.br_startblock));
132730f712c9SDave Chinner 	return 0;
132830f712c9SDave Chinner }
132930f712c9SDave Chinner 
133030f712c9SDave Chinner /*
133130f712c9SDave Chinner  * Returns the file-relative block number of the first block past eof in
133230f712c9SDave Chinner  * the file.  This is not based on i_size, it is based on the extent records.
133330f712c9SDave Chinner  * Returns 0 for local files, as they do not have extent records.
133430f712c9SDave Chinner  */
133530f712c9SDave Chinner int
133630f712c9SDave Chinner xfs_bmap_last_offset(
133730f712c9SDave Chinner 	struct xfs_inode	*ip,
133830f712c9SDave Chinner 	xfs_fileoff_t		*last_block,
133930f712c9SDave Chinner 	int			whichfork)
134030f712c9SDave Chinner {
1341732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
134230f712c9SDave Chinner 	struct xfs_bmbt_irec	rec;
134330f712c9SDave Chinner 	int			is_empty;
134430f712c9SDave Chinner 	int			error;
134530f712c9SDave Chinner 
134630f712c9SDave Chinner 	*last_block = 0;
134730f712c9SDave Chinner 
1348f7e67b20SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
134930f712c9SDave Chinner 		return 0;
135030f712c9SDave Chinner 
1351f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
1352c2414ad6SDarrick J. Wong 		return -EFSCORRUPTED;
135330f712c9SDave Chinner 
135430f712c9SDave Chinner 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
135530f712c9SDave Chinner 	if (error || is_empty)
135630f712c9SDave Chinner 		return error;
135730f712c9SDave Chinner 
135830f712c9SDave Chinner 	*last_block = rec.br_startoff + rec.br_blockcount;
135930f712c9SDave Chinner 	return 0;
136030f712c9SDave Chinner }
136130f712c9SDave Chinner 
136230f712c9SDave Chinner /*
136330f712c9SDave Chinner  * Extent tree manipulation functions used during allocation.
136430f712c9SDave Chinner  */
136530f712c9SDave Chinner 
136630f712c9SDave Chinner /*
136730f712c9SDave Chinner  * Convert a delayed allocation to a real allocation.
136830f712c9SDave Chinner  */
136930f712c9SDave Chinner STATIC int				/* error */
137030f712c9SDave Chinner xfs_bmap_add_extent_delay_real(
137160b4984fSDarrick J. Wong 	struct xfs_bmalloca	*bma,
137260b4984fSDarrick J. Wong 	int			whichfork)
137330f712c9SDave Chinner {
1374daf83964SChristoph Hellwig 	struct xfs_mount	*mp = bma->ip->i_mount;
1375732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
137630f712c9SDave Chinner 	struct xfs_bmbt_irec	*new = &bma->got;
137730f712c9SDave Chinner 	int			error;	/* error return value */
137830f712c9SDave Chinner 	int			i;	/* temp state */
137930f712c9SDave Chinner 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
138030f712c9SDave Chinner 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
138130f712c9SDave Chinner 					/* left is 0, right is 1, prev is 2 */
138230f712c9SDave Chinner 	int			rval=0;	/* return value (logging flags) */
13830e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
138430f712c9SDave Chinner 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
138530f712c9SDave Chinner 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
138630f712c9SDave Chinner 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
138730f712c9SDave Chinner 	int			tmp_rval;	/* partial logging flags */
13884dcb8869SChristoph Hellwig 	struct xfs_bmbt_irec	old;
138930f712c9SDave Chinner 
139060b4984fSDarrick J. Wong 	ASSERT(whichfork != XFS_ATTR_FORK);
139130f712c9SDave Chinner 	ASSERT(!isnullstartblock(new->br_startblock));
139230f712c9SDave Chinner 	ASSERT(!bma->cur ||
13938ef54797SDave Chinner 	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
139430f712c9SDave Chinner 
1395ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_add_exlist);
139630f712c9SDave Chinner 
139730f712c9SDave Chinner #define	LEFT		r[0]
139830f712c9SDave Chinner #define	RIGHT		r[1]
139930f712c9SDave Chinner #define	PREV		r[2]
140030f712c9SDave Chinner 
140130f712c9SDave Chinner 	/*
140230f712c9SDave Chinner 	 * Set up a bunch of variables to make the tests simpler.
140330f712c9SDave Chinner 	 */
1404b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
140530f712c9SDave Chinner 	new_endoff = new->br_startoff + new->br_blockcount;
14064dcb8869SChristoph Hellwig 	ASSERT(isnullstartblock(PREV.br_startblock));
140730f712c9SDave Chinner 	ASSERT(PREV.br_startoff <= new->br_startoff);
140830f712c9SDave Chinner 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
140930f712c9SDave Chinner 
141030f712c9SDave Chinner 	da_old = startblockval(PREV.br_startblock);
141130f712c9SDave Chinner 	da_new = 0;
141230f712c9SDave Chinner 
141330f712c9SDave Chinner 	/*
141430f712c9SDave Chinner 	 * Set flags determining what part of the previous delayed allocation
141530f712c9SDave Chinner 	 * extent is being replaced by a real allocation.
141630f712c9SDave Chinner 	 */
141730f712c9SDave Chinner 	if (PREV.br_startoff == new->br_startoff)
141830f712c9SDave Chinner 		state |= BMAP_LEFT_FILLING;
141930f712c9SDave Chinner 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
142030f712c9SDave Chinner 		state |= BMAP_RIGHT_FILLING;
142130f712c9SDave Chinner 
142230f712c9SDave Chinner 	/*
142330f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor.
142430f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
142530f712c9SDave Chinner 	 */
1426b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
142730f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
142830f712c9SDave Chinner 		if (isnullstartblock(LEFT.br_startblock))
142930f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
143030f712c9SDave Chinner 	}
143130f712c9SDave Chinner 
143230f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
143330f712c9SDave Chinner 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
143430f712c9SDave Chinner 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
143530f712c9SDave Chinner 	    LEFT.br_state == new->br_state &&
143695f0b95eSChandan Babu R 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
143730f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
143830f712c9SDave Chinner 
143930f712c9SDave Chinner 	/*
144030f712c9SDave Chinner 	 * Check and set flags if this segment has a right neighbor.
144130f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
144230f712c9SDave Chinner 	 * Also check for all-three-contiguous being too large.
144330f712c9SDave Chinner 	 */
1444b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
144530f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
144630f712c9SDave Chinner 		if (isnullstartblock(RIGHT.br_startblock))
144730f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
144830f712c9SDave Chinner 	}
144930f712c9SDave Chinner 
145030f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
145130f712c9SDave Chinner 	    new_endoff == RIGHT.br_startoff &&
145230f712c9SDave Chinner 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
145330f712c9SDave Chinner 	    new->br_state == RIGHT.br_state &&
145495f0b95eSChandan Babu R 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
145530f712c9SDave Chinner 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
145630f712c9SDave Chinner 		       BMAP_RIGHT_FILLING)) !=
145730f712c9SDave Chinner 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
145830f712c9SDave Chinner 		       BMAP_RIGHT_FILLING) ||
145930f712c9SDave Chinner 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
146095f0b95eSChandan Babu R 			<= XFS_MAX_BMBT_EXTLEN))
146130f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
146230f712c9SDave Chinner 
146330f712c9SDave Chinner 	error = 0;
146430f712c9SDave Chinner 	/*
146530f712c9SDave Chinner 	 * Switch out based on the FILLING and CONTIG state bits.
146630f712c9SDave Chinner 	 */
146730f712c9SDave Chinner 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
146830f712c9SDave Chinner 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
146930f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
147030f712c9SDave Chinner 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
147130f712c9SDave Chinner 		/*
147230f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
147330f712c9SDave Chinner 		 * The left and right neighbors are both contiguous with new.
147430f712c9SDave Chinner 		 */
14754dcb8869SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
147630f712c9SDave Chinner 
1477c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1478c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1479b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1480b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1481daf83964SChristoph Hellwig 		ifp->if_nextents--;
14820d045540SChristoph Hellwig 
148330f712c9SDave Chinner 		if (bma->cur == NULL)
148430f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
148530f712c9SDave Chinner 		else {
148630f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1487e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
148830f712c9SDave Chinner 			if (error)
148930f712c9SDave Chinner 				goto done;
1490f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1491f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1492f9e03706SDarrick J. Wong 				goto done;
1493f9e03706SDarrick J. Wong 			}
149430f712c9SDave Chinner 			error = xfs_btree_delete(bma->cur, &i);
149530f712c9SDave Chinner 			if (error)
149630f712c9SDave Chinner 				goto done;
1497f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1498f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1499f9e03706SDarrick J. Wong 				goto done;
1500f9e03706SDarrick J. Wong 			}
150130f712c9SDave Chinner 			error = xfs_btree_decrement(bma->cur, 0, &i);
150230f712c9SDave Chinner 			if (error)
150330f712c9SDave Chinner 				goto done;
1504f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1505f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1506f9e03706SDarrick J. Wong 				goto done;
1507f9e03706SDarrick J. Wong 			}
1508a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &LEFT);
150930f712c9SDave Chinner 			if (error)
151030f712c9SDave Chinner 				goto done;
151130f712c9SDave Chinner 		}
151230f712c9SDave Chinner 		break;
151330f712c9SDave Chinner 
151430f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
151530f712c9SDave Chinner 		/*
151630f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
151730f712c9SDave Chinner 		 * The left neighbor is contiguous, the right is not.
151830f712c9SDave Chinner 		 */
15194dcb8869SChristoph Hellwig 		old = LEFT;
15204dcb8869SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount;
15210d045540SChristoph Hellwig 
1522c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1523b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1524b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
152530f712c9SDave Chinner 
152630f712c9SDave Chinner 		if (bma->cur == NULL)
152730f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
152830f712c9SDave Chinner 		else {
152930f712c9SDave Chinner 			rval = 0;
1530e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
153130f712c9SDave Chinner 			if (error)
153230f712c9SDave Chinner 				goto done;
1533f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1534f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1535f9e03706SDarrick J. Wong 				goto done;
1536f9e03706SDarrick J. Wong 			}
1537a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &LEFT);
153830f712c9SDave Chinner 			if (error)
153930f712c9SDave Chinner 				goto done;
154030f712c9SDave Chinner 		}
154130f712c9SDave Chinner 		break;
154230f712c9SDave Chinner 
154330f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
154430f712c9SDave Chinner 		/*
154530f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
15469230a0b6SDave Chinner 		 * The right neighbor is contiguous, the left is not. Take care
15479230a0b6SDave Chinner 		 * with delay -> unwritten extent allocation here because the
15489230a0b6SDave Chinner 		 * delalloc record we are overwriting is always written.
154930f712c9SDave Chinner 		 */
15504dcb8869SChristoph Hellwig 		PREV.br_startblock = new->br_startblock;
15514dcb8869SChristoph Hellwig 		PREV.br_blockcount += RIGHT.br_blockcount;
15529230a0b6SDave Chinner 		PREV.br_state = new->br_state;
15530d045540SChristoph Hellwig 
1554b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
1555c38ccf59SChristoph Hellwig 		xfs_iext_remove(bma->ip, &bma->icur, state);
1556b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1557b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
155830f712c9SDave Chinner 
155930f712c9SDave Chinner 		if (bma->cur == NULL)
156030f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
156130f712c9SDave Chinner 		else {
156230f712c9SDave Chinner 			rval = 0;
1563e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
156430f712c9SDave Chinner 			if (error)
156530f712c9SDave Chinner 				goto done;
1566f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1567f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1568f9e03706SDarrick J. Wong 				goto done;
1569f9e03706SDarrick J. Wong 			}
1570a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &PREV);
157130f712c9SDave Chinner 			if (error)
157230f712c9SDave Chinner 				goto done;
157330f712c9SDave Chinner 		}
157430f712c9SDave Chinner 		break;
157530f712c9SDave Chinner 
157630f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
157730f712c9SDave Chinner 		/*
157830f712c9SDave Chinner 		 * Filling in all of a previously delayed allocation extent.
157930f712c9SDave Chinner 		 * Neither the left nor right neighbors are contiguous with
158030f712c9SDave Chinner 		 * the new one.
158130f712c9SDave Chinner 		 */
15824dcb8869SChristoph Hellwig 		PREV.br_startblock = new->br_startblock;
15834dcb8869SChristoph Hellwig 		PREV.br_state = new->br_state;
1584b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1585daf83964SChristoph Hellwig 		ifp->if_nextents++;
158630f712c9SDave Chinner 
158730f712c9SDave Chinner 		if (bma->cur == NULL)
158830f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
158930f712c9SDave Chinner 		else {
159030f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1591e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
159230f712c9SDave Chinner 			if (error)
159330f712c9SDave Chinner 				goto done;
1594f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1595f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1596f9e03706SDarrick J. Wong 				goto done;
1597f9e03706SDarrick J. Wong 			}
159830f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
159930f712c9SDave Chinner 			if (error)
160030f712c9SDave Chinner 				goto done;
1601f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1602f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1603f9e03706SDarrick J. Wong 				goto done;
1604f9e03706SDarrick J. Wong 			}
160530f712c9SDave Chinner 		}
160630f712c9SDave Chinner 		break;
160730f712c9SDave Chinner 
160830f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
160930f712c9SDave Chinner 		/*
161030f712c9SDave Chinner 		 * Filling in the first part of a previous delayed allocation.
161130f712c9SDave Chinner 		 * The left neighbor is contiguous.
161230f712c9SDave Chinner 		 */
16134dcb8869SChristoph Hellwig 		old = LEFT;
16144dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
16154dcb8869SChristoph Hellwig 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
16164dcb8869SChristoph Hellwig 				startblockval(PREV.br_startblock));
16174dcb8869SChristoph Hellwig 
16184dcb8869SChristoph Hellwig 		LEFT.br_blockcount += new->br_blockcount;
161930f712c9SDave Chinner 
1620bf99971cSChristoph Hellwig 		PREV.br_blockcount = temp;
16214dcb8869SChristoph Hellwig 		PREV.br_startoff += new->br_blockcount;
16224dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
16230d045540SChristoph Hellwig 
1624b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1625b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
1626b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
16274dcb8869SChristoph Hellwig 
162830f712c9SDave Chinner 		if (bma->cur == NULL)
162930f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
163030f712c9SDave Chinner 		else {
163130f712c9SDave Chinner 			rval = 0;
1632e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
163330f712c9SDave Chinner 			if (error)
163430f712c9SDave Chinner 				goto done;
1635f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1636f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1637f9e03706SDarrick J. Wong 				goto done;
1638f9e03706SDarrick J. Wong 			}
1639a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &LEFT);
164030f712c9SDave Chinner 			if (error)
164130f712c9SDave Chinner 				goto done;
164230f712c9SDave Chinner 		}
164330f712c9SDave Chinner 		break;
164430f712c9SDave Chinner 
164530f712c9SDave Chinner 	case BMAP_LEFT_FILLING:
164630f712c9SDave Chinner 		/*
164730f712c9SDave Chinner 		 * Filling in the first part of a previous delayed allocation.
164830f712c9SDave Chinner 		 * The left neighbor is not contiguous.
164930f712c9SDave Chinner 		 */
1650b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1651daf83964SChristoph Hellwig 		ifp->if_nextents++;
1652daf83964SChristoph Hellwig 
165330f712c9SDave Chinner 		if (bma->cur == NULL)
165430f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
165530f712c9SDave Chinner 		else {
165630f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1657e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
165830f712c9SDave Chinner 			if (error)
165930f712c9SDave Chinner 				goto done;
1660f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1661f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1662f9e03706SDarrick J. Wong 				goto done;
1663f9e03706SDarrick J. Wong 			}
166430f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
166530f712c9SDave Chinner 			if (error)
166630f712c9SDave Chinner 				goto done;
1667f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1668f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1669f9e03706SDarrick J. Wong 				goto done;
1670f9e03706SDarrick J. Wong 			}
167130f712c9SDave Chinner 		}
167230f712c9SDave Chinner 
16736d3eb1ecSDarrick J. Wong 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
167430f712c9SDave Chinner 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1675280253d2SBrian Foster 					&bma->cur, 1, &tmp_rval, whichfork);
167630f712c9SDave Chinner 			rval |= tmp_rval;
167730f712c9SDave Chinner 			if (error)
167830f712c9SDave Chinner 				goto done;
167930f712c9SDave Chinner 		}
16804dcb8869SChristoph Hellwig 
16814dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
168230f712c9SDave Chinner 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
168330f712c9SDave Chinner 			startblockval(PREV.br_startblock) -
168492219c29SDave Chinner 			(bma->cur ? bma->cur->bc_ino.allocated : 0));
16854dcb8869SChristoph Hellwig 
16864dcb8869SChristoph Hellwig 		PREV.br_startoff = new_endoff;
16874dcb8869SChristoph Hellwig 		PREV.br_blockcount = temp;
16884dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
1689b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
16900254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1691b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, &bma->icur);
169230f712c9SDave Chinner 		break;
169330f712c9SDave Chinner 
169430f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
169530f712c9SDave Chinner 		/*
169630f712c9SDave Chinner 		 * Filling in the last part of a previous delayed allocation.
169730f712c9SDave Chinner 		 * The right neighbor is contiguous with the new allocation.
169830f712c9SDave Chinner 		 */
16994dcb8869SChristoph Hellwig 		old = RIGHT;
17004dcb8869SChristoph Hellwig 		RIGHT.br_startoff = new->br_startoff;
17014dcb8869SChristoph Hellwig 		RIGHT.br_startblock = new->br_startblock;
17024dcb8869SChristoph Hellwig 		RIGHT.br_blockcount += new->br_blockcount;
17034dcb8869SChristoph Hellwig 
170430f712c9SDave Chinner 		if (bma->cur == NULL)
170530f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
170630f712c9SDave Chinner 		else {
170730f712c9SDave Chinner 			rval = 0;
1708e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
170930f712c9SDave Chinner 			if (error)
171030f712c9SDave Chinner 				goto done;
1711f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1712f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1713f9e03706SDarrick J. Wong 				goto done;
1714f9e03706SDarrick J. Wong 			}
1715a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(bma->cur, &RIGHT);
171630f712c9SDave Chinner 			if (error)
171730f712c9SDave Chinner 				goto done;
171830f712c9SDave Chinner 		}
171930f712c9SDave Chinner 
17204dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
172130f712c9SDave Chinner 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
172230f712c9SDave Chinner 			startblockval(PREV.br_startblock));
17234dcb8869SChristoph Hellwig 
17244dcb8869SChristoph Hellwig 		PREV.br_blockcount = temp;
17254dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
172630f712c9SDave Chinner 
1727b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1728b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
1729b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
173030f712c9SDave Chinner 		break;
173130f712c9SDave Chinner 
173230f712c9SDave Chinner 	case BMAP_RIGHT_FILLING:
173330f712c9SDave Chinner 		/*
173430f712c9SDave Chinner 		 * Filling in the last part of a previous delayed allocation.
173530f712c9SDave Chinner 		 * The right neighbor is not contiguous.
173630f712c9SDave Chinner 		 */
1737b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1738daf83964SChristoph Hellwig 		ifp->if_nextents++;
1739daf83964SChristoph Hellwig 
174030f712c9SDave Chinner 		if (bma->cur == NULL)
174130f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
174230f712c9SDave Chinner 		else {
174330f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1744e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
174530f712c9SDave Chinner 			if (error)
174630f712c9SDave Chinner 				goto done;
1747f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1748f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1749f9e03706SDarrick J. Wong 				goto done;
1750f9e03706SDarrick J. Wong 			}
175130f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
175230f712c9SDave Chinner 			if (error)
175330f712c9SDave Chinner 				goto done;
1754f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1755f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1756f9e03706SDarrick J. Wong 				goto done;
1757f9e03706SDarrick J. Wong 			}
175830f712c9SDave Chinner 		}
175930f712c9SDave Chinner 
17606d3eb1ecSDarrick J. Wong 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
176130f712c9SDave Chinner 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1762280253d2SBrian Foster 				&bma->cur, 1, &tmp_rval, whichfork);
176330f712c9SDave Chinner 			rval |= tmp_rval;
176430f712c9SDave Chinner 			if (error)
176530f712c9SDave Chinner 				goto done;
176630f712c9SDave Chinner 		}
17674dcb8869SChristoph Hellwig 
17684dcb8869SChristoph Hellwig 		temp = PREV.br_blockcount - new->br_blockcount;
176930f712c9SDave Chinner 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
177030f712c9SDave Chinner 			startblockval(PREV.br_startblock) -
177192219c29SDave Chinner 			(bma->cur ? bma->cur->bc_ino.allocated : 0));
17724dcb8869SChristoph Hellwig 
17734dcb8869SChristoph Hellwig 		PREV.br_startblock = nullstartblock(da_new);
17744dcb8869SChristoph Hellwig 		PREV.br_blockcount = temp;
17750254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1776b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
177730f712c9SDave Chinner 		break;
177830f712c9SDave Chinner 
177930f712c9SDave Chinner 	case 0:
178030f712c9SDave Chinner 		/*
178130f712c9SDave Chinner 		 * Filling in the middle part of a previous delayed allocation.
178230f712c9SDave Chinner 		 * Contiguity is impossible here.
178330f712c9SDave Chinner 		 * This case is avoided almost all the time.
178430f712c9SDave Chinner 		 *
178530f712c9SDave Chinner 		 * We start with a delayed allocation:
178630f712c9SDave Chinner 		 *
178730f712c9SDave Chinner 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
178830f712c9SDave Chinner 		 *  PREV @ idx
178930f712c9SDave Chinner 		 *
179030f712c9SDave Chinner 		 * and we are allocating:
179130f712c9SDave Chinner 		 *                     +rrrrrrrrrrrrrrrrr+
179230f712c9SDave Chinner 		 *			      new
179330f712c9SDave Chinner 		 *
179430f712c9SDave Chinner 		 * and we set it up for insertion as:
179530f712c9SDave Chinner 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
179630f712c9SDave Chinner 		 *                            new
179730f712c9SDave Chinner 		 *  PREV @ idx          LEFT              RIGHT
179830f712c9SDave Chinner 		 *                      inserted at idx + 1
179930f712c9SDave Chinner 		 */
18004dcb8869SChristoph Hellwig 		old = PREV;
18014dcb8869SChristoph Hellwig 
18024dcb8869SChristoph Hellwig 		/* LEFT is the new middle */
180330f712c9SDave Chinner 		LEFT = *new;
18044dcb8869SChristoph Hellwig 
18054dcb8869SChristoph Hellwig 		/* RIGHT is the new right */
180630f712c9SDave Chinner 		RIGHT.br_state = PREV.br_state;
180730f712c9SDave Chinner 		RIGHT.br_startoff = new_endoff;
18084dcb8869SChristoph Hellwig 		RIGHT.br_blockcount =
18094dcb8869SChristoph Hellwig 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
18104dcb8869SChristoph Hellwig 		RIGHT.br_startblock =
18114dcb8869SChristoph Hellwig 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
18124dcb8869SChristoph Hellwig 					RIGHT.br_blockcount));
18134dcb8869SChristoph Hellwig 
18144dcb8869SChristoph Hellwig 		/* truncate PREV */
18154dcb8869SChristoph Hellwig 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
18164dcb8869SChristoph Hellwig 		PREV.br_startblock =
18174dcb8869SChristoph Hellwig 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
18184dcb8869SChristoph Hellwig 					PREV.br_blockcount));
1819b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
18204dcb8869SChristoph Hellwig 
1821b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, &bma->icur);
18220254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
18230254c2f2SChristoph Hellwig 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1824daf83964SChristoph Hellwig 		ifp->if_nextents++;
18254dcb8869SChristoph Hellwig 
182630f712c9SDave Chinner 		if (bma->cur == NULL)
182730f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
182830f712c9SDave Chinner 		else {
182930f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
1830e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
183130f712c9SDave Chinner 			if (error)
183230f712c9SDave Chinner 				goto done;
1833f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1834f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1835f9e03706SDarrick J. Wong 				goto done;
1836f9e03706SDarrick J. Wong 			}
183730f712c9SDave Chinner 			error = xfs_btree_insert(bma->cur, &i);
183830f712c9SDave Chinner 			if (error)
183930f712c9SDave Chinner 				goto done;
1840f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1841f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
1842f9e03706SDarrick J. Wong 				goto done;
1843f9e03706SDarrick J. Wong 			}
184430f712c9SDave Chinner 		}
184530f712c9SDave Chinner 
18466d3eb1ecSDarrick J. Wong 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
184730f712c9SDave Chinner 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1848280253d2SBrian Foster 					&bma->cur, 1, &tmp_rval, whichfork);
184930f712c9SDave Chinner 			rval |= tmp_rval;
185030f712c9SDave Chinner 			if (error)
185130f712c9SDave Chinner 				goto done;
185230f712c9SDave Chinner 		}
18534dcb8869SChristoph Hellwig 
18544dcb8869SChristoph Hellwig 		da_new = startblockval(PREV.br_startblock) +
18554dcb8869SChristoph Hellwig 			 startblockval(RIGHT.br_startblock);
185630f712c9SDave Chinner 		break;
185730f712c9SDave Chinner 
185830f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
185930f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
186030f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
186130f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
186230f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
186330f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
186430f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
186530f712c9SDave Chinner 		/*
186630f712c9SDave Chinner 		 * These cases are all impossible.
186730f712c9SDave Chinner 		 */
186830f712c9SDave Chinner 		ASSERT(0);
186930f712c9SDave Chinner 	}
187030f712c9SDave Chinner 
187195eb308cSDarrick J. Wong 	/* add reverse mapping unless caller opted out */
1872bc46ac64SDarrick J. Wong 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1873bc46ac64SDarrick J. Wong 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
18749c194644SDarrick J. Wong 
187530f712c9SDave Chinner 	/* convert to a btree if necessary */
18766d3eb1ecSDarrick J. Wong 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
187730f712c9SDave Chinner 		int	tmp_logflags;	/* partial log flag return val */
187830f712c9SDave Chinner 
187930f712c9SDave Chinner 		ASSERT(bma->cur == NULL);
188030f712c9SDave Chinner 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1881280253d2SBrian Foster 				&bma->cur, da_old > 0, &tmp_logflags,
1882280253d2SBrian Foster 				whichfork);
188330f712c9SDave Chinner 		bma->logflags |= tmp_logflags;
188430f712c9SDave Chinner 		if (error)
188530f712c9SDave Chinner 			goto done;
188630f712c9SDave Chinner 	}
188730f712c9SDave Chinner 
18889fe82b8cSDarrick J. Wong 	if (da_new != da_old)
18899fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
18909fe82b8cSDarrick J. Wong 
1891ca1862b0SChristoph Hellwig 	if (bma->cur) {
189292219c29SDave Chinner 		da_new += bma->cur->bc_ino.allocated;
189392219c29SDave Chinner 		bma->cur->bc_ino.allocated = 0;
189430f712c9SDave Chinner 	}
189530f712c9SDave Chinner 
1896ca1862b0SChristoph Hellwig 	/* adjust for changes in reserved delayed indirect blocks */
1897ca1862b0SChristoph Hellwig 	if (da_new != da_old) {
1898ca1862b0SChristoph Hellwig 		ASSERT(state == 0 || da_new < da_old);
1899ca1862b0SChristoph Hellwig 		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
1900ca1862b0SChristoph Hellwig 				false);
1901ca1862b0SChristoph Hellwig 	}
190230f712c9SDave Chinner 
19036d3eb1ecSDarrick J. Wong 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
190430f712c9SDave Chinner done:
190560b4984fSDarrick J. Wong 	if (whichfork != XFS_COW_FORK)
190630f712c9SDave Chinner 		bma->logflags |= rval;
190730f712c9SDave Chinner 	return error;
190830f712c9SDave Chinner #undef	LEFT
190930f712c9SDave Chinner #undef	RIGHT
191030f712c9SDave Chinner #undef	PREV
191130f712c9SDave Chinner }
191230f712c9SDave Chinner 
191330f712c9SDave Chinner /*
191430f712c9SDave Chinner  * Convert an unwritten allocation to a real allocation or vice versa.
191530f712c9SDave Chinner  */
191626b91c72SChristoph Hellwig int					/* error */
191730f712c9SDave Chinner xfs_bmap_add_extent_unwritten_real(
191830f712c9SDave Chinner 	struct xfs_trans	*tp,
191930f712c9SDave Chinner 	xfs_inode_t		*ip,	/* incore inode pointer */
192005a630d7SDarrick J. Wong 	int			whichfork,
1921b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
1922ae127f08SDarrick J. Wong 	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
192330f712c9SDave Chinner 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
192430f712c9SDave Chinner 	int			*logflagsp) /* inode logging flags */
192530f712c9SDave Chinner {
1926ae127f08SDarrick J. Wong 	struct xfs_btree_cur	*cur;	/* btree cursor */
192730f712c9SDave Chinner 	int			error;	/* error return value */
192830f712c9SDave Chinner 	int			i;	/* temp state */
19293ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;	/* inode fork pointer */
193030f712c9SDave Chinner 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
193130f712c9SDave Chinner 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
193230f712c9SDave Chinner 					/* left is 0, right is 1, prev is 2 */
193330f712c9SDave Chinner 	int			rval=0;	/* return value (logging flags) */
19340e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
193505a630d7SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
193679fa6143SChristoph Hellwig 	struct xfs_bmbt_irec	old;
193730f712c9SDave Chinner 
193830f712c9SDave Chinner 	*logflagsp = 0;
193930f712c9SDave Chinner 
194030f712c9SDave Chinner 	cur = *curp;
1941732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
194230f712c9SDave Chinner 
194330f712c9SDave Chinner 	ASSERT(!isnullstartblock(new->br_startblock));
194430f712c9SDave Chinner 
1945ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_add_exlist);
194630f712c9SDave Chinner 
194730f712c9SDave Chinner #define	LEFT		r[0]
194830f712c9SDave Chinner #define	RIGHT		r[1]
194930f712c9SDave Chinner #define	PREV		r[2]
195030f712c9SDave Chinner 
195130f712c9SDave Chinner 	/*
195230f712c9SDave Chinner 	 * Set up a bunch of variables to make the tests simpler.
195330f712c9SDave Chinner 	 */
195430f712c9SDave Chinner 	error = 0;
1955b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, icur, &PREV);
195679fa6143SChristoph Hellwig 	ASSERT(new->br_state != PREV.br_state);
195730f712c9SDave Chinner 	new_endoff = new->br_startoff + new->br_blockcount;
195830f712c9SDave Chinner 	ASSERT(PREV.br_startoff <= new->br_startoff);
195930f712c9SDave Chinner 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
196030f712c9SDave Chinner 
196130f712c9SDave Chinner 	/*
196230f712c9SDave Chinner 	 * Set flags determining what part of the previous oldext allocation
196330f712c9SDave Chinner 	 * extent is being replaced by a newext allocation.
196430f712c9SDave Chinner 	 */
196530f712c9SDave Chinner 	if (PREV.br_startoff == new->br_startoff)
196630f712c9SDave Chinner 		state |= BMAP_LEFT_FILLING;
196730f712c9SDave Chinner 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
196830f712c9SDave Chinner 		state |= BMAP_RIGHT_FILLING;
196930f712c9SDave Chinner 
197030f712c9SDave Chinner 	/*
197130f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor.
197230f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
197330f712c9SDave Chinner 	 */
1974b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
197530f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
197630f712c9SDave Chinner 		if (isnullstartblock(LEFT.br_startblock))
197730f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
197830f712c9SDave Chinner 	}
197930f712c9SDave Chinner 
198030f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
198130f712c9SDave Chinner 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
198230f712c9SDave Chinner 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
198379fa6143SChristoph Hellwig 	    LEFT.br_state == new->br_state &&
198495f0b95eSChandan Babu R 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
198530f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
198630f712c9SDave Chinner 
198730f712c9SDave Chinner 	/*
198830f712c9SDave Chinner 	 * Check and set flags if this segment has a right neighbor.
198930f712c9SDave Chinner 	 * Don't set contiguous if the combined extent would be too large.
199030f712c9SDave Chinner 	 * Also check for all-three-contiguous being too large.
199130f712c9SDave Chinner 	 */
1992b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
199330f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
199430f712c9SDave Chinner 		if (isnullstartblock(RIGHT.br_startblock))
199530f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
199630f712c9SDave Chinner 	}
199730f712c9SDave Chinner 
199830f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
199930f712c9SDave Chinner 	    new_endoff == RIGHT.br_startoff &&
200030f712c9SDave Chinner 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
200179fa6143SChristoph Hellwig 	    new->br_state == RIGHT.br_state &&
200295f0b95eSChandan Babu R 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
200330f712c9SDave Chinner 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
200430f712c9SDave Chinner 		       BMAP_RIGHT_FILLING)) !=
200530f712c9SDave Chinner 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
200630f712c9SDave Chinner 		       BMAP_RIGHT_FILLING) ||
200730f712c9SDave Chinner 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
200895f0b95eSChandan Babu R 			<= XFS_MAX_BMBT_EXTLEN))
200930f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
201030f712c9SDave Chinner 
201130f712c9SDave Chinner 	/*
201230f712c9SDave Chinner 	 * Switch out based on the FILLING and CONTIG state bits.
201330f712c9SDave Chinner 	 */
201430f712c9SDave Chinner 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
201530f712c9SDave Chinner 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
201630f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
201730f712c9SDave Chinner 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
201830f712c9SDave Chinner 		/*
201930f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
202030f712c9SDave Chinner 		 * The left and right neighbors are both contiguous with new.
202130f712c9SDave Chinner 		 */
202279fa6143SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
202330f712c9SDave Chinner 
2024c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2025c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2026b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2027b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2028daf83964SChristoph Hellwig 		ifp->if_nextents -= 2;
202930f712c9SDave Chinner 		if (cur == NULL)
203030f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
203130f712c9SDave Chinner 		else {
203230f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2033e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2034e16cf9b0SChristoph Hellwig 			if (error)
203530f712c9SDave Chinner 				goto done;
2036f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2037f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2038f9e03706SDarrick J. Wong 				goto done;
2039f9e03706SDarrick J. Wong 			}
204030f712c9SDave Chinner 			if ((error = xfs_btree_delete(cur, &i)))
204130f712c9SDave Chinner 				goto done;
2042f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2043f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2044f9e03706SDarrick J. Wong 				goto done;
2045f9e03706SDarrick J. Wong 			}
204630f712c9SDave Chinner 			if ((error = xfs_btree_decrement(cur, 0, &i)))
204730f712c9SDave Chinner 				goto done;
2048f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2049f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2050f9e03706SDarrick J. Wong 				goto done;
2051f9e03706SDarrick J. Wong 			}
205230f712c9SDave Chinner 			if ((error = xfs_btree_delete(cur, &i)))
205330f712c9SDave Chinner 				goto done;
2054f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2055f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2056f9e03706SDarrick J. Wong 				goto done;
2057f9e03706SDarrick J. Wong 			}
205830f712c9SDave Chinner 			if ((error = xfs_btree_decrement(cur, 0, &i)))
205930f712c9SDave Chinner 				goto done;
2060f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2061f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2062f9e03706SDarrick J. Wong 				goto done;
2063f9e03706SDarrick J. Wong 			}
2064a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &LEFT);
206579fa6143SChristoph Hellwig 			if (error)
206630f712c9SDave Chinner 				goto done;
206730f712c9SDave Chinner 		}
206830f712c9SDave Chinner 		break;
206930f712c9SDave Chinner 
207030f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
207130f712c9SDave Chinner 		/*
207230f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
207330f712c9SDave Chinner 		 * The left neighbor is contiguous, the right is not.
207430f712c9SDave Chinner 		 */
207579fa6143SChristoph Hellwig 		LEFT.br_blockcount += PREV.br_blockcount;
207630f712c9SDave Chinner 
2077c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2078b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2079b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2080daf83964SChristoph Hellwig 		ifp->if_nextents--;
208130f712c9SDave Chinner 		if (cur == NULL)
208230f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
208330f712c9SDave Chinner 		else {
208430f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2085e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2086e16cf9b0SChristoph Hellwig 			if (error)
208730f712c9SDave Chinner 				goto done;
2088f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2089f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2090f9e03706SDarrick J. Wong 				goto done;
2091f9e03706SDarrick J. Wong 			}
209230f712c9SDave Chinner 			if ((error = xfs_btree_delete(cur, &i)))
209330f712c9SDave Chinner 				goto done;
2094f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2095f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2096f9e03706SDarrick J. Wong 				goto done;
2097f9e03706SDarrick J. Wong 			}
209830f712c9SDave Chinner 			if ((error = xfs_btree_decrement(cur, 0, &i)))
209930f712c9SDave Chinner 				goto done;
2100f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2101f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2102f9e03706SDarrick J. Wong 				goto done;
2103f9e03706SDarrick J. Wong 			}
2104a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &LEFT);
210579fa6143SChristoph Hellwig 			if (error)
210630f712c9SDave Chinner 				goto done;
210730f712c9SDave Chinner 		}
210830f712c9SDave Chinner 		break;
210930f712c9SDave Chinner 
211030f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
211130f712c9SDave Chinner 		/*
211230f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
211330f712c9SDave Chinner 		 * The right neighbor is contiguous, the left is not.
211430f712c9SDave Chinner 		 */
211579fa6143SChristoph Hellwig 		PREV.br_blockcount += RIGHT.br_blockcount;
211679fa6143SChristoph Hellwig 		PREV.br_state = new->br_state;
2117a6818477SChristoph Hellwig 
2118b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
2119c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2120b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2121b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2122daf83964SChristoph Hellwig 		ifp->if_nextents--;
212379fa6143SChristoph Hellwig 
212430f712c9SDave Chinner 		if (cur == NULL)
212530f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
212630f712c9SDave Chinner 		else {
212730f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2128e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2129e16cf9b0SChristoph Hellwig 			if (error)
213030f712c9SDave Chinner 				goto done;
2131f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2132f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2133f9e03706SDarrick J. Wong 				goto done;
2134f9e03706SDarrick J. Wong 			}
213530f712c9SDave Chinner 			if ((error = xfs_btree_delete(cur, &i)))
213630f712c9SDave Chinner 				goto done;
2137f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2138f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2139f9e03706SDarrick J. Wong 				goto done;
2140f9e03706SDarrick J. Wong 			}
214130f712c9SDave Chinner 			if ((error = xfs_btree_decrement(cur, 0, &i)))
214230f712c9SDave Chinner 				goto done;
2143f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2144f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2145f9e03706SDarrick J. Wong 				goto done;
2146f9e03706SDarrick J. Wong 			}
2147a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
214879fa6143SChristoph Hellwig 			if (error)
214930f712c9SDave Chinner 				goto done;
215030f712c9SDave Chinner 		}
215130f712c9SDave Chinner 		break;
215230f712c9SDave Chinner 
215330f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
215430f712c9SDave Chinner 		/*
215530f712c9SDave Chinner 		 * Setting all of a previous oldext extent to newext.
215630f712c9SDave Chinner 		 * Neither the left nor right neighbors are contiguous with
215730f712c9SDave Chinner 		 * the new one.
215830f712c9SDave Chinner 		 */
215979fa6143SChristoph Hellwig 		PREV.br_state = new->br_state;
2160b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
216130f712c9SDave Chinner 
216230f712c9SDave Chinner 		if (cur == NULL)
216330f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
216430f712c9SDave Chinner 		else {
216530f712c9SDave Chinner 			rval = 0;
2166e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2167e16cf9b0SChristoph Hellwig 			if (error)
216830f712c9SDave Chinner 				goto done;
2169f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2170f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2171f9e03706SDarrick J. Wong 				goto done;
2172f9e03706SDarrick J. Wong 			}
2173a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
217479fa6143SChristoph Hellwig 			if (error)
217530f712c9SDave Chinner 				goto done;
217630f712c9SDave Chinner 		}
217730f712c9SDave Chinner 		break;
217830f712c9SDave Chinner 
217930f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
218030f712c9SDave Chinner 		/*
218130f712c9SDave Chinner 		 * Setting the first part of a previous oldext extent to newext.
218230f712c9SDave Chinner 		 * The left neighbor is contiguous.
218330f712c9SDave Chinner 		 */
218479fa6143SChristoph Hellwig 		LEFT.br_blockcount += new->br_blockcount;
218530f712c9SDave Chinner 
218679fa6143SChristoph Hellwig 		old = PREV;
218779fa6143SChristoph Hellwig 		PREV.br_startoff += new->br_blockcount;
218879fa6143SChristoph Hellwig 		PREV.br_startblock += new->br_blockcount;
218979fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
219030f712c9SDave Chinner 
2191b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2192b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2193b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &LEFT);
219430f712c9SDave Chinner 
219530f712c9SDave Chinner 		if (cur == NULL)
219630f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
219730f712c9SDave Chinner 		else {
219830f712c9SDave Chinner 			rval = 0;
2199e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
220079fa6143SChristoph Hellwig 			if (error)
220130f712c9SDave Chinner 				goto done;
2202f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2203f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2204f9e03706SDarrick J. Wong 				goto done;
2205f9e03706SDarrick J. Wong 			}
2206a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
220779fa6143SChristoph Hellwig 			if (error)
220830f712c9SDave Chinner 				goto done;
220979fa6143SChristoph Hellwig 			error = xfs_btree_decrement(cur, 0, &i);
221079fa6143SChristoph Hellwig 			if (error)
221130f712c9SDave Chinner 				goto done;
2212a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &LEFT);
221330f712c9SDave Chinner 			if (error)
221430f712c9SDave Chinner 				goto done;
221530f712c9SDave Chinner 		}
221630f712c9SDave Chinner 		break;
221730f712c9SDave Chinner 
221830f712c9SDave Chinner 	case BMAP_LEFT_FILLING:
221930f712c9SDave Chinner 		/*
222030f712c9SDave Chinner 		 * Setting the first part of a previous oldext extent to newext.
222130f712c9SDave Chinner 		 * The left neighbor is not contiguous.
222230f712c9SDave Chinner 		 */
222379fa6143SChristoph Hellwig 		old = PREV;
222479fa6143SChristoph Hellwig 		PREV.br_startoff += new->br_blockcount;
222579fa6143SChristoph Hellwig 		PREV.br_startblock += new->br_blockcount;
222679fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
222730f712c9SDave Chinner 
2228b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
22290254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
2230daf83964SChristoph Hellwig 		ifp->if_nextents++;
2231daf83964SChristoph Hellwig 
223230f712c9SDave Chinner 		if (cur == NULL)
223330f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
223430f712c9SDave Chinner 		else {
223530f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2236e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
223779fa6143SChristoph Hellwig 			if (error)
223830f712c9SDave Chinner 				goto done;
2239f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2240f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2241f9e03706SDarrick J. Wong 				goto done;
2242f9e03706SDarrick J. Wong 			}
2243a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
224479fa6143SChristoph Hellwig 			if (error)
224530f712c9SDave Chinner 				goto done;
224630f712c9SDave Chinner 			cur->bc_rec.b = *new;
224730f712c9SDave Chinner 			if ((error = xfs_btree_insert(cur, &i)))
224830f712c9SDave Chinner 				goto done;
2249f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2250f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2251f9e03706SDarrick J. Wong 				goto done;
2252f9e03706SDarrick J. Wong 			}
225330f712c9SDave Chinner 		}
225430f712c9SDave Chinner 		break;
225530f712c9SDave Chinner 
225630f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
225730f712c9SDave Chinner 		/*
225830f712c9SDave Chinner 		 * Setting the last part of a previous oldext extent to newext.
225930f712c9SDave Chinner 		 * The right neighbor is contiguous with the new allocation.
226030f712c9SDave Chinner 		 */
226179fa6143SChristoph Hellwig 		old = PREV;
226279fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
226330f712c9SDave Chinner 
226479fa6143SChristoph Hellwig 		RIGHT.br_startoff = new->br_startoff;
226579fa6143SChristoph Hellwig 		RIGHT.br_startblock = new->br_startblock;
226679fa6143SChristoph Hellwig 		RIGHT.br_blockcount += new->br_blockcount;
2267a6818477SChristoph Hellwig 
2268b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2269b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
2270b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
227130f712c9SDave Chinner 
227230f712c9SDave Chinner 		if (cur == NULL)
227330f712c9SDave Chinner 			rval = XFS_ILOG_DEXT;
227430f712c9SDave Chinner 		else {
227530f712c9SDave Chinner 			rval = 0;
2276e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
227779fa6143SChristoph Hellwig 			if (error)
227830f712c9SDave Chinner 				goto done;
2279f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2280f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2281f9e03706SDarrick J. Wong 				goto done;
2282f9e03706SDarrick J. Wong 			}
2283a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
228479fa6143SChristoph Hellwig 			if (error)
228530f712c9SDave Chinner 				goto done;
228679fa6143SChristoph Hellwig 			error = xfs_btree_increment(cur, 0, &i);
228779fa6143SChristoph Hellwig 			if (error)
228830f712c9SDave Chinner 				goto done;
2289a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &RIGHT);
229079fa6143SChristoph Hellwig 			if (error)
229130f712c9SDave Chinner 				goto done;
229230f712c9SDave Chinner 		}
229330f712c9SDave Chinner 		break;
229430f712c9SDave Chinner 
229530f712c9SDave Chinner 	case BMAP_RIGHT_FILLING:
229630f712c9SDave Chinner 		/*
229730f712c9SDave Chinner 		 * Setting the last part of a previous oldext extent to newext.
229830f712c9SDave Chinner 		 * The right neighbor is not contiguous.
229930f712c9SDave Chinner 		 */
230079fa6143SChristoph Hellwig 		old = PREV;
230179fa6143SChristoph Hellwig 		PREV.br_blockcount -= new->br_blockcount;
230230f712c9SDave Chinner 
2303b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2304b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
23050254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
2306daf83964SChristoph Hellwig 		ifp->if_nextents++;
230730f712c9SDave Chinner 
230830f712c9SDave Chinner 		if (cur == NULL)
230930f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
231030f712c9SDave Chinner 		else {
231130f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2312e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
231379fa6143SChristoph Hellwig 			if (error)
231430f712c9SDave Chinner 				goto done;
2315f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2316f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2317f9e03706SDarrick J. Wong 				goto done;
2318f9e03706SDarrick J. Wong 			}
2319a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &PREV);
232079fa6143SChristoph Hellwig 			if (error)
232130f712c9SDave Chinner 				goto done;
2322e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2323e16cf9b0SChristoph Hellwig 			if (error)
232430f712c9SDave Chinner 				goto done;
2325f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2326f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2327f9e03706SDarrick J. Wong 				goto done;
2328f9e03706SDarrick J. Wong 			}
232930f712c9SDave Chinner 			if ((error = xfs_btree_insert(cur, &i)))
233030f712c9SDave Chinner 				goto done;
2331f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2332f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2333f9e03706SDarrick J. Wong 				goto done;
2334f9e03706SDarrick J. Wong 			}
233530f712c9SDave Chinner 		}
233630f712c9SDave Chinner 		break;
233730f712c9SDave Chinner 
233830f712c9SDave Chinner 	case 0:
233930f712c9SDave Chinner 		/*
234030f712c9SDave Chinner 		 * Setting the middle part of a previous oldext extent to
234130f712c9SDave Chinner 		 * newext.  Contiguity is impossible here.
234230f712c9SDave Chinner 		 * One extent becomes three extents.
234330f712c9SDave Chinner 		 */
234479fa6143SChristoph Hellwig 		old = PREV;
234579fa6143SChristoph Hellwig 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
234630f712c9SDave Chinner 
234730f712c9SDave Chinner 		r[0] = *new;
234830f712c9SDave Chinner 		r[1].br_startoff = new_endoff;
234930f712c9SDave Chinner 		r[1].br_blockcount =
235079fa6143SChristoph Hellwig 			old.br_startoff + old.br_blockcount - new_endoff;
235130f712c9SDave Chinner 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
235279fa6143SChristoph Hellwig 		r[1].br_state = PREV.br_state;
235330f712c9SDave Chinner 
2354b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &PREV);
2355b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
23560254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &r[1], state);
23570254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &r[0], state);
2358daf83964SChristoph Hellwig 		ifp->if_nextents += 2;
235930f712c9SDave Chinner 
236030f712c9SDave Chinner 		if (cur == NULL)
236130f712c9SDave Chinner 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
236230f712c9SDave Chinner 		else {
236330f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2364e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
236579fa6143SChristoph Hellwig 			if (error)
236630f712c9SDave Chinner 				goto done;
2367f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2368f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2369f9e03706SDarrick J. Wong 				goto done;
2370f9e03706SDarrick J. Wong 			}
237130f712c9SDave Chinner 			/* new right extent - oldext */
2372a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &r[1]);
2373a67d00a5SChristoph Hellwig 			if (error)
237430f712c9SDave Chinner 				goto done;
237530f712c9SDave Chinner 			/* new left extent - oldext */
237630f712c9SDave Chinner 			cur->bc_rec.b = PREV;
237730f712c9SDave Chinner 			if ((error = xfs_btree_insert(cur, &i)))
237830f712c9SDave Chinner 				goto done;
2379f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2380f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2381f9e03706SDarrick J. Wong 				goto done;
2382f9e03706SDarrick J. Wong 			}
238330f712c9SDave Chinner 			/*
238430f712c9SDave Chinner 			 * Reset the cursor to the position of the new extent
238530f712c9SDave Chinner 			 * we are about to insert as we can't trust it after
238630f712c9SDave Chinner 			 * the previous insert.
238730f712c9SDave Chinner 			 */
2388e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2389e16cf9b0SChristoph Hellwig 			if (error)
239030f712c9SDave Chinner 				goto done;
2391f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2392f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2393f9e03706SDarrick J. Wong 				goto done;
2394f9e03706SDarrick J. Wong 			}
239530f712c9SDave Chinner 			/* new middle extent - newext */
239630f712c9SDave Chinner 			if ((error = xfs_btree_insert(cur, &i)))
239730f712c9SDave Chinner 				goto done;
2398f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2399f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2400f9e03706SDarrick J. Wong 				goto done;
2401f9e03706SDarrick J. Wong 			}
240230f712c9SDave Chinner 		}
240330f712c9SDave Chinner 		break;
240430f712c9SDave Chinner 
240530f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
240630f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
240730f712c9SDave Chinner 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
240830f712c9SDave Chinner 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
240930f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
241030f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
241130f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
241230f712c9SDave Chinner 		/*
241330f712c9SDave Chinner 		 * These cases are all impossible.
241430f712c9SDave Chinner 		 */
241530f712c9SDave Chinner 		ASSERT(0);
241630f712c9SDave Chinner 	}
241730f712c9SDave Chinner 
24189c194644SDarrick J. Wong 	/* update reverse mappings */
2419bc46ac64SDarrick J. Wong 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
24209c194644SDarrick J. Wong 
242130f712c9SDave Chinner 	/* convert to a btree if necessary */
242205a630d7SDarrick J. Wong 	if (xfs_bmap_needs_btree(ip, whichfork)) {
242330f712c9SDave Chinner 		int	tmp_logflags;	/* partial log flag return val */
242430f712c9SDave Chinner 
242530f712c9SDave Chinner 		ASSERT(cur == NULL);
2426280253d2SBrian Foster 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2427280253d2SBrian Foster 				&tmp_logflags, whichfork);
242830f712c9SDave Chinner 		*logflagsp |= tmp_logflags;
242930f712c9SDave Chinner 		if (error)
243030f712c9SDave Chinner 			goto done;
243130f712c9SDave Chinner 	}
243230f712c9SDave Chinner 
243330f712c9SDave Chinner 	/* clear out the allocated field, done with it now in any case. */
243430f712c9SDave Chinner 	if (cur) {
243592219c29SDave Chinner 		cur->bc_ino.allocated = 0;
243630f712c9SDave Chinner 		*curp = cur;
243730f712c9SDave Chinner 	}
243830f712c9SDave Chinner 
243905a630d7SDarrick J. Wong 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
244030f712c9SDave Chinner done:
244130f712c9SDave Chinner 	*logflagsp |= rval;
244230f712c9SDave Chinner 	return error;
244330f712c9SDave Chinner #undef	LEFT
244430f712c9SDave Chinner #undef	RIGHT
244530f712c9SDave Chinner #undef	PREV
244630f712c9SDave Chinner }
244730f712c9SDave Chinner 
244830f712c9SDave Chinner /*
244930f712c9SDave Chinner  * Convert a hole to a delayed allocation.
 *
 * Insert the delalloc record @new (whose startblock must be a
 * nullstartblock, per the ASSERT below) into @whichfork of @ip at the
 * hole that @icur points at, merging with a contiguous delalloc
 * neighbour on either side where possible.  Merged extents are capped
 * at XFS_MAX_BMBT_EXTLEN.
 *
 * Each delalloc extent encodes its worst-case indirect-block
 * reservation in the startblock field (see startblockval()); when a
 * merge shrinks the combined reservation, the surplus blocks are
 * returned to the free-space counters at the end of this function.
 *
 * NOTE(review): presumably @icur is already positioned at the hole by
 * the caller — confirm against callers.
245030f712c9SDave Chinner  */
245130f712c9SDave Chinner STATIC void
245230f712c9SDave Chinner xfs_bmap_add_extent_hole_delay(
245330f712c9SDave Chinner 	xfs_inode_t		*ip,	/* incore inode pointer */
2454be51f811SDarrick J. Wong 	int			whichfork,
2455b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
245630f712c9SDave Chinner 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
245730f712c9SDave Chinner {
24583ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;	/* inode fork pointer */
245930f712c9SDave Chinner 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
246030f712c9SDave Chinner 	xfs_filblks_t		newlen=0;	/* new indirect size */
246130f712c9SDave Chinner 	xfs_filblks_t		oldlen=0;	/* old indirect size */
246230f712c9SDave Chinner 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
24630e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
24643ffc18ecSChristoph Hellwig 	xfs_filblks_t		temp;	 /* temp for indirect calculations */
246530f712c9SDave Chinner 
2466732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
	/* only delalloc records may be inserted by this helper */
246730f712c9SDave Chinner 	ASSERT(isnullstartblock(new->br_startblock));
246830f712c9SDave Chinner 
246930f712c9SDave Chinner 	/*
247030f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor
247130f712c9SDave Chinner 	 */
2472b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
247330f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
247430f712c9SDave Chinner 		if (isnullstartblock(left.br_startblock))
247530f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
247630f712c9SDave Chinner 	}
247730f712c9SDave Chinner 
247830f712c9SDave Chinner 	/*
247930f712c9SDave Chinner 	 * Check and set flags if the current (right) segment exists.
248030f712c9SDave Chinner 	 * If it doesn't exist, we're converting the hole at end-of-file.
248130f712c9SDave Chinner 	 */
2482b2b1712aSChristoph Hellwig 	if (xfs_iext_get_extent(ifp, icur, &right)) {
248330f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
248430f712c9SDave Chinner 		if (isnullstartblock(right.br_startblock))
248530f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
248630f712c9SDave Chinner 	}
248730f712c9SDave Chinner 
248830f712c9SDave Chinner 	/*
248930f712c9SDave Chinner 	 * Set contiguity flags on the left and right neighbors.
249030f712c9SDave Chinner 	 * Don't let extents get too large, even if the pieces are contiguous.
249130f712c9SDave Chinner 	 */
249230f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
249330f712c9SDave Chinner 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
249495f0b95eSChandan Babu R 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
249530f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
249630f712c9SDave Chinner 
249730f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
249830f712c9SDave Chinner 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
249995f0b95eSChandan Babu R 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
250030f712c9SDave Chinner 	    (!(state & BMAP_LEFT_CONTIG) ||
250130f712c9SDave Chinner 	     (left.br_blockcount + new->br_blockcount +
250295f0b95eSChandan Babu R 	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
250330f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
250430f712c9SDave Chinner 
250530f712c9SDave Chinner 	/*
250630f712c9SDave Chinner 	 * Switch out based on the contiguity flags.
250730f712c9SDave Chinner 	 */
250830f712c9SDave Chinner 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
250930f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
251030f712c9SDave Chinner 		/*
251130f712c9SDave Chinner 		 * New allocation is contiguous with delayed allocations
251230f712c9SDave Chinner 		 * on the left and on the right.
251330f712c9SDave Chinner 		 * Merge all three into a single extent record.
251430f712c9SDave Chinner 		 */
251530f712c9SDave Chinner 		temp = left.br_blockcount + new->br_blockcount +
251630f712c9SDave Chinner 			right.br_blockcount;
251730f712c9SDave Chinner 
		/*
		 * Recompute the indirect reservation for the merged length,
		 * never growing it beyond the sum of the old reservations.
		 */
251830f712c9SDave Chinner 		oldlen = startblockval(left.br_startblock) +
251930f712c9SDave Chinner 			startblockval(new->br_startblock) +
252030f712c9SDave Chinner 			startblockval(right.br_startblock);
25210e339ef8SBrian Foster 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
25220e339ef8SBrian Foster 					 oldlen);
25233ffc18ecSChristoph Hellwig 		left.br_startblock = nullstartblock(newlen);
25243ffc18ecSChristoph Hellwig 		left.br_blockcount = temp;
252530f712c9SDave Chinner 
		/* drop "right", back up to "left" and rewrite it as the merge */
2526c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2527b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2528b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
252930f712c9SDave Chinner 		break;
253030f712c9SDave Chinner 
253130f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
253230f712c9SDave Chinner 		/*
253330f712c9SDave Chinner 		 * New allocation is contiguous with a delayed allocation
253430f712c9SDave Chinner 		 * on the left.
253530f712c9SDave Chinner 		 * Merge the new allocation with the left neighbor.
253630f712c9SDave Chinner 		 */
253730f712c9SDave Chinner 		temp = left.br_blockcount + new->br_blockcount;
253830f712c9SDave Chinner 
253930f712c9SDave Chinner 		oldlen = startblockval(left.br_startblock) +
254030f712c9SDave Chinner 			startblockval(new->br_startblock);
25410e339ef8SBrian Foster 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
25420e339ef8SBrian Foster 					 oldlen);
25433ffc18ecSChristoph Hellwig 		left.br_blockcount = temp;
25443ffc18ecSChristoph Hellwig 		left.br_startblock = nullstartblock(newlen);
254541d196f4SChristoph Hellwig 
2546b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2547b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
254830f712c9SDave Chinner 		break;
254930f712c9SDave Chinner 
255030f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
255130f712c9SDave Chinner 		/*
255230f712c9SDave Chinner 		 * New allocation is contiguous with a delayed allocation
255330f712c9SDave Chinner 		 * on the right.
255430f712c9SDave Chinner 		 * Merge the new allocation with the right neighbor.
255530f712c9SDave Chinner 		 */
255630f712c9SDave Chinner 		temp = new->br_blockcount + right.br_blockcount;
255730f712c9SDave Chinner 		oldlen = startblockval(new->br_startblock) +
255830f712c9SDave Chinner 			startblockval(right.br_startblock);
25590e339ef8SBrian Foster 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
25600e339ef8SBrian Foster 					 oldlen);
25613ffc18ecSChristoph Hellwig 		right.br_startoff = new->br_startoff;
25623ffc18ecSChristoph Hellwig 		right.br_startblock = nullstartblock(newlen);
25633ffc18ecSChristoph Hellwig 		right.br_blockcount = temp;
2564b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &right);
256530f712c9SDave Chinner 		break;
256630f712c9SDave Chinner 
256730f712c9SDave Chinner 	case 0:
256830f712c9SDave Chinner 		/*
256930f712c9SDave Chinner 		 * New allocation is not contiguous with another
257030f712c9SDave Chinner 		 * delayed allocation.
257130f712c9SDave Chinner 		 * Insert a new entry.
257230f712c9SDave Chinner 		 */
257330f712c9SDave Chinner 		oldlen = newlen = 0;
25740254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
257530f712c9SDave Chinner 		break;
257630f712c9SDave Chinner 	}
	/*
	 * A merge can only shrink the worst-case indirect reservation
	 * (newlen was clamped to oldlen above), so return the surplus
	 * blocks to the free-space counter and reduce the per-fs
	 * delalloc tally by the same amount.
	 */
257730f712c9SDave Chinner 	if (oldlen != newlen) {
257830f712c9SDave Chinner 		ASSERT(oldlen > newlen);
25790d485adaSDave Chinner 		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
25800d485adaSDave Chinner 				 false);
258130f712c9SDave Chinner 		/*
258230f712c9SDave Chinner 		 * Nothing to do for disk quota accounting here.
258330f712c9SDave Chinner 		 */
25849fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
258530f712c9SDave Chinner 	}
258630f712c9SDave Chinner }
258730f712c9SDave Chinner 
258830f712c9SDave Chinner /*
258930f712c9SDave Chinner  * Convert a hole to a real allocation.
 *
 * Insert the real (non-delalloc) extent @new into @whichfork of @ip at
 * the hole that @icur points at, merging with a contiguous real
 * neighbour on either side when the file offset, the disk block and the
 * unwritten/normal state all line up and the merged length stays within
 * XFS_MAX_BMBT_EXTLEN.  The incore extent list, ifp->if_nextents, the
 * bmap btree (via *@curp, when one exists) and the reverse mapping
 * (unless @flags contains XFS_BMAPI_NORMAP) are all kept in step.
 *
 * If the fork now needs btree format (per xfs_bmap_needs_btree()) it is
 * converted here, in which case *@curp is set to the new btree cursor.
 *
 * Returns 0 or a negative errno; inode logging flags are OR'd into
 * *@logflagsp on both the success and error paths.
259030f712c9SDave Chinner  */
259130f712c9SDave Chinner STATIC int				/* error */
259230f712c9SDave Chinner xfs_bmap_add_extent_hole_real(
25936d04558fSChristoph Hellwig 	struct xfs_trans	*tp,
25946d04558fSChristoph Hellwig 	struct xfs_inode	*ip,
25956d04558fSChristoph Hellwig 	int			whichfork,
2596b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
25976d04558fSChristoph Hellwig 	struct xfs_btree_cur	**curp,
25986d04558fSChristoph Hellwig 	struct xfs_bmbt_irec	*new,
259995eb308cSDarrick J. Wong 	int			*logflagsp,
2600e7d410acSDave Chinner 	uint32_t		flags)
260130f712c9SDave Chinner {
2602732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
26036d04558fSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
26046d04558fSChristoph Hellwig 	struct xfs_btree_cur	*cur = *curp;
260530f712c9SDave Chinner 	int			error;	/* error return value */
260630f712c9SDave Chinner 	int			i;	/* temp state */
260730f712c9SDave Chinner 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
260830f712c9SDave Chinner 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
260930f712c9SDave Chinner 	int			rval=0;	/* return value (logging flags) */
26100e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
26111abb9e55SChristoph Hellwig 	struct xfs_bmbt_irec	old;	/* pre-merge record, for btree lookup */
261230f712c9SDave Chinner 
	/* only real extents here; delalloc goes via the _hole_delay variant */
261330f712c9SDave Chinner 	ASSERT(!isnullstartblock(new->br_startblock));
26148ef54797SDave Chinner 	ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
261530f712c9SDave Chinner 
2616ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_add_exlist);
261730f712c9SDave Chinner 
261830f712c9SDave Chinner 	/*
261930f712c9SDave Chinner 	 * Check and set flags if this segment has a left neighbor.
262030f712c9SDave Chinner 	 */
2621b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
262230f712c9SDave Chinner 		state |= BMAP_LEFT_VALID;
262330f712c9SDave Chinner 		if (isnullstartblock(left.br_startblock))
262430f712c9SDave Chinner 			state |= BMAP_LEFT_DELAY;
262530f712c9SDave Chinner 	}
262630f712c9SDave Chinner 
262730f712c9SDave Chinner 	/*
262830f712c9SDave Chinner 	 * Check and set flags if this segment has a current value.
262930f712c9SDave Chinner 	 * Not true if we're inserting into the "hole" at eof.
263030f712c9SDave Chinner 	 */
2631b2b1712aSChristoph Hellwig 	if (xfs_iext_get_extent(ifp, icur, &right)) {
263230f712c9SDave Chinner 		state |= BMAP_RIGHT_VALID;
263330f712c9SDave Chinner 		if (isnullstartblock(right.br_startblock))
263430f712c9SDave Chinner 			state |= BMAP_RIGHT_DELAY;
263530f712c9SDave Chinner 	}
263630f712c9SDave Chinner 
263730f712c9SDave Chinner 	/*
263830f712c9SDave Chinner 	 * We're inserting a real allocation between "left" and "right".
263930f712c9SDave Chinner 	 * Set the contiguity flags.  Don't let extents get too large.
264030f712c9SDave Chinner 	 */
264130f712c9SDave Chinner 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
264230f712c9SDave Chinner 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
264330f712c9SDave Chinner 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
264430f712c9SDave Chinner 	    left.br_state == new->br_state &&
264595f0b95eSChandan Babu R 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
264630f712c9SDave Chinner 		state |= BMAP_LEFT_CONTIG;
264730f712c9SDave Chinner 
264830f712c9SDave Chinner 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
264930f712c9SDave Chinner 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
265030f712c9SDave Chinner 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
265130f712c9SDave Chinner 	    new->br_state == right.br_state &&
265295f0b95eSChandan Babu R 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
265330f712c9SDave Chinner 	    (!(state & BMAP_LEFT_CONTIG) ||
265430f712c9SDave Chinner 	     left.br_blockcount + new->br_blockcount +
265595f0b95eSChandan Babu R 	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
265630f712c9SDave Chinner 		state |= BMAP_RIGHT_CONTIG;
265730f712c9SDave Chinner 
265830f712c9SDave Chinner 	error = 0;
265930f712c9SDave Chinner 	/*
266030f712c9SDave Chinner 	 * Select which case we're in here, and implement it.
266130f712c9SDave Chinner 	 */
266230f712c9SDave Chinner 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
266330f712c9SDave Chinner 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
266430f712c9SDave Chinner 		/*
266530f712c9SDave Chinner 		 * New allocation is contiguous with real allocations on the
266630f712c9SDave Chinner 		 * left and on the right.
266730f712c9SDave Chinner 		 * Merge all three into a single extent record.
266830f712c9SDave Chinner 		 */
26691abb9e55SChristoph Hellwig 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
267030f712c9SDave Chinner 
		/* drop "right", back up to "left" and rewrite it as the merge */
2671c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
2672b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2673b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
2674daf83964SChristoph Hellwig 		ifp->if_nextents--;
267530f712c9SDave Chinner 
26766d04558fSChristoph Hellwig 		if (cur == NULL) {
267730f712c9SDave Chinner 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
267830f712c9SDave Chinner 		} else {
			/*
			 * Mirror the incore merge in the btree: delete the
			 * "right" record, then widen the "left" one.
			 */
267930f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2680e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
268130f712c9SDave Chinner 			if (error)
268230f712c9SDave Chinner 				goto done;
2683f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2684f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2685f9e03706SDarrick J. Wong 				goto done;
2686f9e03706SDarrick J. Wong 			}
26876d04558fSChristoph Hellwig 			error = xfs_btree_delete(cur, &i);
268830f712c9SDave Chinner 			if (error)
268930f712c9SDave Chinner 				goto done;
2690f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2691f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2692f9e03706SDarrick J. Wong 				goto done;
2693f9e03706SDarrick J. Wong 			}
26946d04558fSChristoph Hellwig 			error = xfs_btree_decrement(cur, 0, &i);
269530f712c9SDave Chinner 			if (error)
269630f712c9SDave Chinner 				goto done;
2697f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2698f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2699f9e03706SDarrick J. Wong 				goto done;
2700f9e03706SDarrick J. Wong 			}
2701a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &left);
270230f712c9SDave Chinner 			if (error)
270330f712c9SDave Chinner 				goto done;
270430f712c9SDave Chinner 		}
270530f712c9SDave Chinner 		break;
270630f712c9SDave Chinner 
270730f712c9SDave Chinner 	case BMAP_LEFT_CONTIG:
270830f712c9SDave Chinner 		/*
270930f712c9SDave Chinner 		 * New allocation is contiguous with a real allocation
271030f712c9SDave Chinner 		 * on the left.
271130f712c9SDave Chinner 		 * Merge the new allocation with the left neighbor.
271230f712c9SDave Chinner 		 */
27131abb9e55SChristoph Hellwig 		old = left;
27141abb9e55SChristoph Hellwig 		left.br_blockcount += new->br_blockcount;
27151d2e0089SChristoph Hellwig 
2716b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
2717b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &left);
271830f712c9SDave Chinner 
27196d04558fSChristoph Hellwig 		if (cur == NULL) {
272030f712c9SDave Chinner 			rval = xfs_ilog_fext(whichfork);
272130f712c9SDave Chinner 		} else {
			/* look up by the pre-merge record, then widen it */
272230f712c9SDave Chinner 			rval = 0;
2723e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
272430f712c9SDave Chinner 			if (error)
272530f712c9SDave Chinner 				goto done;
2726f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2727f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2728f9e03706SDarrick J. Wong 				goto done;
2729f9e03706SDarrick J. Wong 			}
2730a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &left);
273130f712c9SDave Chinner 			if (error)
273230f712c9SDave Chinner 				goto done;
273330f712c9SDave Chinner 		}
273430f712c9SDave Chinner 		break;
273530f712c9SDave Chinner 
273630f712c9SDave Chinner 	case BMAP_RIGHT_CONTIG:
273730f712c9SDave Chinner 		/*
273830f712c9SDave Chinner 		 * New allocation is contiguous with a real allocation
273930f712c9SDave Chinner 		 * on the right.
274030f712c9SDave Chinner 		 * Merge the new allocation with the right neighbor.
274130f712c9SDave Chinner 		 */
27421abb9e55SChristoph Hellwig 		old = right;
2743ca5d8e5bSChristoph Hellwig 
27441abb9e55SChristoph Hellwig 		right.br_startoff = new->br_startoff;
27451abb9e55SChristoph Hellwig 		right.br_startblock = new->br_startblock;
27461abb9e55SChristoph Hellwig 		right.br_blockcount += new->br_blockcount;
2747b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &right);
274830f712c9SDave Chinner 
27496d04558fSChristoph Hellwig 		if (cur == NULL) {
275030f712c9SDave Chinner 			rval = xfs_ilog_fext(whichfork);
275130f712c9SDave Chinner 		} else {
			/* look up by the pre-merge record, then extend it left */
275230f712c9SDave Chinner 			rval = 0;
2753e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
275430f712c9SDave Chinner 			if (error)
275530f712c9SDave Chinner 				goto done;
2756f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2757f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2758f9e03706SDarrick J. Wong 				goto done;
2759f9e03706SDarrick J. Wong 			}
2760a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &right);
276130f712c9SDave Chinner 			if (error)
276230f712c9SDave Chinner 				goto done;
276330f712c9SDave Chinner 		}
276430f712c9SDave Chinner 		break;
276530f712c9SDave Chinner 
276630f712c9SDave Chinner 	case 0:
276730f712c9SDave Chinner 		/*
276830f712c9SDave Chinner 		 * New allocation is not contiguous with another
276930f712c9SDave Chinner 		 * real allocation.
277030f712c9SDave Chinner 		 * Insert a new entry.
277130f712c9SDave Chinner 		 */
27720254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, new, state);
2773daf83964SChristoph Hellwig 		ifp->if_nextents++;
2774daf83964SChristoph Hellwig 
27756d04558fSChristoph Hellwig 		if (cur == NULL) {
277630f712c9SDave Chinner 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
277730f712c9SDave Chinner 		} else {
			/* the record must NOT already exist (i == 0) */
277830f712c9SDave Chinner 			rval = XFS_ILOG_CORE;
2779e16cf9b0SChristoph Hellwig 			error = xfs_bmbt_lookup_eq(cur, new, &i);
278030f712c9SDave Chinner 			if (error)
278130f712c9SDave Chinner 				goto done;
2782f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2783f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2784f9e03706SDarrick J. Wong 				goto done;
2785f9e03706SDarrick J. Wong 			}
27866d04558fSChristoph Hellwig 			error = xfs_btree_insert(cur, &i);
278730f712c9SDave Chinner 			if (error)
278830f712c9SDave Chinner 				goto done;
2789f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2790f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
2791f9e03706SDarrick J. Wong 				goto done;
2792f9e03706SDarrick J. Wong 			}
279330f712c9SDave Chinner 		}
279430f712c9SDave Chinner 		break;
279530f712c9SDave Chinner 	}
279630f712c9SDave Chinner 
279795eb308cSDarrick J. Wong 	/* add reverse mapping unless caller opted out */
2798bc46ac64SDarrick J. Wong 	if (!(flags & XFS_BMAPI_NORMAP))
2799bc46ac64SDarrick J. Wong 		xfs_rmap_map_extent(tp, ip, whichfork, new);
28009c194644SDarrick J. Wong 
280130f712c9SDave Chinner 	/* convert to a btree if necessary */
28026d04558fSChristoph Hellwig 	if (xfs_bmap_needs_btree(ip, whichfork)) {
280330f712c9SDave Chinner 		int	tmp_logflags;	/* partial log flag return val */
280430f712c9SDave Chinner 
28056d04558fSChristoph Hellwig 		ASSERT(cur == NULL);
		/* conversion hands back a new cursor through *curp */
2806280253d2SBrian Foster 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2807280253d2SBrian Foster 				&tmp_logflags, whichfork);
28086d04558fSChristoph Hellwig 		*logflagsp |= tmp_logflags;
28096d04558fSChristoph Hellwig 		cur = *curp;
281030f712c9SDave Chinner 		if (error)
281130f712c9SDave Chinner 			goto done;
281230f712c9SDave Chinner 	}
281330f712c9SDave Chinner 
281430f712c9SDave Chinner 	/* clear out the allocated field, done with it now in any case. */
28156d04558fSChristoph Hellwig 	if (cur)
281692219c29SDave Chinner 		cur->bc_ino.allocated = 0;
281730f712c9SDave Chinner 
28186d04558fSChristoph Hellwig 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
281930f712c9SDave Chinner done:
28206d04558fSChristoph Hellwig 	*logflagsp |= rval;
282130f712c9SDave Chinner 	return error;
282230f712c9SDave Chinner }
282330f712c9SDave Chinner 
282430f712c9SDave Chinner /*
282530f712c9SDave Chinner  * Functions used in the extent read, allocate and remove paths
282630f712c9SDave Chinner  */
282730f712c9SDave Chinner 
282830f712c9SDave Chinner /*
2829031474c2SChristoph Hellwig  * Adjust the size of the new extent based on i_extsize and rt extsize.
283030f712c9SDave Chinner  */
283130f712c9SDave Chinner int
283230f712c9SDave Chinner xfs_bmap_extsize_align(
283330f712c9SDave Chinner 	xfs_mount_t	*mp,
283430f712c9SDave Chinner 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
283530f712c9SDave Chinner 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
283630f712c9SDave Chinner 	xfs_extlen_t	extsz,		/* align to this extent size */
283730f712c9SDave Chinner 	int		rt,		/* is this a realtime inode? */
283830f712c9SDave Chinner 	int		eof,		/* is extent at end-of-file? */
283930f712c9SDave Chinner 	int		delay,		/* creating delalloc extent? */
284030f712c9SDave Chinner 	int		convert,	/* overwriting unwritten extent? */
284130f712c9SDave Chinner 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
284230f712c9SDave Chinner 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
284330f712c9SDave Chinner {
284430f712c9SDave Chinner 	xfs_fileoff_t	orig_off;	/* original offset */
284530f712c9SDave Chinner 	xfs_extlen_t	orig_alen;	/* original length */
284630f712c9SDave Chinner 	xfs_fileoff_t	orig_end;	/* original off+len */
284730f712c9SDave Chinner 	xfs_fileoff_t	nexto;		/* next file offset */
284830f712c9SDave Chinner 	xfs_fileoff_t	prevo;		/* previous file offset */
284930f712c9SDave Chinner 	xfs_fileoff_t	align_off;	/* temp for offset */
285030f712c9SDave Chinner 	xfs_extlen_t	align_alen;	/* temp for length */
285130f712c9SDave Chinner 	xfs_extlen_t	temp;		/* temp for calculations */
285230f712c9SDave Chinner 
285330f712c9SDave Chinner 	if (convert)
285430f712c9SDave Chinner 		return 0;
285530f712c9SDave Chinner 
285630f712c9SDave Chinner 	orig_off = align_off = *offp;
285730f712c9SDave Chinner 	orig_alen = align_alen = *lenp;
285830f712c9SDave Chinner 	orig_end = orig_off + orig_alen;
285930f712c9SDave Chinner 
286030f712c9SDave Chinner 	/*
286130f712c9SDave Chinner 	 * If this request overlaps an existing extent, then don't
286230f712c9SDave Chinner 	 * attempt to perform any additional alignment.
286330f712c9SDave Chinner 	 */
286430f712c9SDave Chinner 	if (!delay && !eof &&
286530f712c9SDave Chinner 	    (orig_off >= gotp->br_startoff) &&
286630f712c9SDave Chinner 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
286730f712c9SDave Chinner 		return 0;
286830f712c9SDave Chinner 	}
286930f712c9SDave Chinner 
287030f712c9SDave Chinner 	/*
287130f712c9SDave Chinner 	 * If the file offset is unaligned vs. the extent size
287230f712c9SDave Chinner 	 * we need to align it.  This will be possible unless
287330f712c9SDave Chinner 	 * the file was previously written with a kernel that didn't
287430f712c9SDave Chinner 	 * perform this alignment, or if a truncate shot us in the
287530f712c9SDave Chinner 	 * foot.
287630f712c9SDave Chinner 	 */
28770703a8e1SDave Chinner 	div_u64_rem(orig_off, extsz, &temp);
287830f712c9SDave Chinner 	if (temp) {
287930f712c9SDave Chinner 		align_alen += temp;
288030f712c9SDave Chinner 		align_off -= temp;
288130f712c9SDave Chinner 	}
28826dea405eSDave Chinner 
28836dea405eSDave Chinner 	/* Same adjustment for the end of the requested area. */
28846dea405eSDave Chinner 	temp = (align_alen % extsz);
28856dea405eSDave Chinner 	if (temp)
288630f712c9SDave Chinner 		align_alen += extsz - temp;
28876dea405eSDave Chinner 
28886dea405eSDave Chinner 	/*
28896dea405eSDave Chinner 	 * For large extent hint sizes, the aligned extent might be larger than
289095f0b95eSChandan Babu R 	 * XFS_BMBT_MAX_EXTLEN. In that case, reduce the size by an extsz so
289195f0b95eSChandan Babu R 	 * that it pulls the length back under XFS_BMBT_MAX_EXTLEN. The outer
289295f0b95eSChandan Babu R 	 * allocation loops handle short allocation just fine, so it is safe to
289395f0b95eSChandan Babu R 	 * do this. We only want to do it when we are forced to, though, because
289495f0b95eSChandan Babu R 	 * it means more allocation operations are required.
28956dea405eSDave Chinner 	 */
289695f0b95eSChandan Babu R 	while (align_alen > XFS_MAX_BMBT_EXTLEN)
28976dea405eSDave Chinner 		align_alen -= extsz;
289895f0b95eSChandan Babu R 	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
28996dea405eSDave Chinner 
290030f712c9SDave Chinner 	/*
290130f712c9SDave Chinner 	 * If the previous block overlaps with this proposed allocation
290230f712c9SDave Chinner 	 * then move the start forward without adjusting the length.
290330f712c9SDave Chinner 	 */
290430f712c9SDave Chinner 	if (prevp->br_startoff != NULLFILEOFF) {
290530f712c9SDave Chinner 		if (prevp->br_startblock == HOLESTARTBLOCK)
290630f712c9SDave Chinner 			prevo = prevp->br_startoff;
290730f712c9SDave Chinner 		else
290830f712c9SDave Chinner 			prevo = prevp->br_startoff + prevp->br_blockcount;
290930f712c9SDave Chinner 	} else
291030f712c9SDave Chinner 		prevo = 0;
291130f712c9SDave Chinner 	if (align_off != orig_off && align_off < prevo)
291230f712c9SDave Chinner 		align_off = prevo;
291330f712c9SDave Chinner 	/*
291430f712c9SDave Chinner 	 * If the next block overlaps with this proposed allocation
291530f712c9SDave Chinner 	 * then move the start back without adjusting the length,
291630f712c9SDave Chinner 	 * but not before offset 0.
291730f712c9SDave Chinner 	 * This may of course make the start overlap previous block,
291830f712c9SDave Chinner 	 * and if we hit the offset 0 limit then the next block
291930f712c9SDave Chinner 	 * can still overlap too.
292030f712c9SDave Chinner 	 */
292130f712c9SDave Chinner 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
292230f712c9SDave Chinner 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
292330f712c9SDave Chinner 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
292430f712c9SDave Chinner 			nexto = gotp->br_startoff + gotp->br_blockcount;
292530f712c9SDave Chinner 		else
292630f712c9SDave Chinner 			nexto = gotp->br_startoff;
292730f712c9SDave Chinner 	} else
292830f712c9SDave Chinner 		nexto = NULLFILEOFF;
292930f712c9SDave Chinner 	if (!eof &&
293030f712c9SDave Chinner 	    align_off + align_alen != orig_end &&
293130f712c9SDave Chinner 	    align_off + align_alen > nexto)
293230f712c9SDave Chinner 		align_off = nexto > align_alen ? nexto - align_alen : 0;
293330f712c9SDave Chinner 	/*
293430f712c9SDave Chinner 	 * If we're now overlapping the next or previous extent that
293530f712c9SDave Chinner 	 * means we can't fit an extsz piece in this hole.  Just move
293630f712c9SDave Chinner 	 * the start forward to the first valid spot and set
293730f712c9SDave Chinner 	 * the length so we hit the end.
293830f712c9SDave Chinner 	 */
293930f712c9SDave Chinner 	if (align_off != orig_off && align_off < prevo)
294030f712c9SDave Chinner 		align_off = prevo;
294130f712c9SDave Chinner 	if (align_off + align_alen != orig_end &&
294230f712c9SDave Chinner 	    align_off + align_alen > nexto &&
294330f712c9SDave Chinner 	    nexto != NULLFILEOFF) {
294430f712c9SDave Chinner 		ASSERT(nexto > prevo);
294530f712c9SDave Chinner 		align_alen = nexto - align_off;
294630f712c9SDave Chinner 	}
294730f712c9SDave Chinner 
294830f712c9SDave Chinner 	/*
294930f712c9SDave Chinner 	 * If realtime, and the result isn't a multiple of the realtime
295030f712c9SDave Chinner 	 * extent size we need to remove blocks until it is.
295130f712c9SDave Chinner 	 */
295230f712c9SDave Chinner 	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
295330f712c9SDave Chinner 		/*
295430f712c9SDave Chinner 		 * We're not covering the original request, or
295530f712c9SDave Chinner 		 * we won't be able to once we fix the length.
295630f712c9SDave Chinner 		 */
295730f712c9SDave Chinner 		if (orig_off < align_off ||
295830f712c9SDave Chinner 		    orig_end > align_off + align_alen ||
295930f712c9SDave Chinner 		    align_alen - temp < orig_alen)
29602451337dSDave Chinner 			return -EINVAL;
296130f712c9SDave Chinner 		/*
296230f712c9SDave Chinner 		 * Try to fix it by moving the start up.
296330f712c9SDave Chinner 		 */
296430f712c9SDave Chinner 		if (align_off + temp <= orig_off) {
296530f712c9SDave Chinner 			align_alen -= temp;
296630f712c9SDave Chinner 			align_off += temp;
296730f712c9SDave Chinner 		}
296830f712c9SDave Chinner 		/*
296930f712c9SDave Chinner 		 * Try to fix it by moving the end in.
297030f712c9SDave Chinner 		 */
297130f712c9SDave Chinner 		else if (align_off + align_alen - temp >= orig_end)
297230f712c9SDave Chinner 			align_alen -= temp;
297330f712c9SDave Chinner 		/*
297430f712c9SDave Chinner 		 * Set the start to the minimum then trim the length.
297530f712c9SDave Chinner 		 */
297630f712c9SDave Chinner 		else {
297730f712c9SDave Chinner 			align_alen -= orig_off - align_off;
297830f712c9SDave Chinner 			align_off = orig_off;
297930f712c9SDave Chinner 			align_alen -= align_alen % mp->m_sb.sb_rextsize;
298030f712c9SDave Chinner 		}
298130f712c9SDave Chinner 		/*
298230f712c9SDave Chinner 		 * Result doesn't cover the request, fail it.
298330f712c9SDave Chinner 		 */
298430f712c9SDave Chinner 		if (orig_off < align_off || orig_end > align_off + align_alen)
29852451337dSDave Chinner 			return -EINVAL;
298630f712c9SDave Chinner 	} else {
298730f712c9SDave Chinner 		ASSERT(orig_off >= align_off);
298895f0b95eSChandan Babu R 		/* see XFS_BMBT_MAX_EXTLEN handling above */
29896dea405eSDave Chinner 		ASSERT(orig_end <= align_off + align_alen ||
299095f0b95eSChandan Babu R 		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
299130f712c9SDave Chinner 	}
299230f712c9SDave Chinner 
299330f712c9SDave Chinner #ifdef DEBUG
299430f712c9SDave Chinner 	if (!eof && gotp->br_startoff != NULLFILEOFF)
299530f712c9SDave Chinner 		ASSERT(align_off + align_alen <= gotp->br_startoff);
299630f712c9SDave Chinner 	if (prevp->br_startoff != NULLFILEOFF)
299730f712c9SDave Chinner 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
299830f712c9SDave Chinner #endif
299930f712c9SDave Chinner 
300030f712c9SDave Chinner 	*lenp = align_alen;
300130f712c9SDave Chinner 	*offp = align_off;
300230f712c9SDave Chinner 	return 0;
300330f712c9SDave Chinner }
300430f712c9SDave Chinner 
300530f712c9SDave Chinner #define XFS_ALLOC_GAP_UNITS	4
300630f712c9SDave Chinner 
300730f712c9SDave Chinner void
300830f712c9SDave Chinner xfs_bmap_adjacent(
300930f712c9SDave Chinner 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
301030f712c9SDave Chinner {
301130f712c9SDave Chinner 	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
301230f712c9SDave Chinner 	xfs_mount_t	*mp;		/* mount point structure */
301330f712c9SDave Chinner 	int		rt;		/* true if inode is realtime */
301430f712c9SDave Chinner 
301530f712c9SDave Chinner #define	ISVALID(x,y)	\
301630f712c9SDave Chinner 	(rt ? \
301730f712c9SDave Chinner 		(x) < mp->m_sb.sb_rblocks : \
301830f712c9SDave Chinner 		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
301930f712c9SDave Chinner 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
302030f712c9SDave Chinner 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
302130f712c9SDave Chinner 
302230f712c9SDave Chinner 	mp = ap->ip->i_mount;
3023292378edSDave Chinner 	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3024c34d570dSChristoph Hellwig 		(ap->datatype & XFS_ALLOC_USERDATA);
302530f712c9SDave Chinner 	/*
302630f712c9SDave Chinner 	 * If allocating at eof, and there's a previous real block,
302730f712c9SDave Chinner 	 * try to use its last block as our starting point.
302830f712c9SDave Chinner 	 */
302930f712c9SDave Chinner 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
303030f712c9SDave Chinner 	    !isnullstartblock(ap->prev.br_startblock) &&
303130f712c9SDave Chinner 	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
303230f712c9SDave Chinner 		    ap->prev.br_startblock)) {
303330f712c9SDave Chinner 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
303430f712c9SDave Chinner 		/*
303530f712c9SDave Chinner 		 * Adjust for the gap between prevp and us.
303630f712c9SDave Chinner 		 */
303730f712c9SDave Chinner 		adjust = ap->offset -
303830f712c9SDave Chinner 			(ap->prev.br_startoff + ap->prev.br_blockcount);
303930f712c9SDave Chinner 		if (adjust &&
304030f712c9SDave Chinner 		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
304130f712c9SDave Chinner 			ap->blkno += adjust;
304230f712c9SDave Chinner 	}
304330f712c9SDave Chinner 	/*
304430f712c9SDave Chinner 	 * If not at eof, then compare the two neighbor blocks.
304530f712c9SDave Chinner 	 * Figure out whether either one gives us a good starting point,
304630f712c9SDave Chinner 	 * and pick the better one.
304730f712c9SDave Chinner 	 */
304830f712c9SDave Chinner 	else if (!ap->eof) {
304930f712c9SDave Chinner 		xfs_fsblock_t	gotbno;		/* right side block number */
305030f712c9SDave Chinner 		xfs_fsblock_t	gotdiff=0;	/* right side difference */
305130f712c9SDave Chinner 		xfs_fsblock_t	prevbno;	/* left side block number */
305230f712c9SDave Chinner 		xfs_fsblock_t	prevdiff=0;	/* left side difference */
305330f712c9SDave Chinner 
305430f712c9SDave Chinner 		/*
305530f712c9SDave Chinner 		 * If there's a previous (left) block, select a requested
305630f712c9SDave Chinner 		 * start block based on it.
305730f712c9SDave Chinner 		 */
305830f712c9SDave Chinner 		if (ap->prev.br_startoff != NULLFILEOFF &&
305930f712c9SDave Chinner 		    !isnullstartblock(ap->prev.br_startblock) &&
306030f712c9SDave Chinner 		    (prevbno = ap->prev.br_startblock +
306130f712c9SDave Chinner 			       ap->prev.br_blockcount) &&
306230f712c9SDave Chinner 		    ISVALID(prevbno, ap->prev.br_startblock)) {
306330f712c9SDave Chinner 			/*
306430f712c9SDave Chinner 			 * Calculate gap to end of previous block.
306530f712c9SDave Chinner 			 */
306630f712c9SDave Chinner 			adjust = prevdiff = ap->offset -
306730f712c9SDave Chinner 				(ap->prev.br_startoff +
306830f712c9SDave Chinner 				 ap->prev.br_blockcount);
306930f712c9SDave Chinner 			/*
307030f712c9SDave Chinner 			 * Figure the startblock based on the previous block's
307130f712c9SDave Chinner 			 * end and the gap size.
307230f712c9SDave Chinner 			 * Heuristic!
307330f712c9SDave Chinner 			 * If the gap is large relative to the piece we're
307430f712c9SDave Chinner 			 * allocating, or using it gives us an invalid block
307530f712c9SDave Chinner 			 * number, then just use the end of the previous block.
307630f712c9SDave Chinner 			 */
307730f712c9SDave Chinner 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
307830f712c9SDave Chinner 			    ISVALID(prevbno + prevdiff,
307930f712c9SDave Chinner 				    ap->prev.br_startblock))
308030f712c9SDave Chinner 				prevbno += adjust;
308130f712c9SDave Chinner 			else
308230f712c9SDave Chinner 				prevdiff += adjust;
308330f712c9SDave Chinner 		}
308430f712c9SDave Chinner 		/*
308530f712c9SDave Chinner 		 * No previous block or can't follow it, just default.
308630f712c9SDave Chinner 		 */
308730f712c9SDave Chinner 		else
308830f712c9SDave Chinner 			prevbno = NULLFSBLOCK;
308930f712c9SDave Chinner 		/*
309030f712c9SDave Chinner 		 * If there's a following (right) block, select a requested
309130f712c9SDave Chinner 		 * start block based on it.
309230f712c9SDave Chinner 		 */
309330f712c9SDave Chinner 		if (!isnullstartblock(ap->got.br_startblock)) {
309430f712c9SDave Chinner 			/*
309530f712c9SDave Chinner 			 * Calculate gap to start of next block.
309630f712c9SDave Chinner 			 */
309730f712c9SDave Chinner 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
309830f712c9SDave Chinner 			/*
309930f712c9SDave Chinner 			 * Figure the startblock based on the next block's
310030f712c9SDave Chinner 			 * start and the gap size.
310130f712c9SDave Chinner 			 */
310230f712c9SDave Chinner 			gotbno = ap->got.br_startblock;
310330f712c9SDave Chinner 			/*
310430f712c9SDave Chinner 			 * Heuristic!
310530f712c9SDave Chinner 			 * If the gap is large relative to the piece we're
310630f712c9SDave Chinner 			 * allocating, or using it gives us an invalid block
310730f712c9SDave Chinner 			 * number, then just use the start of the next block
310830f712c9SDave Chinner 			 * offset by our length.
310930f712c9SDave Chinner 			 */
311030f712c9SDave Chinner 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
311130f712c9SDave Chinner 			    ISVALID(gotbno - gotdiff, gotbno))
311230f712c9SDave Chinner 				gotbno -= adjust;
311330f712c9SDave Chinner 			else if (ISVALID(gotbno - ap->length, gotbno)) {
311430f712c9SDave Chinner 				gotbno -= ap->length;
311530f712c9SDave Chinner 				gotdiff += adjust - ap->length;
311630f712c9SDave Chinner 			} else
311730f712c9SDave Chinner 				gotdiff += adjust;
311830f712c9SDave Chinner 		}
311930f712c9SDave Chinner 		/*
312030f712c9SDave Chinner 		 * No next block, just default.
312130f712c9SDave Chinner 		 */
312230f712c9SDave Chinner 		else
312330f712c9SDave Chinner 			gotbno = NULLFSBLOCK;
312430f712c9SDave Chinner 		/*
312530f712c9SDave Chinner 		 * If both valid, pick the better one, else the only good
312630f712c9SDave Chinner 		 * one, else ap->blkno is already set (to 0 or the inode block).
312730f712c9SDave Chinner 		 */
312830f712c9SDave Chinner 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
312930f712c9SDave Chinner 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
313030f712c9SDave Chinner 		else if (prevbno != NULLFSBLOCK)
313130f712c9SDave Chinner 			ap->blkno = prevbno;
313230f712c9SDave Chinner 		else if (gotbno != NULLFSBLOCK)
313330f712c9SDave Chinner 			ap->blkno = gotbno;
313430f712c9SDave Chinner 	}
313530f712c9SDave Chinner #undef ISVALID
313630f712c9SDave Chinner }
313730f712c9SDave Chinner 
313830f712c9SDave Chinner static int
313930f712c9SDave Chinner xfs_bmap_longest_free_extent(
314076257a15SDave Chinner 	struct xfs_perag	*pag,
314130f712c9SDave Chinner 	struct xfs_trans	*tp,
314230f712c9SDave Chinner 	xfs_extlen_t		*blen,
314330f712c9SDave Chinner 	int			*notinit)
314430f712c9SDave Chinner {
314530f712c9SDave Chinner 	xfs_extlen_t		longest;
314630f712c9SDave Chinner 	int			error = 0;
314730f712c9SDave Chinner 
31487ac2ff8bSDave Chinner 	if (!xfs_perag_initialised_agf(pag)) {
314908d3e84fSDave Chinner 		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
315076b47e52SDave Chinner 				NULL);
3151f48e2df8SDarrick J. Wong 		if (error) {
3152f48e2df8SDarrick J. Wong 			/* Couldn't lock the AGF, so skip this AG. */
3153f48e2df8SDarrick J. Wong 			if (error == -EAGAIN) {
315430f712c9SDave Chinner 				*notinit = 1;
3155f48e2df8SDarrick J. Wong 				error = 0;
3156f48e2df8SDarrick J. Wong 			}
315776257a15SDave Chinner 			return error;
315830f712c9SDave Chinner 		}
315930f712c9SDave Chinner 	}
316030f712c9SDave Chinner 
3161a1f69417SEric Sandeen 	longest = xfs_alloc_longest_free_extent(pag,
316276257a15SDave Chinner 				xfs_alloc_min_freelist(pag->pag_mount, pag),
31633fd129b6SDarrick J. Wong 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
316430f712c9SDave Chinner 	if (*blen < longest)
316530f712c9SDave Chinner 		*blen = longest;
316630f712c9SDave Chinner 
316776257a15SDave Chinner 	return 0;
316830f712c9SDave Chinner }
316930f712c9SDave Chinner 
317030f712c9SDave Chinner static void
317130f712c9SDave Chinner xfs_bmap_select_minlen(
317230f712c9SDave Chinner 	struct xfs_bmalloca	*ap,
317330f712c9SDave Chinner 	struct xfs_alloc_arg	*args,
317430f712c9SDave Chinner 	xfs_extlen_t		*blen,
317530f712c9SDave Chinner 	int			notinit)
317630f712c9SDave Chinner {
317730f712c9SDave Chinner 	if (notinit || *blen < ap->minlen) {
317830f712c9SDave Chinner 		/*
317930f712c9SDave Chinner 		 * Since we did a BUF_TRYLOCK above, it is possible that
318030f712c9SDave Chinner 		 * there is space for this request.
318130f712c9SDave Chinner 		 */
318230f712c9SDave Chinner 		args->minlen = ap->minlen;
318330f712c9SDave Chinner 	} else if (*blen < args->maxlen) {
318430f712c9SDave Chinner 		/*
318530f712c9SDave Chinner 		 * If the best seen length is less than the request length,
318630f712c9SDave Chinner 		 * use the best as the minimum.
318730f712c9SDave Chinner 		 */
318830f712c9SDave Chinner 		args->minlen = *blen;
318930f712c9SDave Chinner 	} else {
319030f712c9SDave Chinner 		/*
319130f712c9SDave Chinner 		 * Otherwise we've seen an extent as big as maxlen, use that
319230f712c9SDave Chinner 		 * as the minimum.
319330f712c9SDave Chinner 		 */
319430f712c9SDave Chinner 		args->minlen = args->maxlen;
319530f712c9SDave Chinner 	}
319630f712c9SDave Chinner }
319730f712c9SDave Chinner 
319885843327SDave Chinner static int
319936b6ad2dSDave Chinner xfs_bmap_btalloc_select_lengths(
320030f712c9SDave Chinner 	struct xfs_bmalloca	*ap,
320130f712c9SDave Chinner 	struct xfs_alloc_arg	*args,
320230f712c9SDave Chinner 	xfs_extlen_t		*blen)
320330f712c9SDave Chinner {
320485843327SDave Chinner 	struct xfs_mount	*mp = args->mp;
320576257a15SDave Chinner 	struct xfs_perag	*pag;
320676257a15SDave Chinner 	xfs_agnumber_t		agno, startag;
320730f712c9SDave Chinner 	int			notinit = 0;
320876257a15SDave Chinner 	int			error = 0;
320930f712c9SDave Chinner 
321036b6ad2dSDave Chinner 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
321136b6ad2dSDave Chinner 		args->total = ap->minlen;
321236b6ad2dSDave Chinner 		args->minlen = ap->minlen;
321336b6ad2dSDave Chinner 		return 0;
321436b6ad2dSDave Chinner 	}
321530f712c9SDave Chinner 
321636b6ad2dSDave Chinner 	args->total = ap->total;
321785843327SDave Chinner 	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
321830f712c9SDave Chinner 	if (startag == NULLAGNUMBER)
321976257a15SDave Chinner 		startag = 0;
322030f712c9SDave Chinner 
322176257a15SDave Chinner 	*blen = 0;
322276257a15SDave Chinner 	for_each_perag_wrap(mp, startag, agno, pag) {
322376257a15SDave Chinner 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen,
322430f712c9SDave Chinner 						     &notinit);
322530f712c9SDave Chinner 		if (error)
322676257a15SDave Chinner 			break;
322776257a15SDave Chinner 		if (*blen >= args->maxlen)
322830f712c9SDave Chinner 			break;
322930f712c9SDave Chinner 	}
323076257a15SDave Chinner 	if (pag)
323176257a15SDave Chinner 		xfs_perag_rele(pag);
323230f712c9SDave Chinner 
323330f712c9SDave Chinner 	xfs_bmap_select_minlen(ap, args, blen, notinit);
323476257a15SDave Chinner 	return error;
323530f712c9SDave Chinner }
323630f712c9SDave Chinner 
323730f712c9SDave Chinner STATIC int
323830f712c9SDave Chinner xfs_bmap_btalloc_filestreams(
323930f712c9SDave Chinner 	struct xfs_bmalloca	*ap,
324030f712c9SDave Chinner 	struct xfs_alloc_arg	*args,
324130f712c9SDave Chinner 	xfs_extlen_t		*blen)
324230f712c9SDave Chinner {
324330f712c9SDave Chinner 	struct xfs_mount	*mp = ap->ip->i_mount;
324476257a15SDave Chinner 	struct xfs_perag	*pag;
324576257a15SDave Chinner 	xfs_agnumber_t		start_agno;
324630f712c9SDave Chinner 	int			notinit = 0;
324730f712c9SDave Chinner 	int			error;
324830f712c9SDave Chinner 
324930f712c9SDave Chinner 	args->type = XFS_ALLOCTYPE_NEAR_BNO;
325030f712c9SDave Chinner 	args->total = ap->total;
325130f712c9SDave Chinner 
325285843327SDave Chinner 	start_agno = XFS_FSB_TO_AGNO(mp, ap->blkno);
325376257a15SDave Chinner 	if (start_agno == NULLAGNUMBER)
325476257a15SDave Chinner 		start_agno = 0;
325530f712c9SDave Chinner 
325676257a15SDave Chinner 	pag = xfs_perag_grab(mp, start_agno);
325776257a15SDave Chinner 	if (pag) {
325876257a15SDave Chinner 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen,
325976257a15SDave Chinner 				&notinit);
326076257a15SDave Chinner 		xfs_perag_rele(pag);
326130f712c9SDave Chinner 		if (error)
326230f712c9SDave Chinner 			return error;
326376257a15SDave Chinner 	}
326430f712c9SDave Chinner 
326530f712c9SDave Chinner 	if (*blen < args->maxlen) {
326676257a15SDave Chinner 		xfs_agnumber_t	agno = start_agno;
326776257a15SDave Chinner 
326876257a15SDave Chinner 		error = xfs_filestream_new_ag(ap, &agno);
326976257a15SDave Chinner 		if (error)
327076257a15SDave Chinner 			return error;
327176257a15SDave Chinner 		if (agno == NULLAGNUMBER)
327276257a15SDave Chinner 			goto out_select;
327376257a15SDave Chinner 
327476257a15SDave Chinner 		pag = xfs_perag_grab(mp, agno);
327576257a15SDave Chinner 		if (!pag)
327676257a15SDave Chinner 			goto out_select;
327776257a15SDave Chinner 
327876257a15SDave Chinner 		error = xfs_bmap_longest_free_extent(pag, args->tp,
327976257a15SDave Chinner 				blen, &notinit);
328076257a15SDave Chinner 		xfs_perag_rele(pag);
328130f712c9SDave Chinner 		if (error)
328230f712c9SDave Chinner 			return error;
328330f712c9SDave Chinner 
328476257a15SDave Chinner 		start_agno = agno;
328530f712c9SDave Chinner 
328630f712c9SDave Chinner 	}
328730f712c9SDave Chinner 
328876257a15SDave Chinner out_select:
328930f712c9SDave Chinner 	xfs_bmap_select_minlen(ap, args, blen, notinit);
329030f712c9SDave Chinner 
329130f712c9SDave Chinner 	/*
329230f712c9SDave Chinner 	 * Set the failure fallback case to look in the selected AG as stream
329330f712c9SDave Chinner 	 * may have moved.
329430f712c9SDave Chinner 	 */
329576257a15SDave Chinner 	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, start_agno, 0);
329630f712c9SDave Chinner 	return 0;
329730f712c9SDave Chinner }
329830f712c9SDave Chinner 
3299751f3767SDarrick J. Wong /* Update all inode and quota accounting for the allocation we just did. */
3300751f3767SDarrick J. Wong static void
3301751f3767SDarrick J. Wong xfs_bmap_btalloc_accounting(
3302751f3767SDarrick J. Wong 	struct xfs_bmalloca	*ap,
3303751f3767SDarrick J. Wong 	struct xfs_alloc_arg	*args)
3304751f3767SDarrick J. Wong {
33054b4c1326SDarrick J. Wong 	if (ap->flags & XFS_BMAPI_COWFORK) {
33064b4c1326SDarrick J. Wong 		/*
33074b4c1326SDarrick J. Wong 		 * COW fork blocks are in-core only and thus are treated as
33084b4c1326SDarrick J. Wong 		 * in-core quota reservation (like delalloc blocks) even when
33094b4c1326SDarrick J. Wong 		 * converted to real blocks. The quota reservation is not
33104b4c1326SDarrick J. Wong 		 * accounted to disk until blocks are remapped to the data
33114b4c1326SDarrick J. Wong 		 * fork. So if these blocks were previously delalloc, we
33124b4c1326SDarrick J. Wong 		 * already have quota reservation and there's nothing to do
33134b4c1326SDarrick J. Wong 		 * yet.
33144b4c1326SDarrick J. Wong 		 */
33159fe82b8cSDarrick J. Wong 		if (ap->wasdel) {
33169fe82b8cSDarrick J. Wong 			xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
33174b4c1326SDarrick J. Wong 			return;
33189fe82b8cSDarrick J. Wong 		}
33194b4c1326SDarrick J. Wong 
33204b4c1326SDarrick J. Wong 		/*
33214b4c1326SDarrick J. Wong 		 * Otherwise, we've allocated blocks in a hole. The transaction
33224b4c1326SDarrick J. Wong 		 * has acquired in-core quota reservation for this extent.
33234b4c1326SDarrick J. Wong 		 * Rather than account these as real blocks, however, we reduce
33244b4c1326SDarrick J. Wong 		 * the transaction quota reservation based on the allocation.
33254b4c1326SDarrick J. Wong 		 * This essentially transfers the transaction quota reservation
33264b4c1326SDarrick J. Wong 		 * to that of a delalloc extent.
33274b4c1326SDarrick J. Wong 		 */
33284b4c1326SDarrick J. Wong 		ap->ip->i_delayed_blks += args->len;
33294b4c1326SDarrick J. Wong 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
33304b4c1326SDarrick J. Wong 				-(long)args->len);
33314b4c1326SDarrick J. Wong 		return;
33324b4c1326SDarrick J. Wong 	}
33334b4c1326SDarrick J. Wong 
33344b4c1326SDarrick J. Wong 	/* data/attr fork only */
33356e73a545SChristoph Hellwig 	ap->ip->i_nblocks += args->len;
3336751f3767SDarrick J. Wong 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
33379fe82b8cSDarrick J. Wong 	if (ap->wasdel) {
3338751f3767SDarrick J. Wong 		ap->ip->i_delayed_blks -= args->len;
33399fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
33409fe82b8cSDarrick J. Wong 	}
3341751f3767SDarrick J. Wong 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3342751f3767SDarrick J. Wong 		ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3343751f3767SDarrick J. Wong 		args->len);
3344751f3767SDarrick J. Wong }
3345751f3767SDarrick J. Wong 
33460961fddfSChandan Babu R static int
33470961fddfSChandan Babu R xfs_bmap_compute_alignments(
33480961fddfSChandan Babu R 	struct xfs_bmalloca	*ap,
33490961fddfSChandan Babu R 	struct xfs_alloc_arg	*args)
33500961fddfSChandan Babu R {
33510961fddfSChandan Babu R 	struct xfs_mount	*mp = args->mp;
33520961fddfSChandan Babu R 	xfs_extlen_t		align = 0; /* minimum allocation alignment */
33530961fddfSChandan Babu R 	int			stripe_align = 0;
33540961fddfSChandan Babu R 
33550961fddfSChandan Babu R 	/* stripe alignment for allocation is determined by mount parameters */
33560560f31aSDave Chinner 	if (mp->m_swidth && xfs_has_swalloc(mp))
33570961fddfSChandan Babu R 		stripe_align = mp->m_swidth;
33580961fddfSChandan Babu R 	else if (mp->m_dalign)
33590961fddfSChandan Babu R 		stripe_align = mp->m_dalign;
33600961fddfSChandan Babu R 
33610961fddfSChandan Babu R 	if (ap->flags & XFS_BMAPI_COWFORK)
33620961fddfSChandan Babu R 		align = xfs_get_cowextsz_hint(ap->ip);
33630961fddfSChandan Babu R 	else if (ap->datatype & XFS_ALLOC_USERDATA)
33640961fddfSChandan Babu R 		align = xfs_get_extsz_hint(ap->ip);
33650961fddfSChandan Babu R 	if (align) {
3366560ab6c0SChandan Babu R 		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3367560ab6c0SChandan Babu R 					ap->eof, 0, ap->conv, &ap->offset,
3368560ab6c0SChandan Babu R 					&ap->length))
3369560ab6c0SChandan Babu R 			ASSERT(0);
33700961fddfSChandan Babu R 		ASSERT(ap->length);
33710961fddfSChandan Babu R 	}
33720961fddfSChandan Babu R 
33730961fddfSChandan Babu R 	/* apply extent size hints if obtained earlier */
33740961fddfSChandan Babu R 	if (align) {
33750961fddfSChandan Babu R 		args->prod = align;
33760961fddfSChandan Babu R 		div_u64_rem(ap->offset, args->prod, &args->mod);
33770961fddfSChandan Babu R 		if (args->mod)
33780961fddfSChandan Babu R 			args->mod = args->prod - args->mod;
33790961fddfSChandan Babu R 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
33800961fddfSChandan Babu R 		args->prod = 1;
33810961fddfSChandan Babu R 		args->mod = 0;
33820961fddfSChandan Babu R 	} else {
33830961fddfSChandan Babu R 		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
33840961fddfSChandan Babu R 		div_u64_rem(ap->offset, args->prod, &args->mod);
33850961fddfSChandan Babu R 		if (args->mod)
33860961fddfSChandan Babu R 			args->mod = args->prod - args->mod;
33870961fddfSChandan Babu R 	}
33880961fddfSChandan Babu R 
33890961fddfSChandan Babu R 	return stripe_align;
33900961fddfSChandan Babu R }
33910961fddfSChandan Babu R 
339207c72e55SChandan Babu R static void
339307c72e55SChandan Babu R xfs_bmap_process_allocated_extent(
339407c72e55SChandan Babu R 	struct xfs_bmalloca	*ap,
339507c72e55SChandan Babu R 	struct xfs_alloc_arg	*args,
339607c72e55SChandan Babu R 	xfs_fileoff_t		orig_offset,
339707c72e55SChandan Babu R 	xfs_extlen_t		orig_length)
339807c72e55SChandan Babu R {
339907c72e55SChandan Babu R 	ap->blkno = args->fsbno;
340007c72e55SChandan Babu R 	ap->length = args->len;
340107c72e55SChandan Babu R 	/*
340207c72e55SChandan Babu R 	 * If the extent size hint is active, we tried to round the
340307c72e55SChandan Babu R 	 * caller's allocation request offset down to extsz and the
340407c72e55SChandan Babu R 	 * length up to another extsz boundary.  If we found a free
340507c72e55SChandan Babu R 	 * extent we mapped it in starting at this new offset.  If the
340607c72e55SChandan Babu R 	 * newly mapped space isn't long enough to cover any of the
340707c72e55SChandan Babu R 	 * range of offsets that was originally requested, move the
340807c72e55SChandan Babu R 	 * mapping up so that we can fill as much of the caller's
340907c72e55SChandan Babu R 	 * original request as possible.  Free space is apparently
341007c72e55SChandan Babu R 	 * very fragmented so we're unlikely to be able to satisfy the
341107c72e55SChandan Babu R 	 * hints anyway.
341207c72e55SChandan Babu R 	 */
341307c72e55SChandan Babu R 	if (ap->length <= orig_length)
341407c72e55SChandan Babu R 		ap->offset = orig_offset;
341507c72e55SChandan Babu R 	else if (ap->offset + ap->length < orig_offset + orig_length)
341607c72e55SChandan Babu R 		ap->offset = orig_offset + orig_length - ap->length;
341707c72e55SChandan Babu R 	xfs_bmap_btalloc_accounting(ap, args);
341807c72e55SChandan Babu R }
341907c72e55SChandan Babu R 
342030151967SChandan Babu R #ifdef DEBUG
342130151967SChandan Babu R static int
342230151967SChandan Babu R xfs_bmap_exact_minlen_extent_alloc(
342330151967SChandan Babu R 	struct xfs_bmalloca	*ap)
342430151967SChandan Babu R {
342530151967SChandan Babu R 	struct xfs_mount	*mp = ap->ip->i_mount;
342630151967SChandan Babu R 	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
342730151967SChandan Babu R 	xfs_fileoff_t		orig_offset;
342830151967SChandan Babu R 	xfs_extlen_t		orig_length;
342930151967SChandan Babu R 	int			error;
343030151967SChandan Babu R 
343130151967SChandan Babu R 	ASSERT(ap->length);
343230151967SChandan Babu R 
343330151967SChandan Babu R 	if (ap->minlen != 1) {
343430151967SChandan Babu R 		ap->blkno = NULLFSBLOCK;
343530151967SChandan Babu R 		ap->length = 0;
343630151967SChandan Babu R 		return 0;
343730151967SChandan Babu R 	}
343830151967SChandan Babu R 
343930151967SChandan Babu R 	orig_offset = ap->offset;
344030151967SChandan Babu R 	orig_length = ap->length;
344130151967SChandan Babu R 
344230151967SChandan Babu R 	args.alloc_minlen_only = 1;
344330151967SChandan Babu R 
344430151967SChandan Babu R 	xfs_bmap_compute_alignments(ap, &args);
344530151967SChandan Babu R 
344630151967SChandan Babu R 	/*
344730151967SChandan Babu R 	 * Unlike the longest extent available in an AG, we don't track
344830151967SChandan Babu R 	 * the length of an AG's shortest extent.
344930151967SChandan Babu R 	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
345030151967SChandan Babu R 	 * hence we can afford to start traversing from the 0th AG since
345130151967SChandan Babu R 	 * we need not be concerned about a drop in performance in
345230151967SChandan Babu R 	 * "debug only" code paths.
345330151967SChandan Babu R 	 */
345430151967SChandan Babu R 	ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
345530151967SChandan Babu R 
345630151967SChandan Babu R 	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
34576e8bd39dSChandan Babu R 	args.minlen = args.maxlen = ap->minlen;
34586e8bd39dSChandan Babu R 	args.total = ap->total;
345930151967SChandan Babu R 
346030151967SChandan Babu R 	args.alignment = 1;
346130151967SChandan Babu R 	args.minalignslop = 0;
346230151967SChandan Babu R 
346330151967SChandan Babu R 	args.minleft = ap->minleft;
346430151967SChandan Babu R 	args.wasdel = ap->wasdel;
346530151967SChandan Babu R 	args.resv = XFS_AG_RESV_NONE;
346630151967SChandan Babu R 	args.datatype = ap->datatype;
346730151967SChandan Babu R 
3468319c9e87SDave Chinner 	error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
346930151967SChandan Babu R 	if (error)
347030151967SChandan Babu R 		return error;
347130151967SChandan Babu R 
347230151967SChandan Babu R 	if (args.fsbno != NULLFSBLOCK) {
347330151967SChandan Babu R 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
347430151967SChandan Babu R 			orig_length);
347530151967SChandan Babu R 	} else {
347630151967SChandan Babu R 		ap->blkno = NULLFSBLOCK;
347730151967SChandan Babu R 		ap->length = 0;
347830151967SChandan Babu R 	}
347930151967SChandan Babu R 
348030151967SChandan Babu R 	return 0;
348130151967SChandan Babu R }
348230151967SChandan Babu R #else
348330151967SChandan Babu R 
348430151967SChandan Babu R #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
348530151967SChandan Babu R 
348630151967SChandan Babu R #endif
348730151967SChandan Babu R 
348885843327SDave Chinner /*
348985843327SDave Chinner  * If we are not low on available data blocks and we are allocating at
349085843327SDave Chinner  * EOF, optimise allocation for contiguous file extension and/or stripe
349185843327SDave Chinner  * alignment of the new extent.
349285843327SDave Chinner  *
349385843327SDave Chinner  * NOTE: ap->aeof is only set if the allocation length is >= the
349485843327SDave Chinner  * stripe unit and the allocation offset is at the end of file.
349585843327SDave Chinner  */
349685843327SDave Chinner static int
349785843327SDave Chinner xfs_bmap_btalloc_at_eof(
349885843327SDave Chinner 	struct xfs_bmalloca	*ap,
349985843327SDave Chinner 	struct xfs_alloc_arg	*args,
350085843327SDave Chinner 	xfs_extlen_t		blen,
3501*2a7f6d41SDave Chinner 	int			stripe_align,
3502*2a7f6d41SDave Chinner 	bool			ag_only)
350385843327SDave Chinner {
350485843327SDave Chinner 	struct xfs_mount	*mp = args->mp;
350585843327SDave Chinner 	xfs_alloctype_t		atype;
350685843327SDave Chinner 	int			error;
350785843327SDave Chinner 
350885843327SDave Chinner 	/*
350985843327SDave Chinner 	 * If there are already extents in the file, try an exact EOF block
351085843327SDave Chinner 	 * allocation to extend the file as a contiguous extent. If that fails,
351185843327SDave Chinner 	 * or it's the first allocation in a file, just try for a stripe aligned
351285843327SDave Chinner 	 * allocation.
351385843327SDave Chinner 	 */
351485843327SDave Chinner 	if (ap->offset) {
351585843327SDave Chinner 		xfs_extlen_t	nextminlen = 0;
351685843327SDave Chinner 
351785843327SDave Chinner 		atype = args->type;
351885843327SDave Chinner 		args->type = XFS_ALLOCTYPE_THIS_BNO;
351985843327SDave Chinner 		args->alignment = 1;
352085843327SDave Chinner 
352185843327SDave Chinner 		/*
352285843327SDave Chinner 		 * Compute the minlen+alignment for the next case.  Set slop so
352385843327SDave Chinner 		 * that the value of minlen+alignment+slop doesn't go up between
352485843327SDave Chinner 		 * the calls.
352585843327SDave Chinner 		 */
352685843327SDave Chinner 		if (blen > stripe_align && blen <= args->maxlen)
352785843327SDave Chinner 			nextminlen = blen - stripe_align;
352885843327SDave Chinner 		else
352985843327SDave Chinner 			nextminlen = args->minlen;
353085843327SDave Chinner 		if (nextminlen + stripe_align > args->minlen + 1)
353185843327SDave Chinner 			args->minalignslop = nextminlen + stripe_align -
353285843327SDave Chinner 					args->minlen - 1;
353385843327SDave Chinner 		else
353485843327SDave Chinner 			args->minalignslop = 0;
353585843327SDave Chinner 
353685843327SDave Chinner 		args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, args->fsbno));
353785843327SDave Chinner 		error = xfs_alloc_vextent_this_ag(args);
353885843327SDave Chinner 		xfs_perag_put(args->pag);
353985843327SDave Chinner 		if (error)
354085843327SDave Chinner 			return error;
354185843327SDave Chinner 
354285843327SDave Chinner 		if (args->fsbno != NULLFSBLOCK)
354385843327SDave Chinner 			return 0;
354485843327SDave Chinner 		/*
354585843327SDave Chinner 		 * Exact allocation failed. Reset to try an aligned allocation
354685843327SDave Chinner 		 * according to the original allocation specification.
354785843327SDave Chinner 		 */
354885843327SDave Chinner 		args->pag = NULL;
354985843327SDave Chinner 		args->type = atype;
355085843327SDave Chinner 		args->fsbno = ap->blkno;
355185843327SDave Chinner 		args->alignment = stripe_align;
355285843327SDave Chinner 		args->minlen = nextminlen;
355385843327SDave Chinner 		args->minalignslop = 0;
355485843327SDave Chinner 	} else {
355585843327SDave Chinner 		args->alignment = stripe_align;
355685843327SDave Chinner 		atype = args->type;
355785843327SDave Chinner 		/*
355885843327SDave Chinner 		 * Adjust minlen to try and preserve alignment if we
355985843327SDave Chinner 		 * can't guarantee an aligned maxlen extent.
356085843327SDave Chinner 		 */
356185843327SDave Chinner 		if (blen > args->alignment &&
356285843327SDave Chinner 		    blen <= args->maxlen + args->alignment)
356385843327SDave Chinner 			args->minlen = blen - args->alignment;
356485843327SDave Chinner 		args->minalignslop = 0;
356585843327SDave Chinner 	}
356685843327SDave Chinner 
3567*2a7f6d41SDave Chinner 	if (ag_only)
356885843327SDave Chinner 		error = xfs_alloc_vextent(args);
3569*2a7f6d41SDave Chinner 	else
3570*2a7f6d41SDave Chinner 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
357185843327SDave Chinner 	if (error)
357285843327SDave Chinner 		return error;
357385843327SDave Chinner 
357485843327SDave Chinner 	if (args->fsbno != NULLFSBLOCK)
357585843327SDave Chinner 		return 0;
357685843327SDave Chinner 
357785843327SDave Chinner 	/*
357885843327SDave Chinner 	 * Allocation failed, so turn return the allocation args to their
357985843327SDave Chinner 	 * original non-aligned state so the caller can proceed on allocation
358085843327SDave Chinner 	 * failure as if this function was never called.
358185843327SDave Chinner 	 */
358285843327SDave Chinner 	args->type = atype;
358385843327SDave Chinner 	args->fsbno = ap->blkno;
358485843327SDave Chinner 	args->alignment = 1;
358585843327SDave Chinner 	return 0;
358685843327SDave Chinner }
358785843327SDave Chinner 
358885843327SDave Chinner static int
358985843327SDave Chinner xfs_bmap_btalloc_best_length(
359085843327SDave Chinner 	struct xfs_bmalloca	*ap,
359185843327SDave Chinner 	struct xfs_alloc_arg	*args,
359285843327SDave Chinner 	int			stripe_align)
359385843327SDave Chinner {
359485843327SDave Chinner 	struct xfs_mount	*mp = args->mp;
359585843327SDave Chinner 	xfs_extlen_t		blen = 0;
3596*2a7f6d41SDave Chinner 	bool			is_filestream = false;
359785843327SDave Chinner 	int			error;
359885843327SDave Chinner 
3599*2a7f6d41SDave Chinner 	if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3600*2a7f6d41SDave Chinner 	    xfs_inode_is_filestream(ap->ip))
3601*2a7f6d41SDave Chinner 		is_filestream = true;
3602*2a7f6d41SDave Chinner 
360385843327SDave Chinner 	/*
360485843327SDave Chinner 	 * Determine the initial block number we will target for allocation.
360585843327SDave Chinner 	 */
3606*2a7f6d41SDave Chinner 	if (is_filestream) {
360785843327SDave Chinner 		xfs_agnumber_t	agno = xfs_filestream_lookup_ag(ap->ip);
360885843327SDave Chinner 		if (agno == NULLAGNUMBER)
360985843327SDave Chinner 			agno = 0;
361085843327SDave Chinner 		ap->blkno = XFS_AGB_TO_FSB(mp, agno, 0);
361185843327SDave Chinner 	} else {
361285843327SDave Chinner 		ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
361385843327SDave Chinner 	}
361485843327SDave Chinner 	xfs_bmap_adjacent(ap);
361585843327SDave Chinner 	args->fsbno = ap->blkno;
361685843327SDave Chinner 
361785843327SDave Chinner 	/*
361885843327SDave Chinner 	 * Search for an allocation group with a single extent large enough for
361985843327SDave Chinner 	 * the request.  If one isn't found, then adjust the minimum allocation
362085843327SDave Chinner 	 * size to the largest space found.
362185843327SDave Chinner 	 */
3622*2a7f6d41SDave Chinner 	if (is_filestream) {
3623319c9e87SDave Chinner 		/*
3624319c9e87SDave Chinner 		 * If there is very little free space before we start a
3625319c9e87SDave Chinner 		 * filestreams allocation, we're almost guaranteed to fail to
3626319c9e87SDave Chinner 		 * find an AG with enough contiguous free space to succeed, so
3627319c9e87SDave Chinner 		 * just go straight to the low space algorithm.
3628319c9e87SDave Chinner 		 */
3629319c9e87SDave Chinner 		if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3630319c9e87SDave Chinner 			args->minlen = ap->minlen;
3631319c9e87SDave Chinner 			goto critically_low_space;
3632319c9e87SDave Chinner 		}
363385843327SDave Chinner 		error = xfs_bmap_btalloc_filestreams(ap, args, &blen);
3634319c9e87SDave Chinner 	} else {
363585843327SDave Chinner 		error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3636319c9e87SDave Chinner 	}
363785843327SDave Chinner 	if (error)
363885843327SDave Chinner 		return error;
363985843327SDave Chinner 
364085843327SDave Chinner 	/*
364185843327SDave Chinner 	 * Don't attempt optimal EOF allocation if previous allocations barely
364285843327SDave Chinner 	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
364385843327SDave Chinner 	 * optimal or even aligned allocations in this case, so don't waste time
364485843327SDave Chinner 	 * trying.
364585843327SDave Chinner 	 */
364685843327SDave Chinner 	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3647*2a7f6d41SDave Chinner 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3648*2a7f6d41SDave Chinner 				is_filestream);
364985843327SDave Chinner 		if (error)
365085843327SDave Chinner 			return error;
365185843327SDave Chinner 		if (args->fsbno != NULLFSBLOCK)
365285843327SDave Chinner 			return 0;
365385843327SDave Chinner 	}
365485843327SDave Chinner 
3655*2a7f6d41SDave Chinner 	if (is_filestream)
365685843327SDave Chinner 		error = xfs_alloc_vextent(args);
3657*2a7f6d41SDave Chinner 	else
3658*2a7f6d41SDave Chinner 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
365985843327SDave Chinner 	if (error)
366085843327SDave Chinner 		return error;
366185843327SDave Chinner 	if (args->fsbno != NULLFSBLOCK)
366285843327SDave Chinner 		return 0;
366385843327SDave Chinner 
366485843327SDave Chinner 	/*
366585843327SDave Chinner 	 * Try a locality first full filesystem minimum length allocation whilst
366685843327SDave Chinner 	 * still maintaining necessary total block reservation requirements.
366785843327SDave Chinner 	 */
366885843327SDave Chinner 	if (args->minlen > ap->minlen) {
366985843327SDave Chinner 		args->minlen = ap->minlen;
3670*2a7f6d41SDave Chinner 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
367185843327SDave Chinner 		if (error)
367285843327SDave Chinner 			return error;
367385843327SDave Chinner 	}
367485843327SDave Chinner 	if (args->fsbno != NULLFSBLOCK)
367585843327SDave Chinner 		return 0;
367685843327SDave Chinner 
367785843327SDave Chinner 	/*
367885843327SDave Chinner 	 * We are now critically low on space, so this is a last resort
367985843327SDave Chinner 	 * allocation attempt: no reserve, no locality, blocking, minimum
368085843327SDave Chinner 	 * length, full filesystem free space scan. We also indicate to future
368185843327SDave Chinner 	 * allocations in this transaction that we are critically low on space
368285843327SDave Chinner 	 * so they don't waste time on allocation modes that are unlikely to
368385843327SDave Chinner 	 * succeed.
368485843327SDave Chinner 	 */
3685319c9e87SDave Chinner critically_low_space:
368685843327SDave Chinner 	args->total = ap->minlen;
3687319c9e87SDave Chinner 	error = xfs_alloc_vextent_first_ag(args, 0);
368885843327SDave Chinner 	if (error)
368985843327SDave Chinner 		return error;
369085843327SDave Chinner 	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
369185843327SDave Chinner 	return 0;
369285843327SDave Chinner }
369385843327SDave Chinner 
369485843327SDave Chinner static int
369530f712c9SDave Chinner xfs_bmap_btalloc(
369630151967SChandan Babu R 	struct xfs_bmalloca	*ap)
369730f712c9SDave Chinner {
369830151967SChandan Babu R 	struct xfs_mount	*mp = ap->ip->i_mount;
369985843327SDave Chinner 	struct xfs_alloc_arg	args = {
370085843327SDave Chinner 		.tp		= ap->tp,
370185843327SDave Chinner 		.mp		= mp,
370285843327SDave Chinner 		.fsbno		= NULLFSBLOCK,
370385843327SDave Chinner 		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
370485843327SDave Chinner 		.minleft	= ap->minleft,
370585843327SDave Chinner 		.wasdel		= ap->wasdel,
370685843327SDave Chinner 		.resv		= XFS_AG_RESV_NONE,
370785843327SDave Chinner 		.datatype	= ap->datatype,
370885843327SDave Chinner 		.alignment	= 1,
370985843327SDave Chinner 		.minalignslop	= 0,
371085843327SDave Chinner 	};
37116d8a45ceSDarrick J. Wong 	xfs_fileoff_t		orig_offset;
37126d8a45ceSDarrick J. Wong 	xfs_extlen_t		orig_length;
371330f712c9SDave Chinner 	int			error;
371430f712c9SDave Chinner 	int			stripe_align;
371530f712c9SDave Chinner 
371630f712c9SDave Chinner 	ASSERT(ap->length);
37176d8a45ceSDarrick J. Wong 	orig_offset = ap->offset;
37186d8a45ceSDarrick J. Wong 	orig_length = ap->length;
371930f712c9SDave Chinner 
37200961fddfSChandan Babu R 	stripe_align = xfs_bmap_compute_alignments(ap, &args);
372130f712c9SDave Chinner 
372230f712c9SDave Chinner 	/* Trim the allocation back to the maximum an AG can fit. */
37239bb54cb5SDave Chinner 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
372436b6ad2dSDave Chinner 
372585843327SDave Chinner 	error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
372630f712c9SDave Chinner 	if (error)
372730f712c9SDave Chinner 		return error;
37280961fddfSChandan Babu R 
372907c72e55SChandan Babu R 	if (args.fsbno != NULLFSBLOCK) {
373007c72e55SChandan Babu R 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
373107c72e55SChandan Babu R 			orig_length);
373230f712c9SDave Chinner 	} else {
373330f712c9SDave Chinner 		ap->blkno = NULLFSBLOCK;
373430f712c9SDave Chinner 		ap->length = 0;
373530f712c9SDave Chinner 	}
373630f712c9SDave Chinner 	return 0;
373730f712c9SDave Chinner }
373830f712c9SDave Chinner 
37390a0af28cSDarrick J. Wong /* Trim extent to fit a logical block range. */
37400a0af28cSDarrick J. Wong void
37410a0af28cSDarrick J. Wong xfs_trim_extent(
37420a0af28cSDarrick J. Wong 	struct xfs_bmbt_irec	*irec,
37430a0af28cSDarrick J. Wong 	xfs_fileoff_t		bno,
37440a0af28cSDarrick J. Wong 	xfs_filblks_t		len)
37450a0af28cSDarrick J. Wong {
37460a0af28cSDarrick J. Wong 	xfs_fileoff_t		distance;
37470a0af28cSDarrick J. Wong 	xfs_fileoff_t		end = bno + len;
37480a0af28cSDarrick J. Wong 
37490a0af28cSDarrick J. Wong 	if (irec->br_startoff + irec->br_blockcount <= bno ||
37500a0af28cSDarrick J. Wong 	    irec->br_startoff >= end) {
37510a0af28cSDarrick J. Wong 		irec->br_blockcount = 0;
37520a0af28cSDarrick J. Wong 		return;
37530a0af28cSDarrick J. Wong 	}
37540a0af28cSDarrick J. Wong 
37550a0af28cSDarrick J. Wong 	if (irec->br_startoff < bno) {
37560a0af28cSDarrick J. Wong 		distance = bno - irec->br_startoff;
37570a0af28cSDarrick J. Wong 		if (isnullstartblock(irec->br_startblock))
37580a0af28cSDarrick J. Wong 			irec->br_startblock = DELAYSTARTBLOCK;
37590a0af28cSDarrick J. Wong 		if (irec->br_startblock != DELAYSTARTBLOCK &&
37600a0af28cSDarrick J. Wong 		    irec->br_startblock != HOLESTARTBLOCK)
37610a0af28cSDarrick J. Wong 			irec->br_startblock += distance;
37620a0af28cSDarrick J. Wong 		irec->br_startoff += distance;
37630a0af28cSDarrick J. Wong 		irec->br_blockcount -= distance;
37640a0af28cSDarrick J. Wong 	}
37650a0af28cSDarrick J. Wong 
37660a0af28cSDarrick J. Wong 	if (end < irec->br_startoff + irec->br_blockcount) {
37670a0af28cSDarrick J. Wong 		distance = irec->br_startoff + irec->br_blockcount - end;
37680a0af28cSDarrick J. Wong 		irec->br_blockcount -= distance;
37690a0af28cSDarrick J. Wong 	}
37700a0af28cSDarrick J. Wong }
37710a0af28cSDarrick J. Wong 
377230f712c9SDave Chinner /*
377330f712c9SDave Chinner  * Trim the returned map to the required bounds
377430f712c9SDave Chinner  */
377530f712c9SDave Chinner STATIC void
377630f712c9SDave Chinner xfs_bmapi_trim_map(
377730f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,
377830f712c9SDave Chinner 	struct xfs_bmbt_irec	*got,
377930f712c9SDave Chinner 	xfs_fileoff_t		*bno,
378030f712c9SDave Chinner 	xfs_filblks_t		len,
378130f712c9SDave Chinner 	xfs_fileoff_t		obno,
378230f712c9SDave Chinner 	xfs_fileoff_t		end,
378330f712c9SDave Chinner 	int			n,
3784e7d410acSDave Chinner 	uint32_t		flags)
378530f712c9SDave Chinner {
378630f712c9SDave Chinner 	if ((flags & XFS_BMAPI_ENTIRE) ||
378730f712c9SDave Chinner 	    got->br_startoff + got->br_blockcount <= obno) {
378830f712c9SDave Chinner 		*mval = *got;
378930f712c9SDave Chinner 		if (isnullstartblock(got->br_startblock))
379030f712c9SDave Chinner 			mval->br_startblock = DELAYSTARTBLOCK;
379130f712c9SDave Chinner 		return;
379230f712c9SDave Chinner 	}
379330f712c9SDave Chinner 
379430f712c9SDave Chinner 	if (obno > *bno)
379530f712c9SDave Chinner 		*bno = obno;
379630f712c9SDave Chinner 	ASSERT((*bno >= obno) || (n == 0));
379730f712c9SDave Chinner 	ASSERT(*bno < end);
379830f712c9SDave Chinner 	mval->br_startoff = *bno;
379930f712c9SDave Chinner 	if (isnullstartblock(got->br_startblock))
380030f712c9SDave Chinner 		mval->br_startblock = DELAYSTARTBLOCK;
380130f712c9SDave Chinner 	else
380230f712c9SDave Chinner 		mval->br_startblock = got->br_startblock +
380330f712c9SDave Chinner 					(*bno - got->br_startoff);
380430f712c9SDave Chinner 	/*
380530f712c9SDave Chinner 	 * Return the minimum of what we got and what we asked for for
380630f712c9SDave Chinner 	 * the length.  We can use the len variable here because it is
380730f712c9SDave Chinner 	 * modified below and we could have been there before coming
380830f712c9SDave Chinner 	 * here if the first part of the allocation didn't overlap what
380930f712c9SDave Chinner 	 * was asked for.
381030f712c9SDave Chinner 	 */
381130f712c9SDave Chinner 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
381230f712c9SDave Chinner 			got->br_blockcount - (*bno - got->br_startoff));
381330f712c9SDave Chinner 	mval->br_state = got->br_state;
381430f712c9SDave Chinner 	ASSERT(mval->br_blockcount <= len);
381530f712c9SDave Chinner 	return;
381630f712c9SDave Chinner }
381730f712c9SDave Chinner 
381830f712c9SDave Chinner /*
381930f712c9SDave Chinner  * Update and validate the extent map to return
382030f712c9SDave Chinner  */
382130f712c9SDave Chinner STATIC void
382230f712c9SDave Chinner xfs_bmapi_update_map(
382330f712c9SDave Chinner 	struct xfs_bmbt_irec	**map,
382430f712c9SDave Chinner 	xfs_fileoff_t		*bno,
382530f712c9SDave Chinner 	xfs_filblks_t		*len,
382630f712c9SDave Chinner 	xfs_fileoff_t		obno,
382730f712c9SDave Chinner 	xfs_fileoff_t		end,
382830f712c9SDave Chinner 	int			*n,
3829e7d410acSDave Chinner 	uint32_t		flags)
383030f712c9SDave Chinner {
383130f712c9SDave Chinner 	xfs_bmbt_irec_t	*mval = *map;
383230f712c9SDave Chinner 
383330f712c9SDave Chinner 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
383430f712c9SDave Chinner 	       ((mval->br_startoff + mval->br_blockcount) <= end));
383530f712c9SDave Chinner 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
383630f712c9SDave Chinner 	       (mval->br_startoff < obno));
383730f712c9SDave Chinner 
383830f712c9SDave Chinner 	*bno = mval->br_startoff + mval->br_blockcount;
383930f712c9SDave Chinner 	*len = end - *bno;
384030f712c9SDave Chinner 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
384130f712c9SDave Chinner 		/* update previous map with new information */
384230f712c9SDave Chinner 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
384330f712c9SDave Chinner 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
384430f712c9SDave Chinner 		ASSERT(mval->br_state == mval[-1].br_state);
384530f712c9SDave Chinner 		mval[-1].br_blockcount = mval->br_blockcount;
384630f712c9SDave Chinner 		mval[-1].br_state = mval->br_state;
384730f712c9SDave Chinner 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
384830f712c9SDave Chinner 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
384930f712c9SDave Chinner 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
385030f712c9SDave Chinner 		   mval->br_startblock == mval[-1].br_startblock +
385130f712c9SDave Chinner 					  mval[-1].br_blockcount &&
3852c3a2f9ffSChristoph Hellwig 		   mval[-1].br_state == mval->br_state) {
385330f712c9SDave Chinner 		ASSERT(mval->br_startoff ==
385430f712c9SDave Chinner 		       mval[-1].br_startoff + mval[-1].br_blockcount);
385530f712c9SDave Chinner 		mval[-1].br_blockcount += mval->br_blockcount;
385630f712c9SDave Chinner 	} else if (*n > 0 &&
385730f712c9SDave Chinner 		   mval->br_startblock == DELAYSTARTBLOCK &&
385830f712c9SDave Chinner 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
385930f712c9SDave Chinner 		   mval->br_startoff ==
386030f712c9SDave Chinner 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
386130f712c9SDave Chinner 		mval[-1].br_blockcount += mval->br_blockcount;
386230f712c9SDave Chinner 		mval[-1].br_state = mval->br_state;
386330f712c9SDave Chinner 	} else if (!((*n == 0) &&
386430f712c9SDave Chinner 		     ((mval->br_startoff + mval->br_blockcount) <=
386530f712c9SDave Chinner 		      obno))) {
386630f712c9SDave Chinner 		mval++;
386730f712c9SDave Chinner 		(*n)++;
386830f712c9SDave Chinner 	}
386930f712c9SDave Chinner 	*map = mval;
387030f712c9SDave Chinner }
387130f712c9SDave Chinner 
387230f712c9SDave Chinner /*
387330f712c9SDave Chinner  * Map file blocks to filesystem blocks without allocation.
387430f712c9SDave Chinner  */
387530f712c9SDave Chinner int
387630f712c9SDave Chinner xfs_bmapi_read(
387730f712c9SDave Chinner 	struct xfs_inode	*ip,
387830f712c9SDave Chinner 	xfs_fileoff_t		bno,
387930f712c9SDave Chinner 	xfs_filblks_t		len,
388030f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,
388130f712c9SDave Chinner 	int			*nmap,
3882e7d410acSDave Chinner 	uint32_t		flags)
388330f712c9SDave Chinner {
388430f712c9SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
38854b516ff4SChristoph Hellwig 	int			whichfork = xfs_bmapi_whichfork(flags);
3886732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
388730f712c9SDave Chinner 	struct xfs_bmbt_irec	got;
388830f712c9SDave Chinner 	xfs_fileoff_t		obno;
388930f712c9SDave Chinner 	xfs_fileoff_t		end;
3890b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
389130f712c9SDave Chinner 	int			error;
3892334f3423SChristoph Hellwig 	bool			eof = false;
389330f712c9SDave Chinner 	int			n = 0;
389430f712c9SDave Chinner 
389530f712c9SDave Chinner 	ASSERT(*nmap >= 1);
38961a1c57b2SChristoph Hellwig 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
389730f712c9SDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
389830f712c9SDave Chinner 
38994b516ff4SChristoph Hellwig 	if (WARN_ON_ONCE(!ifp))
39004b516ff4SChristoph Hellwig 		return -EFSCORRUPTED;
39014b516ff4SChristoph Hellwig 
3902f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3903f7e67b20SChristoph Hellwig 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
39042451337dSDave Chinner 		return -EFSCORRUPTED;
390530f712c9SDave Chinner 
390675c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
39072451337dSDave Chinner 		return -EIO;
390830f712c9SDave Chinner 
3909ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_blk_mapr);
391030f712c9SDave Chinner 
391130f712c9SDave Chinner 	error = xfs_iread_extents(NULL, ip, whichfork);
391230f712c9SDave Chinner 	if (error)
391330f712c9SDave Chinner 		return error;
391430f712c9SDave Chinner 
3915b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3916334f3423SChristoph Hellwig 		eof = true;
391730f712c9SDave Chinner 	end = bno + len;
391830f712c9SDave Chinner 	obno = bno;
391930f712c9SDave Chinner 
392030f712c9SDave Chinner 	while (bno < end && n < *nmap) {
392130f712c9SDave Chinner 		/* Reading past eof, act as though there's a hole up to end. */
392230f712c9SDave Chinner 		if (eof)
392330f712c9SDave Chinner 			got.br_startoff = end;
392430f712c9SDave Chinner 		if (got.br_startoff > bno) {
392530f712c9SDave Chinner 			/* Reading in a hole.  */
392630f712c9SDave Chinner 			mval->br_startoff = bno;
392730f712c9SDave Chinner 			mval->br_startblock = HOLESTARTBLOCK;
392830f712c9SDave Chinner 			mval->br_blockcount =
392930f712c9SDave Chinner 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
393030f712c9SDave Chinner 			mval->br_state = XFS_EXT_NORM;
393130f712c9SDave Chinner 			bno += mval->br_blockcount;
393230f712c9SDave Chinner 			len -= mval->br_blockcount;
393330f712c9SDave Chinner 			mval++;
393430f712c9SDave Chinner 			n++;
393530f712c9SDave Chinner 			continue;
393630f712c9SDave Chinner 		}
393730f712c9SDave Chinner 
393830f712c9SDave Chinner 		/* set up the extent map to return. */
393930f712c9SDave Chinner 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
394030f712c9SDave Chinner 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
394130f712c9SDave Chinner 
394230f712c9SDave Chinner 		/* If we're done, stop now. */
394330f712c9SDave Chinner 		if (bno >= end || n >= *nmap)
394430f712c9SDave Chinner 			break;
394530f712c9SDave Chinner 
394630f712c9SDave Chinner 		/* Else go on to the next record. */
3947b2b1712aSChristoph Hellwig 		if (!xfs_iext_next_extent(ifp, &icur, &got))
3948334f3423SChristoph Hellwig 			eof = true;
394930f712c9SDave Chinner 	}
395030f712c9SDave Chinner 	*nmap = n;
395130f712c9SDave Chinner 	return 0;
395230f712c9SDave Chinner }
395330f712c9SDave Chinner 
3954f65e6fadSBrian Foster /*
3955f65e6fadSBrian Foster  * Add a delayed allocation extent to an inode. Blocks are reserved from the
3956f65e6fadSBrian Foster  * global pool and the extent inserted into the inode in-core extent tree.
3957f65e6fadSBrian Foster  *
3958f65e6fadSBrian Foster  * On entry, got refers to the first extent beyond the offset of the extent to
3959f65e6fadSBrian Foster  * allocate or eof is specified if no such extent exists. On return, got refers
3960f65e6fadSBrian Foster  * to the extent record that was inserted to the inode fork.
3961f65e6fadSBrian Foster  *
3962f65e6fadSBrian Foster  * Note that the allocated extent may have been merged with contiguous extents
3963f65e6fadSBrian Foster  * during insertion into the inode fork. Thus, got does not reflect the current
3964f65e6fadSBrian Foster  * state of the inode fork on return. If necessary, the caller can use lastx to
3965f65e6fadSBrian Foster  * look up the updated record in the inode fork.
3966f65e6fadSBrian Foster  */
396751446f5bSChristoph Hellwig int
396830f712c9SDave Chinner xfs_bmapi_reserve_delalloc(
396930f712c9SDave Chinner 	struct xfs_inode	*ip,
3970be51f811SDarrick J. Wong 	int			whichfork,
3971974ae922SBrian Foster 	xfs_fileoff_t		off,
397230f712c9SDave Chinner 	xfs_filblks_t		len,
3973974ae922SBrian Foster 	xfs_filblks_t		prealloc,
397430f712c9SDave Chinner 	struct xfs_bmbt_irec	*got,
3975b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
397630f712c9SDave Chinner 	int			eof)
397730f712c9SDave Chinner {
397830f712c9SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
3979732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
398030f712c9SDave Chinner 	xfs_extlen_t		alen;
398130f712c9SDave Chinner 	xfs_extlen_t		indlen;
398230f712c9SDave Chinner 	int			error;
3983974ae922SBrian Foster 	xfs_fileoff_t		aoff = off;
398430f712c9SDave Chinner 
3985974ae922SBrian Foster 	/*
3986974ae922SBrian Foster 	 * Cap the alloc length. Keep track of prealloc so we know whether to
3987974ae922SBrian Foster 	 * tag the inode before we return.
3988974ae922SBrian Foster 	 */
398995f0b95eSChandan Babu R 	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
399030f712c9SDave Chinner 	if (!eof)
399130f712c9SDave Chinner 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3992974ae922SBrian Foster 	if (prealloc && alen >= len)
3993974ae922SBrian Foster 		prealloc = alen - len;
399430f712c9SDave Chinner 
399530f712c9SDave Chinner 	/* Figure out the extent size, adjust alen */
39966ca30729SShan Hai 	if (whichfork == XFS_COW_FORK) {
399765c5f419SChristoph Hellwig 		struct xfs_bmbt_irec	prev;
39986ca30729SShan Hai 		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);
399965c5f419SChristoph Hellwig 
4000b2b1712aSChristoph Hellwig 		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
400165c5f419SChristoph Hellwig 			prev.br_startoff = NULLFILEOFF;
400265c5f419SChristoph Hellwig 
40036ca30729SShan Hai 		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
400430f712c9SDave Chinner 					       1, 0, &aoff, &alen);
400530f712c9SDave Chinner 		ASSERT(!error);
400630f712c9SDave Chinner 	}
400730f712c9SDave Chinner 
400830f712c9SDave Chinner 	/*
400930f712c9SDave Chinner 	 * Make a transaction-less quota reservation for delayed allocation
401030f712c9SDave Chinner 	 * blocks.  This number gets adjusted later.  We return if we haven't
401130f712c9SDave Chinner 	 * allocated blocks already inside this loop.
401230f712c9SDave Chinner 	 */
401385546500SDarrick J. Wong 	error = xfs_quota_reserve_blkres(ip, alen);
401430f712c9SDave Chinner 	if (error)
401530f712c9SDave Chinner 		return error;
401630f712c9SDave Chinner 
401730f712c9SDave Chinner 	/*
401830f712c9SDave Chinner 	 * Split changing sb for alen and indlen since they could be coming
401930f712c9SDave Chinner 	 * from different places.
402030f712c9SDave Chinner 	 */
402130f712c9SDave Chinner 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
402230f712c9SDave Chinner 	ASSERT(indlen > 0);
402330f712c9SDave Chinner 
40240d485adaSDave Chinner 	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
402530f712c9SDave Chinner 	if (error)
402630f712c9SDave Chinner 		goto out_unreserve_quota;
402730f712c9SDave Chinner 
40280d485adaSDave Chinner 	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
402930f712c9SDave Chinner 	if (error)
403030f712c9SDave Chinner 		goto out_unreserve_blocks;
403130f712c9SDave Chinner 
403230f712c9SDave Chinner 
403330f712c9SDave Chinner 	ip->i_delayed_blks += alen;
40349fe82b8cSDarrick J. Wong 	xfs_mod_delalloc(ip->i_mount, alen + indlen);
403530f712c9SDave Chinner 
403630f712c9SDave Chinner 	got->br_startoff = aoff;
403730f712c9SDave Chinner 	got->br_startblock = nullstartblock(indlen);
403830f712c9SDave Chinner 	got->br_blockcount = alen;
403930f712c9SDave Chinner 	got->br_state = XFS_EXT_NORM;
404030f712c9SDave Chinner 
4041b2b1712aSChristoph Hellwig 	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
404230f712c9SDave Chinner 
4043974ae922SBrian Foster 	/*
4044974ae922SBrian Foster 	 * Tag the inode if blocks were preallocated. Note that COW fork
4045974ae922SBrian Foster 	 * preallocation can occur at the start or end of the extent, even when
4046974ae922SBrian Foster 	 * prealloc == 0, so we must also check the aligned offset and length.
4047974ae922SBrian Foster 	 */
4048974ae922SBrian Foster 	if (whichfork == XFS_DATA_FORK && prealloc)
4049974ae922SBrian Foster 		xfs_inode_set_eofblocks_tag(ip);
4050974ae922SBrian Foster 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4051974ae922SBrian Foster 		xfs_inode_set_cowblocks_tag(ip);
4052974ae922SBrian Foster 
405330f712c9SDave Chinner 	return 0;
405430f712c9SDave Chinner 
405530f712c9SDave Chinner out_unreserve_blocks:
40560d485adaSDave Chinner 	xfs_mod_fdblocks(mp, alen, false);
405730f712c9SDave Chinner out_unreserve_quota:
405830f712c9SDave Chinner 	if (XFS_IS_QUOTA_ON(mp))
405985546500SDarrick J. Wong 		xfs_quota_unreserve_blkres(ip, alen);
406030f712c9SDave Chinner 	return error;
406130f712c9SDave Chinner }
406230f712c9SDave Chinner 
40637f8a058fSDave Chinner static int
4064be6cacbeSChristoph Hellwig xfs_bmap_alloc_userdata(
4065be6cacbeSChristoph Hellwig 	struct xfs_bmalloca	*bma)
4066be6cacbeSChristoph Hellwig {
4067be6cacbeSChristoph Hellwig 	struct xfs_mount	*mp = bma->ip->i_mount;
4068be6cacbeSChristoph Hellwig 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4069be6cacbeSChristoph Hellwig 	int			error;
4070be6cacbeSChristoph Hellwig 
4071be6cacbeSChristoph Hellwig 	/*
4072be6cacbeSChristoph Hellwig 	 * Set the data type being allocated. For the data fork, the first data
4073be6cacbeSChristoph Hellwig 	 * in the file is treated differently to all other allocations. For the
4074be6cacbeSChristoph Hellwig 	 * attribute fork, we only need to ensure the allocated range is not on
4075be6cacbeSChristoph Hellwig 	 * the busy list.
4076be6cacbeSChristoph Hellwig 	 */
4077be6cacbeSChristoph Hellwig 	bma->datatype = XFS_ALLOC_NOBUSY;
4078ddfdd530SDarrick J. Wong 	if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4079c34d570dSChristoph Hellwig 		bma->datatype |= XFS_ALLOC_USERDATA;
4080be6cacbeSChristoph Hellwig 		if (bma->offset == 0)
4081be6cacbeSChristoph Hellwig 			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4082be6cacbeSChristoph Hellwig 
4083be6cacbeSChristoph Hellwig 		if (mp->m_dalign && bma->length >= mp->m_dalign) {
4084be6cacbeSChristoph Hellwig 			error = xfs_bmap_isaeof(bma, whichfork);
4085be6cacbeSChristoph Hellwig 			if (error)
4086be6cacbeSChristoph Hellwig 				return error;
4087be6cacbeSChristoph Hellwig 		}
4088be6cacbeSChristoph Hellwig 
4089be6cacbeSChristoph Hellwig 		if (XFS_IS_REALTIME_INODE(bma->ip))
4090be6cacbeSChristoph Hellwig 			return xfs_bmap_rtalloc(bma);
4091be6cacbeSChristoph Hellwig 	}
4092be6cacbeSChristoph Hellwig 
409330151967SChandan Babu R 	if (unlikely(XFS_TEST_ERROR(false, mp,
409430151967SChandan Babu R 			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
409530151967SChandan Babu R 		return xfs_bmap_exact_minlen_extent_alloc(bma);
409630151967SChandan Babu R 
4097be6cacbeSChristoph Hellwig 	return xfs_bmap_btalloc(bma);
4098be6cacbeSChristoph Hellwig }
4099be6cacbeSChristoph Hellwig 
4100be6cacbeSChristoph Hellwig static int
41017f8a058fSDave Chinner xfs_bmapi_allocate(
410230f712c9SDave Chinner 	struct xfs_bmalloca	*bma)
410330f712c9SDave Chinner {
410430f712c9SDave Chinner 	struct xfs_mount	*mp = bma->ip->i_mount;
410560b4984fSDarrick J. Wong 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4106732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
410730f712c9SDave Chinner 	int			tmp_logflags = 0;
410830f712c9SDave Chinner 	int			error;
410930f712c9SDave Chinner 
411030f712c9SDave Chinner 	ASSERT(bma->length > 0);
411130f712c9SDave Chinner 
411230f712c9SDave Chinner 	/*
411330f712c9SDave Chinner 	 * For the wasdelay case, we could also just allocate the stuff asked
411430f712c9SDave Chinner 	 * for in this bmap call but that wouldn't be as good.
411530f712c9SDave Chinner 	 */
411630f712c9SDave Chinner 	if (bma->wasdel) {
411730f712c9SDave Chinner 		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
411830f712c9SDave Chinner 		bma->offset = bma->got.br_startoff;
4119f5be0844SDarrick J. Wong 		if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
4120f5be0844SDarrick J. Wong 			bma->prev.br_startoff = NULLFILEOFF;
412130f712c9SDave Chinner 	} else {
412295f0b95eSChandan Babu R 		bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN);
412330f712c9SDave Chinner 		if (!bma->eof)
412430f712c9SDave Chinner 			bma->length = XFS_FILBLKS_MIN(bma->length,
412530f712c9SDave Chinner 					bma->got.br_startoff - bma->offset);
412630f712c9SDave Chinner 	}
412730f712c9SDave Chinner 
4128be6cacbeSChristoph Hellwig 	if (bma->flags & XFS_BMAPI_CONTIG)
4129be6cacbeSChristoph Hellwig 		bma->minlen = bma->length;
4130ce840429SDarrick J. Wong 	else
4131be6cacbeSChristoph Hellwig 		bma->minlen = 1;
413230f712c9SDave Chinner 
413330151967SChandan Babu R 	if (bma->flags & XFS_BMAPI_METADATA) {
413430151967SChandan Babu R 		if (unlikely(XFS_TEST_ERROR(false, mp,
413530151967SChandan Babu R 				XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
413630151967SChandan Babu R 			error = xfs_bmap_exact_minlen_extent_alloc(bma);
4137be6cacbeSChristoph Hellwig 		else
413830151967SChandan Babu R 			error = xfs_bmap_btalloc(bma);
413930151967SChandan Babu R 	} else {
4140be6cacbeSChristoph Hellwig 		error = xfs_bmap_alloc_userdata(bma);
414130151967SChandan Babu R 	}
4142be6cacbeSChristoph Hellwig 	if (error || bma->blkno == NULLFSBLOCK)
414330f712c9SDave Chinner 		return error;
414430f712c9SDave Chinner 
4145fd638f1dSChristoph Hellwig 	if (bma->flags & XFS_BMAPI_ZERO) {
4146fd638f1dSChristoph Hellwig 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4147fd638f1dSChristoph Hellwig 		if (error)
4148fd638f1dSChristoph Hellwig 			return error;
4149fd638f1dSChristoph Hellwig 	}
4150fd638f1dSChristoph Hellwig 
4151ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
415230f712c9SDave Chinner 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
415330f712c9SDave Chinner 	/*
415430f712c9SDave Chinner 	 * Bump the number of extents we've allocated
415530f712c9SDave Chinner 	 * in this call.
415630f712c9SDave Chinner 	 */
415730f712c9SDave Chinner 	bma->nallocs++;
415830f712c9SDave Chinner 
415930f712c9SDave Chinner 	if (bma->cur)
416092219c29SDave Chinner 		bma->cur->bc_ino.flags =
41618ef54797SDave Chinner 			bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
416230f712c9SDave Chinner 
416330f712c9SDave Chinner 	bma->got.br_startoff = bma->offset;
416430f712c9SDave Chinner 	bma->got.br_startblock = bma->blkno;
416530f712c9SDave Chinner 	bma->got.br_blockcount = bma->length;
416630f712c9SDave Chinner 	bma->got.br_state = XFS_EXT_NORM;
416730f712c9SDave Chinner 
4168a5949d3fSDarrick J. Wong 	if (bma->flags & XFS_BMAPI_PREALLOC)
416930f712c9SDave Chinner 		bma->got.br_state = XFS_EXT_UNWRITTEN;
417030f712c9SDave Chinner 
417130f712c9SDave Chinner 	if (bma->wasdel)
417260b4984fSDarrick J. Wong 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
417330f712c9SDave Chinner 	else
41746d04558fSChristoph Hellwig 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4175b2b1712aSChristoph Hellwig 				whichfork, &bma->icur, &bma->cur, &bma->got,
417692f9da30SBrian Foster 				&bma->logflags, bma->flags);
417730f712c9SDave Chinner 
417830f712c9SDave Chinner 	bma->logflags |= tmp_logflags;
417930f712c9SDave Chinner 	if (error)
418030f712c9SDave Chinner 		return error;
418130f712c9SDave Chinner 
418230f712c9SDave Chinner 	/*
418330f712c9SDave Chinner 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
418430f712c9SDave Chinner 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
418530f712c9SDave Chinner 	 * the neighbouring ones.
418630f712c9SDave Chinner 	 */
4187b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
418830f712c9SDave Chinner 
418930f712c9SDave Chinner 	ASSERT(bma->got.br_startoff <= bma->offset);
419030f712c9SDave Chinner 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
419130f712c9SDave Chinner 	       bma->offset + bma->length);
419230f712c9SDave Chinner 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
419330f712c9SDave Chinner 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
419430f712c9SDave Chinner 	return 0;
419530f712c9SDave Chinner }
419630f712c9SDave Chinner 
/*
 * Convert the extent state of the mapping in @mval to match what the
 * XFS_BMAPI_PREALLOC/XFS_BMAPI_CONVERT flags ask for:
 *
 *  - an unwritten extent is left alone when the caller asked for a
 *    preallocation (it is supposed to stay unwritten);
 *  - a real (written) extent is only converted back to unwritten when
 *    both XFS_BMAPI_PREALLOC and XFS_BMAPI_CONVERT are set;
 *  - otherwise the state of @mval is flipped and the change is recorded
 *    in the extent list / bmap btree.
 *
 * Returns -EAGAIN if the resulting extent covers less than @len (e.g.
 * because the conversion merged with a neighbour), telling the caller to
 * issue another mapping request for the remainder; 0 or a negative errno
 * otherwise.
 */
419730f712c9SDave Chinner STATIC int
419830f712c9SDave Chinner xfs_bmapi_convert_unwritten(
419930f712c9SDave Chinner 	struct xfs_bmalloca	*bma,
420030f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,
420130f712c9SDave Chinner 	xfs_filblks_t		len,
4202e7d410acSDave Chinner 	uint32_t		flags)
420330f712c9SDave Chinner {
42043993baebSDarrick J. Wong 	int			whichfork = xfs_bmapi_whichfork(flags);
4205732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
420630f712c9SDave Chinner 	int			tmp_logflags = 0;
420730f712c9SDave Chinner 	int			error;
420830f712c9SDave Chinner 
420930f712c9SDave Chinner 	/* check if we need to do unwritten->real conversion */
421030f712c9SDave Chinner 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
421130f712c9SDave Chinner 	    (flags & XFS_BMAPI_PREALLOC))
421230f712c9SDave Chinner 		return 0;
421330f712c9SDave Chinner 
421430f712c9SDave Chinner 	/* check if we need to do real->unwritten conversion */
421530f712c9SDave Chinner 	if (mval->br_state == XFS_EXT_NORM &&
421630f712c9SDave Chinner 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
421730f712c9SDave Chinner 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
421830f712c9SDave Chinner 		return 0;
421930f712c9SDave Chinner 
422030f712c9SDave Chinner 	/*
422130f712c9SDave Chinner 	 * Modify (by adding) the state flag, if writing.
422230f712c9SDave Chinner 	 */
422330f712c9SDave Chinner 	ASSERT(mval->br_blockcount <= len);
	/* A btree-format fork needs a cursor for the extent tree update below. */
4224ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
422530f712c9SDave Chinner 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
422630f712c9SDave Chinner 					bma->ip, whichfork);
422730f712c9SDave Chinner 	}
422830f712c9SDave Chinner 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
422930f712c9SDave Chinner 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
423030f712c9SDave Chinner 
42313fbbbea3SDave Chinner 	/*
42323fbbbea3SDave Chinner 	 * Before insertion into the bmbt, zero the range being converted
42333fbbbea3SDave Chinner 	 * if required.
42343fbbbea3SDave Chinner 	 */
42353fbbbea3SDave Chinner 	if (flags & XFS_BMAPI_ZERO) {
42363fbbbea3SDave Chinner 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
42373fbbbea3SDave Chinner 					mval->br_blockcount);
42383fbbbea3SDave Chinner 		if (error)
42393fbbbea3SDave Chinner 			return error;
42403fbbbea3SDave Chinner 	}
42413fbbbea3SDave Chinner 
424205a630d7SDarrick J. Wong 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
424392f9da30SBrian Foster 			&bma->icur, &bma->cur, mval, &tmp_logflags);
42442e588a46SBrian Foster 	/*
42452e588a46SBrian Foster 	 * Log the inode core unconditionally in the unwritten extent conversion
42462e588a46SBrian Foster 	 * path because the conversion might not have done so (e.g., if the
42472e588a46SBrian Foster 	 * extent count hasn't changed). We need to make sure the inode is dirty
42482e588a46SBrian Foster 	 * in the transaction for the sake of fsync(), even if nothing has
42492e588a46SBrian Foster 	 * changed, because fsync() will not force the log for this transaction
42502e588a46SBrian Foster 	 * unless it sees the inode pinned.
425105a630d7SDarrick J. Wong 	 *
425205a630d7SDarrick J. Wong 	 * Note: If we're only converting cow fork extents, there aren't
425305a630d7SDarrick J. Wong 	 * any on-disk updates to make, so we don't need to log anything.
42542e588a46SBrian Foster 	 */
425505a630d7SDarrick J. Wong 	if (whichfork != XFS_COW_FORK)
42562e588a46SBrian Foster 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
425730f712c9SDave Chinner 	if (error)
425830f712c9SDave Chinner 		return error;
425930f712c9SDave Chinner 
426030f712c9SDave Chinner 	/*
426130f712c9SDave Chinner 	 * Update our extent pointer, given that
426230f712c9SDave Chinner 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
426330f712c9SDave Chinner 	 * of the neighbouring ones.
426430f712c9SDave Chinner 	 */
4265b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
426630f712c9SDave Chinner 
426730f712c9SDave Chinner 	/*
426830f712c9SDave Chinner 	 * We may have combined previously unwritten space with written space,
426930f712c9SDave Chinner 	 * so generate another request.
427030f712c9SDave Chinner 	 */
427130f712c9SDave Chinner 	if (mval->br_blockcount < len)
42722451337dSDave Chinner 		return -EAGAIN;
427330f712c9SDave Chinner 	return 0;
427430f712c9SDave Chinner }
427530f712c9SDave Chinner 
/*
 * Compute the "minleft" value for a bmapi allocation on @fork of @ip:
 *
 *  - 0 once the transaction is already committed to an allocation group
 *    (tp->t_highest_agno is set) -- presumably because enforcing a
 *    reservation at that point could no longer help; TODO confirm;
 *  - 1 for non-btree-format forks;
 *  - the bmap btree height plus one for btree-format forks, so that a
 *    subsequent bmbt split has blocks available.
 */
4276d5753847SDave Chinner xfs_extlen_t
4277c8b54673SChristoph Hellwig xfs_bmapi_minleft(
4278c8b54673SChristoph Hellwig 	struct xfs_trans	*tp,
4279c8b54673SChristoph Hellwig 	struct xfs_inode	*ip,
4280c8b54673SChristoph Hellwig 	int			fork)
4281c8b54673SChristoph Hellwig {
4282732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);
4283f7e67b20SChristoph Hellwig 
4284692b6cddSDave Chinner 	if (tp && tp->t_highest_agno != NULLAGNUMBER)
4285c8b54673SChristoph Hellwig 		return 0;
4286f7e67b20SChristoph Hellwig 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4287c8b54673SChristoph Hellwig 		return 1;
4288f7e67b20SChristoph Hellwig 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4289c8b54673SChristoph Hellwig }
4290c8b54673SChristoph Hellwig 
4291c8b54673SChristoph Hellwig /*
4292c8b54673SChristoph Hellwig  * Log whatever the flags say, even if error.  Otherwise we might miss detecting
4293c8b54673SChristoph Hellwig  * a case where the data is changed, there's an error, and it's not logged so we
4294c8b54673SChristoph Hellwig  * don't shutdown when we should.  Don't bother logging extents/btree changes if
4295c8b54673SChristoph Hellwig  * we converted to the other format.
4296c8b54673SChristoph Hellwig  */
/*
 * @error: nonzero if the overall bmapi operation failed; it is forwarded to
 * xfs_btree_del_cursor() when tearing down the bmap btree cursor.
 */
4297c8b54673SChristoph Hellwig static void
4298c8b54673SChristoph Hellwig xfs_bmapi_finish(
4299c8b54673SChristoph Hellwig 	struct xfs_bmalloca	*bma,
4300c8b54673SChristoph Hellwig 	int			whichfork,
4301c8b54673SChristoph Hellwig 	int			error)
4302c8b54673SChristoph Hellwig {
4303732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4304f7e67b20SChristoph Hellwig 
	/* Drop fork log flags that no longer match the fork's current format. */
4305c8b54673SChristoph Hellwig 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4306f7e67b20SChristoph Hellwig 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4307c8b54673SChristoph Hellwig 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4308c8b54673SChristoph Hellwig 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4309f7e67b20SChristoph Hellwig 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4310c8b54673SChristoph Hellwig 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4311c8b54673SChristoph Hellwig 
4312c8b54673SChristoph Hellwig 	if (bma->logflags)
4313c8b54673SChristoph Hellwig 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4314c8b54673SChristoph Hellwig 	if (bma->cur)
4315c8b54673SChristoph Hellwig 		xfs_btree_del_cursor(bma->cur, error);
4316c8b54673SChristoph Hellwig }
4317c8b54673SChristoph Hellwig 
431830f712c9SDave Chinner /*
431930f712c9SDave Chinner  * Map file blocks to filesystem blocks, and allocate blocks or convert the
432030f712c9SDave Chinner  * extent state if necessary.  Detailed behaviour is controlled by the flags
432130f712c9SDave Chinner  * parameter.  Only allocates blocks from a single allocation group, to avoid
432230f712c9SDave Chinner  * locking problems.
432330f712c9SDave Chinner  */
432430f712c9SDave Chinner int
432530f712c9SDave Chinner xfs_bmapi_write(
432630f712c9SDave Chinner 	struct xfs_trans	*tp,		/* transaction pointer */
432730f712c9SDave Chinner 	struct xfs_inode	*ip,		/* incore inode */
432830f712c9SDave Chinner 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
432930f712c9SDave Chinner 	xfs_filblks_t		len,		/* length to map in file */
4330e7d410acSDave Chinner 	uint32_t		flags,		/* XFS_BMAPI_... */
433130f712c9SDave Chinner 	xfs_extlen_t		total,		/* total blocks needed */
433230f712c9SDave Chinner 	struct xfs_bmbt_irec	*mval,		/* output: map values */
43336e702a5dSBrian Foster 	int			*nmap)		/* i/o: mval size/count */
433430f712c9SDave Chinner {
43354b0bce30SDarrick J. Wong 	struct xfs_bmalloca	bma = {
43364b0bce30SDarrick J. Wong 		.tp		= tp,
43374b0bce30SDarrick J. Wong 		.ip		= ip,
43384b0bce30SDarrick J. Wong 		.total		= total,
43394b0bce30SDarrick J. Wong 	};
434030f712c9SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
4341f7e67b20SChristoph Hellwig 	int			whichfork = xfs_bmapi_whichfork(flags);
4342732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
434330f712c9SDave Chinner 	xfs_fileoff_t		end;		/* end of mapped file region */
43442d58f6efSChristoph Hellwig 	bool			eof = false;	/* after the end of extents */
434530f712c9SDave Chinner 	int			error;		/* error return */
434630f712c9SDave Chinner 	int			n;		/* current extent index */
434730f712c9SDave Chinner 	xfs_fileoff_t		obno;		/* old block number (offset) */
434830f712c9SDave Chinner 
434930f712c9SDave Chinner #ifdef DEBUG
435030f712c9SDave Chinner 	xfs_fileoff_t		orig_bno;	/* original block number value */
435130f712c9SDave Chinner 	int			orig_flags;	/* original flags arg value */
435230f712c9SDave Chinner 	xfs_filblks_t		orig_len;	/* original value of len arg */
435330f712c9SDave Chinner 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
435430f712c9SDave Chinner 	int			orig_nmap;	/* original value of *nmap */
435530f712c9SDave Chinner 
435630f712c9SDave Chinner 	orig_bno = bno;
435730f712c9SDave Chinner 	orig_len = len;
435830f712c9SDave Chinner 	orig_flags = flags;
435930f712c9SDave Chinner 	orig_mval = mval;
436030f712c9SDave Chinner 	orig_nmap = *nmap;
436130f712c9SDave Chinner #endif
436230f712c9SDave Chinner 
436330f712c9SDave Chinner 	ASSERT(*nmap >= 1);
436430f712c9SDave Chinner 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
436526b91c72SChristoph Hellwig 	ASSERT(tp != NULL);
436630f712c9SDave Chinner 	ASSERT(len > 0);
4367f7e67b20SChristoph Hellwig 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
436830f712c9SDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
43696ebd5a44SChristoph Hellwig 	ASSERT(!(flags & XFS_BMAPI_REMAP));
437030f712c9SDave Chinner 
43713fbbbea3SDave Chinner 	/* zeroing is for currently only for data extents, not metadata */
43723fbbbea3SDave Chinner 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
43733fbbbea3SDave Chinner 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
43743fbbbea3SDave Chinner 	/*
43753fbbbea3SDave Chinner 	 * we can allocate unwritten extents or pre-zero allocated blocks,
43763fbbbea3SDave Chinner 	 * but it makes no sense to do both at once. This would result in
43773fbbbea3SDave Chinner 	 * zeroing the unwritten extent twice, but it still being an
43783fbbbea3SDave Chinner 	 * unwritten extent....
43793fbbbea3SDave Chinner 	 */
43803fbbbea3SDave Chinner 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
43813fbbbea3SDave Chinner 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
43823fbbbea3SDave Chinner 
4383f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4384a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
43852451337dSDave Chinner 		return -EFSCORRUPTED;
438630f712c9SDave Chinner 	}
438730f712c9SDave Chinner 
438875c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
43892451337dSDave Chinner 		return -EIO;
439030f712c9SDave Chinner 
4391ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_blk_mapw);
439230f712c9SDave Chinner 
439330f712c9SDave Chinner 	error = xfs_iread_extents(tp, ip, whichfork);
439430f712c9SDave Chinner 	if (error)
439530f712c9SDave Chinner 		goto error0;
439630f712c9SDave Chinner 
	/* Position the extent cursor at @bno and remember the previous extent. */
4397b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
43982d58f6efSChristoph Hellwig 		eof = true;
4399b2b1712aSChristoph Hellwig 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
44002d58f6efSChristoph Hellwig 		bma.prev.br_startoff = NULLFILEOFF;
4401c8b54673SChristoph Hellwig 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
440230f712c9SDave Chinner 
4403627209fbSBrian Foster 	n = 0;
4404627209fbSBrian Foster 	end = bno + len;
4405627209fbSBrian Foster 	obno = bno;
	/*
	 * Walk the requested range one mapping at a time, allocating holes and
	 * delalloc extents and converting extent state as needed, until *nmap
	 * mappings are filled or the range is exhausted.
	 */
440630f712c9SDave Chinner 	while (bno < end && n < *nmap) {
4407d2b3964aSChristoph Hellwig 		bool			need_alloc = false, wasdelay = false;
440830f712c9SDave Chinner 
4409be78ff0eSDarrick J. Wong 		/* in hole or beyond EOF? */
4410d2b3964aSChristoph Hellwig 		if (eof || bma.got.br_startoff > bno) {
4411be78ff0eSDarrick J. Wong 			/*
4412be78ff0eSDarrick J. Wong 			 * CoW fork conversions should /never/ hit EOF or
4413be78ff0eSDarrick J. Wong 			 * holes.  There should always be something for us
4414be78ff0eSDarrick J. Wong 			 * to work on.
4415be78ff0eSDarrick J. Wong 			 */
4416be78ff0eSDarrick J. Wong 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4417be78ff0eSDarrick J. Wong 			         (flags & XFS_BMAPI_COWFORK)));
4418be78ff0eSDarrick J. Wong 
4419d2b3964aSChristoph Hellwig 			need_alloc = true;
44206ebd5a44SChristoph Hellwig 		} else if (isnullstartblock(bma.got.br_startblock)) {
4421d2b3964aSChristoph Hellwig 			wasdelay = true;
4422d2b3964aSChristoph Hellwig 		}
4423f65306eaSDarrick J. Wong 
4424f65306eaSDarrick J. Wong 		/*
442530f712c9SDave Chinner 		 * First, deal with the hole before the allocated space
442630f712c9SDave Chinner 		 * that we found, if any.
442730f712c9SDave Chinner 		 */
442826b91c72SChristoph Hellwig 		if (need_alloc || wasdelay) {
442930f712c9SDave Chinner 			bma.eof = eof;
443030f712c9SDave Chinner 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
443130f712c9SDave Chinner 			bma.wasdel = wasdelay;
443230f712c9SDave Chinner 			bma.offset = bno;
443330f712c9SDave Chinner 			bma.flags = flags;
443430f712c9SDave Chinner 
443530f712c9SDave Chinner 			/*
443630f712c9SDave Chinner 			 * There's a 32/64 bit type mismatch between the
443730f712c9SDave Chinner 			 * allocation length request (which can be 64 bits in
443830f712c9SDave Chinner 			 * length) and the bma length request, which is
443930f712c9SDave Chinner 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
444030f712c9SDave Chinner 			 * check for 32-bit overflows and handle them here.
444130f712c9SDave Chinner 			 */
444295f0b95eSChandan Babu R 			if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN)
444395f0b95eSChandan Babu R 				bma.length = XFS_MAX_BMBT_EXTLEN;
444430f712c9SDave Chinner 			else
444530f712c9SDave Chinner 				bma.length = len;
444630f712c9SDave Chinner 
444730f712c9SDave Chinner 			ASSERT(len > 0);
444830f712c9SDave Chinner 			ASSERT(bma.length > 0);
444930f712c9SDave Chinner 			error = xfs_bmapi_allocate(&bma);
445030f712c9SDave Chinner 			if (error)
445130f712c9SDave Chinner 				goto error0;
			/* No space available: return the mappings made so far. */
445230f712c9SDave Chinner 			if (bma.blkno == NULLFSBLOCK)
445330f712c9SDave Chinner 				break;
4454174edb0eSDarrick J. Wong 
4455174edb0eSDarrick J. Wong 			/*
4456174edb0eSDarrick J. Wong 			 * If this is a CoW allocation, record the data in
4457174edb0eSDarrick J. Wong 			 * the refcount btree for orphan recovery.
4458174edb0eSDarrick J. Wong 			 */
445974b4c5d4SDarrick J. Wong 			if (whichfork == XFS_COW_FORK)
446074b4c5d4SDarrick J. Wong 				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
446174b4c5d4SDarrick J. Wong 						bma.length);
446230f712c9SDave Chinner 		}
446330f712c9SDave Chinner 
446430f712c9SDave Chinner 		/* Deal with the allocated space we found.  */
446530f712c9SDave Chinner 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
446630f712c9SDave Chinner 							end, n, flags);
446730f712c9SDave Chinner 
446830f712c9SDave Chinner 		/* Execute unwritten extent conversion if necessary */
446930f712c9SDave Chinner 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		/* -EAGAIN: conversion covered only part of the range; retry. */
44702451337dSDave Chinner 		if (error == -EAGAIN)
447130f712c9SDave Chinner 			continue;
447230f712c9SDave Chinner 		if (error)
447330f712c9SDave Chinner 			goto error0;
447430f712c9SDave Chinner 
447530f712c9SDave Chinner 		/* update the extent map to return */
447630f712c9SDave Chinner 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
447730f712c9SDave Chinner 
447830f712c9SDave Chinner 		/*
447930f712c9SDave Chinner 		 * If we're done, stop now.  Stop when we've allocated
448030f712c9SDave Chinner 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
448130f712c9SDave Chinner 		 * the transaction may get too big.
448230f712c9SDave Chinner 		 */
448330f712c9SDave Chinner 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
448430f712c9SDave Chinner 			break;
448530f712c9SDave Chinner 
448630f712c9SDave Chinner 		/* Else go on to the next record. */
448730f712c9SDave Chinner 		bma.prev = bma.got;
4488b2b1712aSChristoph Hellwig 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
44892d58f6efSChristoph Hellwig 			eof = true;
449030f712c9SDave Chinner 	}
449130f712c9SDave Chinner 	*nmap = n;
449230f712c9SDave Chinner 
4493b101e334SChristoph Hellwig 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4494b101e334SChristoph Hellwig 			whichfork);
449530f712c9SDave Chinner 	if (error)
449630f712c9SDave Chinner 		goto error0;
449730f712c9SDave Chinner 
4498f7e67b20SChristoph Hellwig 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4499daf83964SChristoph Hellwig 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4500c8b54673SChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, 0);
450130f712c9SDave Chinner 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
450230f712c9SDave Chinner 		orig_nmap, *nmap);
4503c8b54673SChristoph Hellwig 	return 0;
4504c8b54673SChristoph Hellwig error0:
4505c8b54673SChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, error);
450630f712c9SDave Chinner 	return error;
450730f712c9SDave Chinner }
450830f712c9SDave Chinner 
4509627209fbSBrian Foster /*
4510627209fbSBrian Foster  * Convert an existing delalloc extent to real blocks based on file offset. This
4511627209fbSBrian Foster  * attempts to allocate the entire delalloc extent and may require multiple
4512627209fbSBrian Foster  * invocations to allocate the target offset if a large enough physical extent
4513627209fbSBrian Foster  * is not available.
4514627209fbSBrian Foster  */
/*
 * On success *iomap describes the new mapping and *seq is snapshotted from
 * the fork's sequence counter.  Returns -EAGAIN if the extent vanished from
 * the fork before we could convert it (COW fork race).
 */
4515627209fbSBrian Foster int
4516627209fbSBrian Foster xfs_bmapi_convert_delalloc(
4517627209fbSBrian Foster 	struct xfs_inode	*ip,
4518627209fbSBrian Foster 	int			whichfork,
45194e087a3bSChristoph Hellwig 	xfs_off_t		offset,
45204e087a3bSChristoph Hellwig 	struct iomap		*iomap,
4521491ce61eSChristoph Hellwig 	unsigned int		*seq)
4522627209fbSBrian Foster {
4523732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4524491ce61eSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
45254e087a3bSChristoph Hellwig 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4526d8ae82e3SChristoph Hellwig 	struct xfs_bmalloca	bma = { NULL };
4527af952aebSDarrick J. Wong 	uint16_t		flags = 0;
4528491ce61eSChristoph Hellwig 	struct xfs_trans	*tp;
4529627209fbSBrian Foster 	int			error;
4530627209fbSBrian Foster 
45314e087a3bSChristoph Hellwig 	if (whichfork == XFS_COW_FORK)
45324e087a3bSChristoph Hellwig 		flags |= IOMAP_F_SHARED;
45334e087a3bSChristoph Hellwig 
4534491ce61eSChristoph Hellwig 	/*
4535491ce61eSChristoph Hellwig 	 * Space for the extent and indirect blocks was reserved when the
4536491ce61eSChristoph Hellwig 	 * delalloc extent was created so there's no need to do so here.
4537491ce61eSChristoph Hellwig 	 */
4538491ce61eSChristoph Hellwig 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4539491ce61eSChristoph Hellwig 				XFS_TRANS_RESERVE, &tp);
4540491ce61eSChristoph Hellwig 	if (error)
4541491ce61eSChristoph Hellwig 		return error;
4542491ce61eSChristoph Hellwig 
4543491ce61eSChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_EXCL);
45444f86bb4bSChandan Babu R 	xfs_trans_ijoin(tp, ip, 0);
4545727e1acdSChandan Babu R 
	/*
	 * If adding an extent would overflow the inode's extent counter, try
	 * upgrading to the larger counter format before giving up.
	 */
4546727e1acdSChandan Babu R 	error = xfs_iext_count_may_overflow(ip, whichfork,
4547727e1acdSChandan Babu R 			XFS_IEXT_ADD_NOSPLIT_CNT);
45484f86bb4bSChandan Babu R 	if (error == -EFBIG)
45494f86bb4bSChandan Babu R 		error = xfs_iext_count_upgrade(tp, ip,
45504f86bb4bSChandan Babu R 				XFS_IEXT_ADD_NOSPLIT_CNT);
4551727e1acdSChandan Babu R 	if (error)
4552727e1acdSChandan Babu R 		goto out_trans_cancel;
4553727e1acdSChandan Babu R 
4554d8ae82e3SChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4555d8ae82e3SChristoph Hellwig 	    bma.got.br_startoff > offset_fsb) {
4556d8ae82e3SChristoph Hellwig 		/*
4557d8ae82e3SChristoph Hellwig 		 * No extent found in the range we are trying to convert.  This
4558d8ae82e3SChristoph Hellwig 		 * should only happen for the COW fork, where another thread
4559d8ae82e3SChristoph Hellwig 		 * might have moved the extent to the data fork in the meantime.
4560d8ae82e3SChristoph Hellwig 		 */
4561d8ae82e3SChristoph Hellwig 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4562491ce61eSChristoph Hellwig 		error = -EAGAIN;
4563491ce61eSChristoph Hellwig 		goto out_trans_cancel;
4564d8ae82e3SChristoph Hellwig 	}
4565627209fbSBrian Foster 
4566627209fbSBrian Foster 	/*
4567d8ae82e3SChristoph Hellwig 	 * If we find a real extent here we raced with another thread converting
4568d8ae82e3SChristoph Hellwig 	 * the extent.  Just return the real extent at this offset.
4569627209fbSBrian Foster 	 */
4570d8ae82e3SChristoph Hellwig 	if (!isnullstartblock(bma.got.br_startblock)) {
4571304a68b9SDave Chinner 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4572304a68b9SDave Chinner 				xfs_iomap_inode_sequence(ip, flags));
4573491ce61eSChristoph Hellwig 		*seq = READ_ONCE(ifp->if_seq);
4574491ce61eSChristoph Hellwig 		goto out_trans_cancel;
4575d8ae82e3SChristoph Hellwig 	}
4576d8ae82e3SChristoph Hellwig 
4577d8ae82e3SChristoph Hellwig 	bma.tp = tp;
4578d8ae82e3SChristoph Hellwig 	bma.ip = ip;
4579d8ae82e3SChristoph Hellwig 	bma.wasdel = true;
4580d8ae82e3SChristoph Hellwig 	bma.offset = bma.got.br_startoff;
	/*
	 * Ask for as much as possible: xfs_bmapi_allocate() trims a wasdel
	 * allocation back to the delalloc extent's own size anyway.
	 */
458195f0b95eSChandan Babu R 	bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount,
458295f0b95eSChandan Babu R 			XFS_MAX_BMBT_EXTLEN);
4583d8ae82e3SChristoph Hellwig 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4584a5949d3fSDarrick J. Wong 
4585a5949d3fSDarrick J. Wong 	/*
4586a5949d3fSDarrick J. Wong 	 * When we're converting the delalloc reservations backing dirty pages
4587a5949d3fSDarrick J. Wong 	 * in the page cache, we must be careful about how we create the new
4588a5949d3fSDarrick J. Wong 	 * extents:
4589a5949d3fSDarrick J. Wong 	 *
4590a5949d3fSDarrick J. Wong 	 * New CoW fork extents are created unwritten, turned into real extents
4591a5949d3fSDarrick J. Wong 	 * when we're about to write the data to disk, and mapped into the data
4592a5949d3fSDarrick J. Wong 	 * fork after the write finishes.  End of story.
4593a5949d3fSDarrick J. Wong 	 *
4594a5949d3fSDarrick J. Wong 	 * New data fork extents must be mapped in as unwritten and converted
4595a5949d3fSDarrick J. Wong 	 * to real extents after the write succeeds to avoid exposing stale
4596a5949d3fSDarrick J. Wong 	 * disk contents if we crash.
4597a5949d3fSDarrick J. Wong 	 */
4598a5949d3fSDarrick J. Wong 	bma.flags = XFS_BMAPI_PREALLOC;
4599d8ae82e3SChristoph Hellwig 	if (whichfork == XFS_COW_FORK)
4600a5949d3fSDarrick J. Wong 		bma.flags |= XFS_BMAPI_COWFORK;
4601d8ae82e3SChristoph Hellwig 
4602d8ae82e3SChristoph Hellwig 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4603d8ae82e3SChristoph Hellwig 		bma.prev.br_startoff = NULLFILEOFF;
4604d8ae82e3SChristoph Hellwig 
4605d8ae82e3SChristoph Hellwig 	error = xfs_bmapi_allocate(&bma);
4606d8ae82e3SChristoph Hellwig 	if (error)
4607d8ae82e3SChristoph Hellwig 		goto out_finish;
4608d8ae82e3SChristoph Hellwig 
4609d8ae82e3SChristoph Hellwig 	error = -ENOSPC;
4610d8ae82e3SChristoph Hellwig 	if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4611d8ae82e3SChristoph Hellwig 		goto out_finish;
4612627209fbSBrian Foster 	error = -EFSCORRUPTED;
4613eb77b23bSChristoph Hellwig 	if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4614d8ae82e3SChristoph Hellwig 		goto out_finish;
4615d8ae82e3SChristoph Hellwig 
4616125851acSChristoph Hellwig 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4617125851acSChristoph Hellwig 	XFS_STATS_INC(mp, xs_xstrat_quick);
4618125851acSChristoph Hellwig 
4619d8ae82e3SChristoph Hellwig 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4620304a68b9SDave Chinner 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4621304a68b9SDave Chinner 				xfs_iomap_inode_sequence(ip, flags));
4622491ce61eSChristoph Hellwig 	*seq = READ_ONCE(ifp->if_seq);
4623d8ae82e3SChristoph Hellwig 
462474b4c5d4SDarrick J. Wong 	if (whichfork == XFS_COW_FORK)
462574b4c5d4SDarrick J. Wong 		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4626d8ae82e3SChristoph Hellwig 
4627d8ae82e3SChristoph Hellwig 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4628d8ae82e3SChristoph Hellwig 			whichfork);
4629491ce61eSChristoph Hellwig 	if (error)
4630491ce61eSChristoph Hellwig 		goto out_finish;
4631491ce61eSChristoph Hellwig 
4632491ce61eSChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, 0);
4633491ce61eSChristoph Hellwig 	error = xfs_trans_commit(tp);
4634491ce61eSChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4635491ce61eSChristoph Hellwig 	return error;
4636491ce61eSChristoph Hellwig 
4637d8ae82e3SChristoph Hellwig out_finish:
4638d8ae82e3SChristoph Hellwig 	xfs_bmapi_finish(&bma, whichfork, error);
4639491ce61eSChristoph Hellwig out_trans_cancel:
4640491ce61eSChristoph Hellwig 	xfs_trans_cancel(tp);
4641491ce61eSChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4642627209fbSBrian Foster 	return error;
4643627209fbSBrian Foster }
4644627209fbSBrian Foster 
/*
 * Map an already-allocated physical extent (@startblock, @len blocks) into
 * @ip at file offset @bno without allocating anything -- e.g. when remapping
 * reflinked blocks.  The destination range must be a hole (asserted below).
 * Updates i_nblocks and logs the inode core; the new extent is unwritten if
 * XFS_BMAPI_PREALLOC is set, written otherwise.
 */
46457cf199baSDarrick J. Wong int
46466ebd5a44SChristoph Hellwig xfs_bmapi_remap(
46476ebd5a44SChristoph Hellwig 	struct xfs_trans	*tp,
46486ebd5a44SChristoph Hellwig 	struct xfs_inode	*ip,
46496ebd5a44SChristoph Hellwig 	xfs_fileoff_t		bno,
46506ebd5a44SChristoph Hellwig 	xfs_filblks_t		len,
46516ebd5a44SChristoph Hellwig 	xfs_fsblock_t		startblock,
4652e7d410acSDave Chinner 	uint32_t		flags)
46536ebd5a44SChristoph Hellwig {
46546ebd5a44SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
46557cf199baSDarrick J. Wong 	struct xfs_ifork	*ifp;
46566ebd5a44SChristoph Hellwig 	struct xfs_btree_cur	*cur = NULL;
46576ebd5a44SChristoph Hellwig 	struct xfs_bmbt_irec	got;
4658b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
46597cf199baSDarrick J. Wong 	int			whichfork = xfs_bmapi_whichfork(flags);
46606ebd5a44SChristoph Hellwig 	int			logflags = 0, error;
46616ebd5a44SChristoph Hellwig 
4662732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
46636ebd5a44SChristoph Hellwig 	ASSERT(len > 0);
466495f0b95eSChandan Babu R 	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
46656ebd5a44SChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
46667644bd98SDarrick J. Wong 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
46677644bd98SDarrick J. Wong 			   XFS_BMAPI_NORMAP)));
46687644bd98SDarrick J. Wong 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
46697644bd98SDarrick J. Wong 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
46706ebd5a44SChristoph Hellwig 
4671f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4672a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
46736ebd5a44SChristoph Hellwig 		return -EFSCORRUPTED;
46746ebd5a44SChristoph Hellwig 	}
46756ebd5a44SChristoph Hellwig 
467675c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
46776ebd5a44SChristoph Hellwig 		return -EIO;
46786ebd5a44SChristoph Hellwig 
46797cf199baSDarrick J. Wong 	error = xfs_iread_extents(tp, ip, whichfork);
46806ebd5a44SChristoph Hellwig 	if (error)
46816ebd5a44SChristoph Hellwig 		return error;
46826ebd5a44SChristoph Hellwig 
4683b2b1712aSChristoph Hellwig 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
46846ebd5a44SChristoph Hellwig 		/* make sure we only reflink into a hole. */
46856ebd5a44SChristoph Hellwig 		ASSERT(got.br_startoff > bno);
46866ebd5a44SChristoph Hellwig 		ASSERT(got.br_startoff - bno >= len);
46876ebd5a44SChristoph Hellwig 	}
46886ebd5a44SChristoph Hellwig 
	/* The blocks already exist; just account them to this inode. */
46896e73a545SChristoph Hellwig 	ip->i_nblocks += len;
4690bf8eadbaSChristoph Hellwig 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
46916ebd5a44SChristoph Hellwig 
4692ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
46937cf199baSDarrick J. Wong 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
469492219c29SDave Chinner 		cur->bc_ino.flags = 0;
46956ebd5a44SChristoph Hellwig 	}
46966ebd5a44SChristoph Hellwig 
46976ebd5a44SChristoph Hellwig 	got.br_startoff = bno;
46986ebd5a44SChristoph Hellwig 	got.br_startblock = startblock;
46996ebd5a44SChristoph Hellwig 	got.br_blockcount = len;
47007644bd98SDarrick J. Wong 	if (flags & XFS_BMAPI_PREALLOC)
47017644bd98SDarrick J. Wong 		got.br_state = XFS_EXT_UNWRITTEN;
47027644bd98SDarrick J. Wong 	else
47036ebd5a44SChristoph Hellwig 		got.br_state = XFS_EXT_NORM;
47046ebd5a44SChristoph Hellwig 
47057cf199baSDarrick J. Wong 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
470692f9da30SBrian Foster 			&cur, &got, &logflags, flags);
47076ebd5a44SChristoph Hellwig 	if (error)
47086ebd5a44SChristoph Hellwig 		goto error0;
47096ebd5a44SChristoph Hellwig 
4710b101e334SChristoph Hellwig 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
47116ebd5a44SChristoph Hellwig 
47126ebd5a44SChristoph Hellwig error0:
	/*
	 * NOTE(review): the format checks below always inspect the data fork
	 * (i_df) and clear the data fork log flags, even though @flags may
	 * select the attr fork -- confirm this is intentional.
	 */
4713f7e67b20SChristoph Hellwig 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
47146ebd5a44SChristoph Hellwig 		logflags &= ~XFS_ILOG_DEXT;
4715f7e67b20SChristoph Hellwig 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
47166ebd5a44SChristoph Hellwig 		logflags &= ~XFS_ILOG_DBROOT;
47176ebd5a44SChristoph Hellwig 
47186ebd5a44SChristoph Hellwig 	if (logflags)
47196ebd5a44SChristoph Hellwig 		xfs_trans_log_inode(tp, ip, logflags);
47200b04b6b8SDarrick J. Wong 	if (cur)
47210b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
47226ebd5a44SChristoph Hellwig 	return error;
47236ebd5a44SChristoph Hellwig }
47246ebd5a44SChristoph Hellwig 
472530f712c9SDave Chinner /*
4726a9bd24acSBrian Foster  * When a delalloc extent is split (e.g., due to a hole punch), the original
4727a9bd24acSBrian Foster  * indlen reservation must be shared across the two new extents that are left
4728a9bd24acSBrian Foster  * behind.
4729a9bd24acSBrian Foster  *
4730a9bd24acSBrian Foster  * Given the original reservation and the worst case indlen for the two new
4731a9bd24acSBrian Foster  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4732d34999c9SBrian Foster  * reservation fairly across the two new extents. If necessary, steal available
4733d34999c9SBrian Foster  * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4734d34999c9SBrian Foster  * ores == 1). The number of stolen blocks is returned. The availability and
4735d34999c9SBrian Foster  * subsequent accounting of stolen blocks is the responsibility of the caller.
4736a9bd24acSBrian Foster  */
4737d34999c9SBrian Foster static xfs_filblks_t
4738a9bd24acSBrian Foster xfs_bmap_split_indlen(
4739a9bd24acSBrian Foster 	xfs_filblks_t			ores,		/* original res. */
4740a9bd24acSBrian Foster 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4741d34999c9SBrian Foster 	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
4742d34999c9SBrian Foster 	xfs_filblks_t			avail)		/* stealable blocks */
4743a9bd24acSBrian Foster {
4744a9bd24acSBrian Foster 	xfs_filblks_t			len1 = *indlen1;
4745a9bd24acSBrian Foster 	xfs_filblks_t			len2 = *indlen2;
4746a9bd24acSBrian Foster 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4747d34999c9SBrian Foster 	xfs_filblks_t			stolen = 0;
474875d65361SBrian Foster 	xfs_filblks_t			resfactor;	/* percent of nres coverable by ores */
4749a9bd24acSBrian Foster 
4750a9bd24acSBrian Foster 	/*
4751d34999c9SBrian Foster 	 * Steal as many blocks as we can to try and satisfy the worst case
4752d34999c9SBrian Foster 	 * indlen for both new extents.
4753d34999c9SBrian Foster 	 */
475475d65361SBrian Foster 	if (ores < nres && avail)
475575d65361SBrian Foster 		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
475675d65361SBrian Foster 	ores += stolen;
475775d65361SBrian Foster 
475875d65361SBrian Foster 	 /* nothing else to do if we've satisfied the new reservation */
475975d65361SBrian Foster 	if (ores >= nres)
476075d65361SBrian Foster 		return stolen;
4761d34999c9SBrian Foster 
4762d34999c9SBrian Foster 	/*
476375d65361SBrian Foster 	 * We can't meet the total required reservation for the two extents.
476475d65361SBrian Foster 	 * Calculate the percent of the overall shortage between both extents
476575d65361SBrian Foster 	 * and apply this percentage to each of the requested indlen values.
476675d65361SBrian Foster 	 * This distributes the shortage fairly and reduces the chances that one
476775d65361SBrian Foster 	 * of the two extents is left with nothing when extents are repeatedly
476875d65361SBrian Foster 	 * split.
4769a9bd24acSBrian Foster 	 */
477075d65361SBrian Foster 	resfactor = (ores * 100);
477175d65361SBrian Foster 	do_div(resfactor, nres);	/* resfactor = ores * 100 / nres */
477275d65361SBrian Foster 	len1 *= resfactor;
477375d65361SBrian Foster 	do_div(len1, 100);
477475d65361SBrian Foster 	len2 *= resfactor;
477575d65361SBrian Foster 	do_div(len2, 100);
	/* do_div() truncates, so the scaled sum can never exceed ores */
477675d65361SBrian Foster 	ASSERT(len1 + len2 <= ores);
477775d65361SBrian Foster 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
477875d65361SBrian Foster 
477975d65361SBrian Foster 	/*
478075d65361SBrian Foster 	 * Hand out the remainder to each extent. If one of the two reservations
478175d65361SBrian Foster 	 * is zero, we want to make sure that one gets a block first. The loop
478275d65361SBrian Foster 	 * below starts with len1, so hand len2 a block right off the bat if it
478375d65361SBrian Foster 	 * is zero.
478475d65361SBrian Foster 	 */
478575d65361SBrian Foster 	ores -= (len1 + len2);
478675d65361SBrian Foster 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
478775d65361SBrian Foster 	if (ores && !len2 && *indlen2) {
478875d65361SBrian Foster 		len2++;
478975d65361SBrian Foster 		ores--;
4790a9bd24acSBrian Foster 	}
	/* dole out the leftover blocks one at a time, alternating extents */
479175d65361SBrian Foster 	while (ores) {
479275d65361SBrian Foster 		if (len1 < *indlen1) {
479375d65361SBrian Foster 			len1++;
479475d65361SBrian Foster 			ores--;
479575d65361SBrian Foster 		}
479675d65361SBrian Foster 		if (!ores)
4797a9bd24acSBrian Foster 			break;
479875d65361SBrian Foster 		if (len2 < *indlen2) {
479975d65361SBrian Foster 			len2++;
480075d65361SBrian Foster 			ores--;
4801a9bd24acSBrian Foster 		}
4802a9bd24acSBrian Foster 	}
4803a9bd24acSBrian Foster 
4804a9bd24acSBrian Foster 	*indlen1 = len1;
4805a9bd24acSBrian Foster 	*indlen2 = len2;
4806d34999c9SBrian Foster 
4807d34999c9SBrian Foster 	return stolen;
4808a9bd24acSBrian Foster }
4809a9bd24acSBrian Foster 
/*
 * Remove the delayed-allocation range "del" from the in-core extent record
 * "got" (which must fully contain it; see the ASSERTs below).  Only the
 * incore extent list is touched.  The quota reservation and the inode's
 * i_delayed_blks are released up front; any reservation left over after
 * re-reserving indlen for the surviving pieces is returned to the free
 * space counters at the end.  Returns 0 or a negative errno from the
 * quota unreserve.
 */
4810fa5c836cSChristoph Hellwig int
4811fa5c836cSChristoph Hellwig xfs_bmap_del_extent_delay(
4812fa5c836cSChristoph Hellwig 	struct xfs_inode	*ip,
4813fa5c836cSChristoph Hellwig 	int			whichfork,
4814b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
4815fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*got,
4816fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*del)
4817fa5c836cSChristoph Hellwig {
4818fa5c836cSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
4819732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4820fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	new;
4821fa5c836cSChristoph Hellwig 	int64_t			da_old, da_new, da_diff = 0;
4822fa5c836cSChristoph Hellwig 	xfs_fileoff_t		del_endoff, got_endoff;
4823fa5c836cSChristoph Hellwig 	xfs_filblks_t		got_indlen, new_indlen, stolen;
48240e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4825060ea65bSChristoph Hellwig 	int			error = 0;
4826fa5c836cSChristoph Hellwig 	bool			isrt;
4827fa5c836cSChristoph Hellwig 
4828fa5c836cSChristoph Hellwig 	XFS_STATS_INC(mp, xs_del_exlist);
4829fa5c836cSChristoph Hellwig 
4830fa5c836cSChristoph Hellwig 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4831fa5c836cSChristoph Hellwig 	del_endoff = del->br_startoff + del->br_blockcount;
4832fa5c836cSChristoph Hellwig 	got_endoff = got->br_startoff + got->br_blockcount;
4833fa5c836cSChristoph Hellwig 	da_old = startblockval(got->br_startblock);
4834fa5c836cSChristoph Hellwig 	da_new = 0;
4835fa5c836cSChristoph Hellwig 
4836fa5c836cSChristoph Hellwig 	ASSERT(del->br_blockcount > 0);
4837fa5c836cSChristoph Hellwig 	ASSERT(got->br_startoff <= del->br_startoff);
4838fa5c836cSChristoph Hellwig 	ASSERT(got_endoff >= del_endoff);
4839fa5c836cSChristoph Hellwig 
4840fa5c836cSChristoph Hellwig 	if (isrt) {
48414f1adf33SEric Sandeen 		uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4842fa5c836cSChristoph Hellwig 
4843fa5c836cSChristoph Hellwig 		do_div(rtexts, mp->m_sb.sb_rextsize);
4844fa5c836cSChristoph Hellwig 		xfs_mod_frextents(mp, rtexts);
4845fa5c836cSChristoph Hellwig 	}
4846fa5c836cSChristoph Hellwig 
4847fa5c836cSChristoph Hellwig 	/*
4848fa5c836cSChristoph Hellwig 	 * Update the inode delalloc counter now and wait to update the
4849fa5c836cSChristoph Hellwig 	 * sb counters as we might have to borrow some blocks for the
4850fa5c836cSChristoph Hellwig 	 * indirect block accounting.
4851fa5c836cSChristoph Hellwig 	 */
	/*
	 * NOTE(review): the isrt branch above returns rt free extents, yet
	 * this assert says delalloc never exists on realtime files here --
	 * confirm whether the rt branch is dead code.
	 */
485285546500SDarrick J. Wong 	ASSERT(!isrt);
485385546500SDarrick J. Wong 	error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
48544fd29ec4SDarrick J. Wong 	if (error)
48554fd29ec4SDarrick J. Wong 		return error;
4856fa5c836cSChristoph Hellwig 	ip->i_delayed_blks -= del->br_blockcount;
4857fa5c836cSChristoph Hellwig 
4858fa5c836cSChristoph Hellwig 	if (got->br_startoff == del->br_startoff)
48590173c689SChristoph Hellwig 		state |= BMAP_LEFT_FILLING;
4860fa5c836cSChristoph Hellwig 	if (got_endoff == del_endoff)
48610173c689SChristoph Hellwig 		state |= BMAP_RIGHT_FILLING;
4862fa5c836cSChristoph Hellwig 
	/* four cases: delete all of got, its head, its tail, or its middle */
48630173c689SChristoph Hellwig 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
48640173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4865fa5c836cSChristoph Hellwig 		/*
4866fa5c836cSChristoph Hellwig 		 * Matches the whole extent.  Delete the entry.
4867fa5c836cSChristoph Hellwig 		 */
4868c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
4869b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
4870fa5c836cSChristoph Hellwig 		break;
48710173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING:
4872fa5c836cSChristoph Hellwig 		/*
4873fa5c836cSChristoph Hellwig 		 * Deleting the first part of the extent.
4874fa5c836cSChristoph Hellwig 		 */
4875fa5c836cSChristoph Hellwig 		got->br_startoff = del_endoff;
4876fa5c836cSChristoph Hellwig 		got->br_blockcount -= del->br_blockcount;
4877fa5c836cSChristoph Hellwig 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4878fa5c836cSChristoph Hellwig 				got->br_blockcount), da_old);
4879fa5c836cSChristoph Hellwig 		got->br_startblock = nullstartblock((int)da_new);
4880b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4881fa5c836cSChristoph Hellwig 		break;
48820173c689SChristoph Hellwig 	case BMAP_RIGHT_FILLING:
4883fa5c836cSChristoph Hellwig 		/*
4884fa5c836cSChristoph Hellwig 		 * Deleting the last part of the extent.
4885fa5c836cSChristoph Hellwig 		 */
4886fa5c836cSChristoph Hellwig 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
4887fa5c836cSChristoph Hellwig 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4888fa5c836cSChristoph Hellwig 				got->br_blockcount), da_old);
4889fa5c836cSChristoph Hellwig 		got->br_startblock = nullstartblock((int)da_new);
4890b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4891fa5c836cSChristoph Hellwig 		break;
4892fa5c836cSChristoph Hellwig 	case 0:
4893fa5c836cSChristoph Hellwig 		/*
4894fa5c836cSChristoph Hellwig 		 * Deleting the middle of the extent.
4895fa5c836cSChristoph Hellwig 		 *
4896fa5c836cSChristoph Hellwig 		 * Distribute the original indlen reservation across the two new
4897fa5c836cSChristoph Hellwig 		 * extents.  Steal blocks from the deleted extent if necessary.
4898fa5c836cSChristoph Hellwig 		 * Stealing blocks simply fudges the fdblocks accounting below.
4899fa5c836cSChristoph Hellwig 		 * Warn if either of the new indlen reservations is zero as this
4900fa5c836cSChristoph Hellwig 		 * can lead to delalloc problems.
4901fa5c836cSChristoph Hellwig 		 */
4902fa5c836cSChristoph Hellwig 		got->br_blockcount = del->br_startoff - got->br_startoff;
4903fa5c836cSChristoph Hellwig 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4904fa5c836cSChristoph Hellwig 
4905fa5c836cSChristoph Hellwig 		new.br_blockcount = got_endoff - del_endoff;
4906fa5c836cSChristoph Hellwig 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4907fa5c836cSChristoph Hellwig 
4908fa5c836cSChristoph Hellwig 		WARN_ON_ONCE(!got_indlen || !new_indlen);
4909fa5c836cSChristoph Hellwig 		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4910fa5c836cSChristoph Hellwig 						       del->br_blockcount);
4911fa5c836cSChristoph Hellwig 
4912fa5c836cSChristoph Hellwig 		got->br_startblock = nullstartblock((int)got_indlen);
4913fa5c836cSChristoph Hellwig 
4914fa5c836cSChristoph Hellwig 		new.br_startoff = del_endoff;
4915fa5c836cSChristoph Hellwig 		new.br_state = got->br_state;
4916fa5c836cSChristoph Hellwig 		new.br_startblock = nullstartblock((int)new_indlen);
4917fa5c836cSChristoph Hellwig 
4918b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4919b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
49200254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &new, state);
4921fa5c836cSChristoph Hellwig 
4922fa5c836cSChristoph Hellwig 		da_new = got_indlen + new_indlen - stolen;
4923fa5c836cSChristoph Hellwig 		del->br_blockcount -= stolen;
4924fa5c836cSChristoph Hellwig 		break;
4925fa5c836cSChristoph Hellwig 	}
4926fa5c836cSChristoph Hellwig 
	/*
	 * Return the unused part of the old indlen reservation (plus, for
	 * non-rt files, the freed delalloc blocks themselves) to fdblocks
	 * and the delalloc counter.
	 */
4927fa5c836cSChristoph Hellwig 	ASSERT(da_old >= da_new);
4928fa5c836cSChristoph Hellwig 	da_diff = da_old - da_new;
4929fa5c836cSChristoph Hellwig 	if (!isrt)
4930fa5c836cSChristoph Hellwig 		da_diff += del->br_blockcount;
49319fe82b8cSDarrick J. Wong 	if (da_diff) {
4932fa5c836cSChristoph Hellwig 		xfs_mod_fdblocks(mp, da_diff, false);
49339fe82b8cSDarrick J. Wong 		xfs_mod_delalloc(mp, -da_diff);
49349fe82b8cSDarrick J. Wong 	}
4935fa5c836cSChristoph Hellwig 	return error;
4936fa5c836cSChristoph Hellwig }
4937fa5c836cSChristoph Hellwig 
/*
 * Remove the range "del" from the COW fork extent record "got" (which must
 * fully contain it).  The extent must be real (not delalloc -- see the
 * ASSERT on br_startblock below), and only the incore extent list is
 * updated; no transaction or btree is involved here.
 */
4938fa5c836cSChristoph Hellwig void
4939fa5c836cSChristoph Hellwig xfs_bmap_del_extent_cow(
4940fa5c836cSChristoph Hellwig 	struct xfs_inode	*ip,
4941b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
4942fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*got,
4943fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	*del)
4944fa5c836cSChristoph Hellwig {
4945fa5c836cSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
4946732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
4947fa5c836cSChristoph Hellwig 	struct xfs_bmbt_irec	new;
4948fa5c836cSChristoph Hellwig 	xfs_fileoff_t		del_endoff, got_endoff;
49490e5b8e45SDave Chinner 	uint32_t		state = BMAP_COWFORK;
4950fa5c836cSChristoph Hellwig 
4951fa5c836cSChristoph Hellwig 	XFS_STATS_INC(mp, xs_del_exlist);
4952fa5c836cSChristoph Hellwig 
4953fa5c836cSChristoph Hellwig 	del_endoff = del->br_startoff + del->br_blockcount;
4954fa5c836cSChristoph Hellwig 	got_endoff = got->br_startoff + got->br_blockcount;
4955fa5c836cSChristoph Hellwig 
4956fa5c836cSChristoph Hellwig 	ASSERT(del->br_blockcount > 0);
4957fa5c836cSChristoph Hellwig 	ASSERT(got->br_startoff <= del->br_startoff);
4958fa5c836cSChristoph Hellwig 	ASSERT(got_endoff >= del_endoff);
4959fa5c836cSChristoph Hellwig 	ASSERT(!isnullstartblock(got->br_startblock));
4960fa5c836cSChristoph Hellwig 
4961fa5c836cSChristoph Hellwig 	if (got->br_startoff == del->br_startoff)
49620173c689SChristoph Hellwig 		state |= BMAP_LEFT_FILLING;
4963fa5c836cSChristoph Hellwig 	if (got_endoff == del_endoff)
49640173c689SChristoph Hellwig 		state |= BMAP_RIGHT_FILLING;
4965fa5c836cSChristoph Hellwig 
	/* four cases: delete all of got, its head, its tail, or its middle */
49660173c689SChristoph Hellwig 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
49670173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4968fa5c836cSChristoph Hellwig 		/*
4969fa5c836cSChristoph Hellwig 		 * Matches the whole extent.  Delete the entry.
4970fa5c836cSChristoph Hellwig 		 */
4971c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
4972b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
4973fa5c836cSChristoph Hellwig 		break;
49740173c689SChristoph Hellwig 	case BMAP_LEFT_FILLING:
4975fa5c836cSChristoph Hellwig 		/*
4976fa5c836cSChristoph Hellwig 		 * Deleting the first part of the extent.
4977fa5c836cSChristoph Hellwig 		 */
4978fa5c836cSChristoph Hellwig 		got->br_startoff = del_endoff;
4979fa5c836cSChristoph Hellwig 		got->br_blockcount -= del->br_blockcount;
4980fa5c836cSChristoph Hellwig 		got->br_startblock = del->br_startblock + del->br_blockcount;
4981b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4982fa5c836cSChristoph Hellwig 		break;
49830173c689SChristoph Hellwig 	case BMAP_RIGHT_FILLING:
4984fa5c836cSChristoph Hellwig 		/*
4985fa5c836cSChristoph Hellwig 		 * Deleting the last part of the extent.
4986fa5c836cSChristoph Hellwig 		 */
4987fa5c836cSChristoph Hellwig 		got->br_blockcount -= del->br_blockcount;
4988b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
4989fa5c836cSChristoph Hellwig 		break;
4990fa5c836cSChristoph Hellwig 	case 0:
4991fa5c836cSChristoph Hellwig 		/*
4992fa5c836cSChristoph Hellwig 		 * Deleting the middle of the extent.
4993fa5c836cSChristoph Hellwig 		 */
4994fa5c836cSChristoph Hellwig 		got->br_blockcount = del->br_startoff - got->br_startoff;
4995fa5c836cSChristoph Hellwig 
4996fa5c836cSChristoph Hellwig 		new.br_startoff = del_endoff;
4997fa5c836cSChristoph Hellwig 		new.br_blockcount = got_endoff - del_endoff;
4998fa5c836cSChristoph Hellwig 		new.br_state = got->br_state;
4999fa5c836cSChristoph Hellwig 		new.br_startblock = del->br_startblock + del->br_blockcount;
5000fa5c836cSChristoph Hellwig 
5001b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, got);
5002b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
50030254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &new, state);
5004fa5c836cSChristoph Hellwig 		break;
5005fa5c836cSChristoph Hellwig 	}
	/*
	 * NOTE(review): i_delayed_blks appears to account allocated COW fork
	 * blocks as well as delalloc -- confirm against the allocation side.
	 */
50064b4c1326SDarrick J. Wong 	ip->i_delayed_blks -= del->br_blockcount;
5007fa5c836cSChristoph Hellwig }
5008fa5c836cSChristoph Hellwig 
5009a9bd24acSBrian Foster /*
501030f712c9SDave Chinner  * Called by xfs_bmapi to update file extent records and the btree
5011e1d7553fSChristoph Hellwig  * after removing space.
 *
 * "del" must lie entirely within the extent at the current iext cursor
 * position (ASSERTed below).  On return *logflagsp carries the inode log
 * flags the caller must apply.  Returns 0 or a negative errno; -ENOSPC is
 * returned (with state rolled back) if a middle-of-extent delete would
 * require a btree split with no block reservation.
501230f712c9SDave Chinner  */
501330f712c9SDave Chinner STATIC int				/* error */
5014e1d7553fSChristoph Hellwig xfs_bmap_del_extent_real(
501530f712c9SDave Chinner 	xfs_inode_t		*ip,	/* incore inode pointer */
501630f712c9SDave Chinner 	xfs_trans_t		*tp,	/* current transaction pointer */
5017b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
5018ae127f08SDarrick J. Wong 	struct xfs_btree_cur	*cur,	/* if null, not a btree */
501930f712c9SDave Chinner 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
502030f712c9SDave Chinner 	int			*logflagsp, /* inode logging flags */
50214847acf8SDarrick J. Wong 	int			whichfork, /* data or attr fork */
5022e7d410acSDave Chinner 	uint32_t		bflags)	/* bmapi flags */
502330f712c9SDave Chinner {
502430f712c9SDave Chinner 	xfs_fsblock_t		del_endblock=0;	/* first block past del */
502530f712c9SDave Chinner 	xfs_fileoff_t		del_endoff;	/* first offset past del */
502630f712c9SDave Chinner 	int			do_fx;	/* free extent at end of routine */
502730f712c9SDave Chinner 	int			error;	/* error return value */
50281b24b633SChristoph Hellwig 	int			flags = 0;/* inode logging flags */
502948fd52b1SChristoph Hellwig 	struct xfs_bmbt_irec	got;	/* current extent entry */
503030f712c9SDave Chinner 	xfs_fileoff_t		got_endoff;	/* first offset past got */
503130f712c9SDave Chinner 	int			i;	/* temp state */
50323ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;	/* inode fork pointer */
503330f712c9SDave Chinner 	xfs_mount_t		*mp;	/* mount structure */
503430f712c9SDave Chinner 	xfs_filblks_t		nblks;	/* quota/sb block count */
503530f712c9SDave Chinner 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
503630f712c9SDave Chinner 	/* REFERENCED */
503730f712c9SDave Chinner 	uint			qfield;	/* quota field to update */
50380e5b8e45SDave Chinner 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
503948fd52b1SChristoph Hellwig 	struct xfs_bmbt_irec	old;
504030f712c9SDave Chinner 
5041ff6d6af2SBill O'Donnell 	mp = ip->i_mount;
5042ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_del_exlist);
504330f712c9SDave Chinner 
5044732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
504530f712c9SDave Chinner 	ASSERT(del->br_blockcount > 0);
5046b2b1712aSChristoph Hellwig 	xfs_iext_get_extent(ifp, icur, &got);
504730f712c9SDave Chinner 	ASSERT(got.br_startoff <= del->br_startoff);
504830f712c9SDave Chinner 	del_endoff = del->br_startoff + del->br_blockcount;
504930f712c9SDave Chinner 	got_endoff = got.br_startoff + got.br_blockcount;
505030f712c9SDave Chinner 	ASSERT(got_endoff >= del_endoff);
5051e1d7553fSChristoph Hellwig 	ASSERT(!isnullstartblock(got.br_startblock));
505230f712c9SDave Chinner 	qfield = 0;
505330f712c9SDave Chinner 	error = 0;
5054e1d7553fSChristoph Hellwig 
50551b24b633SChristoph Hellwig 	/*
50561b24b633SChristoph Hellwig 	 * If it's the case where the directory code is running with no block
50571b24b633SChristoph Hellwig 	 * reservation, and the deleted block is in the middle of its extent,
50581b24b633SChristoph Hellwig 	 * and the resulting insert of an extent would cause transformation to
50591b24b633SChristoph Hellwig 	 * btree format, then reject it.  The calling code will then swap blocks
50601b24b633SChristoph Hellwig 	 * around instead.  We have to do this now, rather than waiting for the
50611b24b633SChristoph Hellwig 	 * conversion to btree format, since the transaction will be dirty then.
50621b24b633SChristoph Hellwig 	 */
50631b24b633SChristoph Hellwig 	if (tp->t_blk_res == 0 &&
5064f7e67b20SChristoph Hellwig 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5065daf83964SChristoph Hellwig 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
50661b24b633SChristoph Hellwig 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
50671b24b633SChristoph Hellwig 		return -ENOSPC;
50681b24b633SChristoph Hellwig 
50691b24b633SChristoph Hellwig 	flags = XFS_ILOG_CORE;
	/*
	 * Realtime files: the freed range must be aligned to the rt extent
	 * size (the div_u64_rem() remainders are ASSERTed to be zero), and
	 * rt extents are freed synchronously here rather than deferred.
	 */
507030f712c9SDave Chinner 	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
507130f712c9SDave Chinner 		xfs_filblks_t	len;
50720703a8e1SDave Chinner 		xfs_extlen_t	mod;
507330f712c9SDave Chinner 
50740703a8e1SDave Chinner 		len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
50750703a8e1SDave Chinner 				  &mod);
50760703a8e1SDave Chinner 		ASSERT(mod == 0);
50770703a8e1SDave Chinner 
50788df0fa39SDarrick J. Wong 		if (!(bflags & XFS_BMAPI_REMAP)) {
50798df0fa39SDarrick J. Wong 			xfs_fsblock_t	bno;
50808df0fa39SDarrick J. Wong 
50818df0fa39SDarrick J. Wong 			bno = div_u64_rem(del->br_startblock,
50828df0fa39SDarrick J. Wong 					mp->m_sb.sb_rextsize, &mod);
50838df0fa39SDarrick J. Wong 			ASSERT(mod == 0);
50848df0fa39SDarrick J. Wong 
508530f712c9SDave Chinner 			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
508630f712c9SDave Chinner 			if (error)
508730f712c9SDave Chinner 				goto done;
50888df0fa39SDarrick J. Wong 		}
50898df0fa39SDarrick J. Wong 
509030f712c9SDave Chinner 		do_fx = 0;
509130f712c9SDave Chinner 		nblks = len * mp->m_sb.sb_rextsize;
509230f712c9SDave Chinner 		qfield = XFS_TRANS_DQ_RTBCOUNT;
5093e1d7553fSChristoph Hellwig 	} else {
509430f712c9SDave Chinner 		do_fx = 1;
509530f712c9SDave Chinner 		nblks = del->br_blockcount;
509630f712c9SDave Chinner 		qfield = XFS_TRANS_DQ_BCOUNT;
509730f712c9SDave Chinner 	}
5098e1d7553fSChristoph Hellwig 
	/* Position the bmap btree cursor at the existing record, if any. */
509930f712c9SDave Chinner 	del_endblock = del->br_startblock + del->br_blockcount;
510030f712c9SDave Chinner 	if (cur) {
5101e16cf9b0SChristoph Hellwig 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5102e1d7553fSChristoph Hellwig 		if (error)
510330f712c9SDave Chinner 			goto done;
5104f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5105f9e03706SDarrick J. Wong 			error = -EFSCORRUPTED;
5106f9e03706SDarrick J. Wong 			goto done;
5107f9e03706SDarrick J. Wong 		}
510830f712c9SDave Chinner 	}
5109340785ccSDarrick J. Wong 
5110491f6f8aSChristoph Hellwig 	if (got.br_startoff == del->br_startoff)
5111491f6f8aSChristoph Hellwig 		state |= BMAP_LEFT_FILLING;
5112491f6f8aSChristoph Hellwig 	if (got_endoff == del_endoff)
5113491f6f8aSChristoph Hellwig 		state |= BMAP_RIGHT_FILLING;
5114491f6f8aSChristoph Hellwig 
	/* four cases: delete all of got, its head, its tail, or its middle */
5115491f6f8aSChristoph Hellwig 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5116491f6f8aSChristoph Hellwig 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
511730f712c9SDave Chinner 		/*
511830f712c9SDave Chinner 		 * Matches the whole extent.  Delete the entry.
511930f712c9SDave Chinner 		 */
5120c38ccf59SChristoph Hellwig 		xfs_iext_remove(ip, icur, state);
5121b2b1712aSChristoph Hellwig 		xfs_iext_prev(ifp, icur);
5122daf83964SChristoph Hellwig 		ifp->if_nextents--;
5123daf83964SChristoph Hellwig 
512430f712c9SDave Chinner 		flags |= XFS_ILOG_CORE;
512530f712c9SDave Chinner 		if (!cur) {
512630f712c9SDave Chinner 			flags |= xfs_ilog_fext(whichfork);
512730f712c9SDave Chinner 			break;
512830f712c9SDave Chinner 		}
512930f712c9SDave Chinner 		if ((error = xfs_btree_delete(cur, &i)))
513030f712c9SDave Chinner 			goto done;
5131f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5132f9e03706SDarrick J. Wong 			error = -EFSCORRUPTED;
5133f9e03706SDarrick J. Wong 			goto done;
5134f9e03706SDarrick J. Wong 		}
513530f712c9SDave Chinner 		break;
5136491f6f8aSChristoph Hellwig 	case BMAP_LEFT_FILLING:
513730f712c9SDave Chinner 		/*
513830f712c9SDave Chinner 		 * Deleting the first part of the extent.
513930f712c9SDave Chinner 		 */
514048fd52b1SChristoph Hellwig 		got.br_startoff = del_endoff;
514148fd52b1SChristoph Hellwig 		got.br_startblock = del_endblock;
514248fd52b1SChristoph Hellwig 		got.br_blockcount -= del->br_blockcount;
5143b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &got);
514430f712c9SDave Chinner 		if (!cur) {
514530f712c9SDave Chinner 			flags |= xfs_ilog_fext(whichfork);
514630f712c9SDave Chinner 			break;
514730f712c9SDave Chinner 		}
5148a67d00a5SChristoph Hellwig 		error = xfs_bmbt_update(cur, &got);
514948fd52b1SChristoph Hellwig 		if (error)
515030f712c9SDave Chinner 			goto done;
515130f712c9SDave Chinner 		break;
5152491f6f8aSChristoph Hellwig 	case BMAP_RIGHT_FILLING:
515330f712c9SDave Chinner 		/*
515430f712c9SDave Chinner 		 * Deleting the last part of the extent.
515530f712c9SDave Chinner 		 */
515648fd52b1SChristoph Hellwig 		got.br_blockcount -= del->br_blockcount;
5157b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &got);
515830f712c9SDave Chinner 		if (!cur) {
515930f712c9SDave Chinner 			flags |= xfs_ilog_fext(whichfork);
516030f712c9SDave Chinner 			break;
516130f712c9SDave Chinner 		}
5162a67d00a5SChristoph Hellwig 		error = xfs_bmbt_update(cur, &got);
516348fd52b1SChristoph Hellwig 		if (error)
516430f712c9SDave Chinner 			goto done;
516530f712c9SDave Chinner 		break;
516630f712c9SDave Chinner 	case 0:
516730f712c9SDave Chinner 		/*
516830f712c9SDave Chinner 		 * Deleting the middle of the extent.
516930f712c9SDave Chinner 		 */
51700dbc5cb1SChandan Babu R 
517148fd52b1SChristoph Hellwig 		old = got;
5172ca5d8e5bSChristoph Hellwig 
517348fd52b1SChristoph Hellwig 		got.br_blockcount = del->br_startoff - got.br_startoff;
5174b2b1712aSChristoph Hellwig 		xfs_iext_update_extent(ip, state, icur, &got);
517548fd52b1SChristoph Hellwig 
517630f712c9SDave Chinner 		new.br_startoff = del_endoff;
517748fd52b1SChristoph Hellwig 		new.br_blockcount = got_endoff - del_endoff;
517830f712c9SDave Chinner 		new.br_state = got.br_state;
517930f712c9SDave Chinner 		new.br_startblock = del_endblock;
518048fd52b1SChristoph Hellwig 
518130f712c9SDave Chinner 		flags |= XFS_ILOG_CORE;
518230f712c9SDave Chinner 		if (cur) {
5183a67d00a5SChristoph Hellwig 			error = xfs_bmbt_update(cur, &got);
5184e1d7553fSChristoph Hellwig 			if (error)
518530f712c9SDave Chinner 				goto done;
5186e1d7553fSChristoph Hellwig 			error = xfs_btree_increment(cur, 0, &i);
5187e1d7553fSChristoph Hellwig 			if (error)
518830f712c9SDave Chinner 				goto done;
518930f712c9SDave Chinner 			cur->bc_rec.b = new;
519030f712c9SDave Chinner 			error = xfs_btree_insert(cur, &i);
51912451337dSDave Chinner 			if (error && error != -ENOSPC)
519230f712c9SDave Chinner 				goto done;
519330f712c9SDave Chinner 			/*
5194e1d7553fSChristoph Hellwig 			 * If get no-space back from btree insert, it tried a
5195e1d7553fSChristoph Hellwig 			 * split, and we have a zero block reservation.  Fix up
5196e1d7553fSChristoph Hellwig 			 * our state and return the error.
519730f712c9SDave Chinner 			 */
51982451337dSDave Chinner 			if (error == -ENOSPC) {
519930f712c9SDave Chinner 				/*
5200e1d7553fSChristoph Hellwig 				 * Reset the cursor, don't trust it after any
5201e1d7553fSChristoph Hellwig 				 * insert operation.
520230f712c9SDave Chinner 				 */
5203e16cf9b0SChristoph Hellwig 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5204e1d7553fSChristoph Hellwig 				if (error)
520530f712c9SDave Chinner 					goto done;
5206f9e03706SDarrick J. Wong 				if (XFS_IS_CORRUPT(mp, i != 1)) {
5207f9e03706SDarrick J. Wong 					error = -EFSCORRUPTED;
5208f9e03706SDarrick J. Wong 					goto done;
5209f9e03706SDarrick J. Wong 				}
521030f712c9SDave Chinner 				/*
521130f712c9SDave Chinner 				 * Update the btree record back
521230f712c9SDave Chinner 				 * to the original value.
521330f712c9SDave Chinner 				 */
5214a67d00a5SChristoph Hellwig 				error = xfs_bmbt_update(cur, &old);
5215e1d7553fSChristoph Hellwig 				if (error)
521630f712c9SDave Chinner 					goto done;
521730f712c9SDave Chinner 				/*
521830f712c9SDave Chinner 				 * Reset the extent record back
521930f712c9SDave Chinner 				 * to the original value.
522030f712c9SDave Chinner 				 */
5221b2b1712aSChristoph Hellwig 				xfs_iext_update_extent(ip, state, icur, &old);
522230f712c9SDave Chinner 				flags = 0;
52232451337dSDave Chinner 				error = -ENOSPC;
522430f712c9SDave Chinner 				goto done;
522530f712c9SDave Chinner 			}
5226f9e03706SDarrick J. Wong 			if (XFS_IS_CORRUPT(mp, i != 1)) {
5227f9e03706SDarrick J. Wong 				error = -EFSCORRUPTED;
5228f9e03706SDarrick J. Wong 				goto done;
5229f9e03706SDarrick J. Wong 			}
523030f712c9SDave Chinner 		} else
523130f712c9SDave Chinner 			flags |= xfs_ilog_fext(whichfork);
5232daf83964SChristoph Hellwig 
5233daf83964SChristoph Hellwig 		ifp->if_nextents++;
5234b2b1712aSChristoph Hellwig 		xfs_iext_next(ifp, icur);
52350254c2f2SChristoph Hellwig 		xfs_iext_insert(ip, icur, &new, state);
523630f712c9SDave Chinner 		break;
523730f712c9SDave Chinner 	}
52389c194644SDarrick J. Wong 
52399c194644SDarrick J. Wong 	/* remove reverse mapping */
5240bc46ac64SDarrick J. Wong 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
52419c194644SDarrick J. Wong 
524230f712c9SDave Chinner 	/*
524330f712c9SDave Chinner 	 * If we need to, add to list of extents to delete.
	 * Shared (reflinked) data blocks only get their refcount dropped;
	 * otherwise the blocks are queued for deferred freeing.
524430f712c9SDave Chinner 	 */
52454847acf8SDarrick J. Wong 	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
524662aab20fSDarrick J. Wong 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
524774b4c5d4SDarrick J. Wong 			xfs_refcount_decrease_extent(tp, del);
5248fcb762f5SBrian Foster 		} else {
5249c201d9caSDarrick J. Wong 			__xfs_free_extent_later(tp, del->br_startblock,
52504e529339SBrian Foster 					del->br_blockcount, NULL,
52514e529339SBrian Foster 					(bflags & XFS_BMAPI_NODISCARD) ||
52524e529339SBrian Foster 					del->br_state == XFS_EXT_UNWRITTEN);
5253fcb762f5SBrian Foster 		}
5254fcb762f5SBrian Foster 	}
525562aab20fSDarrick J. Wong 
525630f712c9SDave Chinner 	/*
525730f712c9SDave Chinner 	 * Adjust inode # blocks in the file.
525830f712c9SDave Chinner 	 */
525930f712c9SDave Chinner 	if (nblks)
52606e73a545SChristoph Hellwig 		ip->i_nblocks -= nblks;
526130f712c9SDave Chinner 	/*
526230f712c9SDave Chinner 	 * Adjust quota data.
526330f712c9SDave Chinner 	 */
52644847acf8SDarrick J. Wong 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
526530f712c9SDave Chinner 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
526630f712c9SDave Chinner 
526730f712c9SDave Chinner done:
526830f712c9SDave Chinner 	*logflagsp = flags;
526930f712c9SDave Chinner 	return error;
527030f712c9SDave Chinner }
527130f712c9SDave Chinner 
527230f712c9SDave Chinner /*
527330f712c9SDave Chinner  * Unmap (remove) blocks from a file.
527430f712c9SDave Chinner  * If nexts is nonzero then the number of extents to remove is limited to
527530f712c9SDave Chinner  * that value.  If not all extents in the block range can be removed then
527630f712c9SDave Chinner  * *done is set.
527730f712c9SDave Chinner  */
527830f712c9SDave Chinner int						/* error */
52794453593bSDarrick J. Wong __xfs_bunmapi(
5280ccd9d911SBrian Foster 	struct xfs_trans	*tp,		/* transaction pointer */
528130f712c9SDave Chinner 	struct xfs_inode	*ip,		/* incore inode */
52828280f6edSChristoph Hellwig 	xfs_fileoff_t		start,		/* first file offset deleted */
52834453593bSDarrick J. Wong 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5284e7d410acSDave Chinner 	uint32_t		flags,		/* misc flags */
52852af52842SBrian Foster 	xfs_extnum_t		nexts)		/* number of extents max */
528630f712c9SDave Chinner {
5287ccd9d911SBrian Foster 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5288ccd9d911SBrian Foster 	struct xfs_bmbt_irec	del;		/* extent being deleted */
528930f712c9SDave Chinner 	int			error;		/* error return value */
529030f712c9SDave Chinner 	xfs_extnum_t		extno;		/* extent number in list */
5291ccd9d911SBrian Foster 	struct xfs_bmbt_irec	got;		/* current extent record */
52923ba738dfSChristoph Hellwig 	struct xfs_ifork	*ifp;		/* inode fork pointer */
529330f712c9SDave Chinner 	int			isrt;		/* freeing in rt area */
529430f712c9SDave Chinner 	int			logflags;	/* transaction logging flags */
529530f712c9SDave Chinner 	xfs_extlen_t		mod;		/* rt extent offset */
5296a71895c5SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
529730f712c9SDave Chinner 	int			tmp_logflags;	/* partial logging flags */
529830f712c9SDave Chinner 	int			wasdel;		/* was a delayed alloc extent */
529930f712c9SDave Chinner 	int			whichfork;	/* data or attribute fork */
530030f712c9SDave Chinner 	xfs_fsblock_t		sum;
53014453593bSDarrick J. Wong 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
53028280f6edSChristoph Hellwig 	xfs_fileoff_t		end;
5303b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
5304b2b1712aSChristoph Hellwig 	bool			done = false;
530530f712c9SDave Chinner 
53068280f6edSChristoph Hellwig 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
530730f712c9SDave Chinner 
53083993baebSDarrick J. Wong 	whichfork = xfs_bmapi_whichfork(flags);
53093993baebSDarrick J. Wong 	ASSERT(whichfork != XFS_COW_FORK);
5310732436efSDarrick J. Wong 	ifp = xfs_ifork_ptr(ip, whichfork);
5311f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
53122451337dSDave Chinner 		return -EFSCORRUPTED;
531375c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
53142451337dSDave Chinner 		return -EIO;
531530f712c9SDave Chinner 
531630f712c9SDave Chinner 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
531730f712c9SDave Chinner 	ASSERT(len > 0);
531830f712c9SDave Chinner 	ASSERT(nexts >= 0);
531930f712c9SDave Chinner 
5320862a804aSChristoph Hellwig 	error = xfs_iread_extents(tp, ip, whichfork);
5321862a804aSChristoph Hellwig 	if (error)
532230f712c9SDave Chinner 		return error;
5323862a804aSChristoph Hellwig 
53245d829300SEric Sandeen 	if (xfs_iext_count(ifp) == 0) {
53254453593bSDarrick J. Wong 		*rlen = 0;
532630f712c9SDave Chinner 		return 0;
532730f712c9SDave Chinner 	}
5328ff6d6af2SBill O'Donnell 	XFS_STATS_INC(mp, xs_blk_unmap);
532930f712c9SDave Chinner 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5330dc56015fSChristoph Hellwig 	end = start + len;
533130f712c9SDave Chinner 
5332b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5333dc56015fSChristoph Hellwig 		*rlen = 0;
5334dc56015fSChristoph Hellwig 		return 0;
533530f712c9SDave Chinner 	}
5336dc56015fSChristoph Hellwig 	end--;
53377efc7945SChristoph Hellwig 
533830f712c9SDave Chinner 	logflags = 0;
5339ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5340f7e67b20SChristoph Hellwig 		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
534130f712c9SDave Chinner 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
534292219c29SDave Chinner 		cur->bc_ino.flags = 0;
534330f712c9SDave Chinner 	} else
534430f712c9SDave Chinner 		cur = NULL;
534530f712c9SDave Chinner 
534630f712c9SDave Chinner 	if (isrt) {
534730f712c9SDave Chinner 		/*
534830f712c9SDave Chinner 		 * Synchronize by locking the bitmap inode.
534930f712c9SDave Chinner 		 */
5350f4a0660dSDarrick J. Wong 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
535130f712c9SDave Chinner 		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5352f4a0660dSDarrick J. Wong 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5353f4a0660dSDarrick J. Wong 		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
535430f712c9SDave Chinner 	}
535530f712c9SDave Chinner 
535630f712c9SDave Chinner 	extno = 0;
5357b2b1712aSChristoph Hellwig 	while (end != (xfs_fileoff_t)-1 && end >= start &&
53584ed6435cSDarrick J. Wong 	       (nexts == 0 || extno < nexts)) {
535930f712c9SDave Chinner 		/*
53608280f6edSChristoph Hellwig 		 * Is the found extent after a hole in which end lives?
536130f712c9SDave Chinner 		 * Just back up to the previous extent, if so.
536230f712c9SDave Chinner 		 */
5363b2b1712aSChristoph Hellwig 		if (got.br_startoff > end &&
5364b2b1712aSChristoph Hellwig 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5365b2b1712aSChristoph Hellwig 			done = true;
536630f712c9SDave Chinner 			break;
536730f712c9SDave Chinner 		}
536830f712c9SDave Chinner 		/*
536930f712c9SDave Chinner 		 * Is the last block of this extent before the range
537030f712c9SDave Chinner 		 * we're supposed to delete?  If so, we're done.
537130f712c9SDave Chinner 		 */
53728280f6edSChristoph Hellwig 		end = XFS_FILEOFF_MIN(end,
537330f712c9SDave Chinner 			got.br_startoff + got.br_blockcount - 1);
53748280f6edSChristoph Hellwig 		if (end < start)
537530f712c9SDave Chinner 			break;
537630f712c9SDave Chinner 		/*
537730f712c9SDave Chinner 		 * Then deal with the (possibly delayed) allocated space
537830f712c9SDave Chinner 		 * we found.
537930f712c9SDave Chinner 		 */
538030f712c9SDave Chinner 		del = got;
538130f712c9SDave Chinner 		wasdel = isnullstartblock(del.br_startblock);
53825b094d6dSChristoph Hellwig 
538330f712c9SDave Chinner 		if (got.br_startoff < start) {
538430f712c9SDave Chinner 			del.br_startoff = start;
538530f712c9SDave Chinner 			del.br_blockcount -= start - got.br_startoff;
538630f712c9SDave Chinner 			if (!wasdel)
538730f712c9SDave Chinner 				del.br_startblock += start - got.br_startoff;
538830f712c9SDave Chinner 		}
53898280f6edSChristoph Hellwig 		if (del.br_startoff + del.br_blockcount > end + 1)
53908280f6edSChristoph Hellwig 			del.br_blockcount = end + 1 - del.br_startoff;
5391e1a4e37cSDarrick J. Wong 
53920703a8e1SDave Chinner 		if (!isrt)
53930703a8e1SDave Chinner 			goto delete;
53940703a8e1SDave Chinner 
539530f712c9SDave Chinner 		sum = del.br_startblock + del.br_blockcount;
53960703a8e1SDave Chinner 		div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
53970703a8e1SDave Chinner 		if (mod) {
539830f712c9SDave Chinner 			/*
539930f712c9SDave Chinner 			 * Realtime extent not lined up at the end.
540030f712c9SDave Chinner 			 * The extent could have been split into written
540130f712c9SDave Chinner 			 * and unwritten pieces, or we could just be
540230f712c9SDave Chinner 			 * unmapping part of it.  But we can't really
540330f712c9SDave Chinner 			 * get rid of part of a realtime extent.
540430f712c9SDave Chinner 			 */
5405daa79baeSChristoph Hellwig 			if (del.br_state == XFS_EXT_UNWRITTEN) {
540630f712c9SDave Chinner 				/*
540730f712c9SDave Chinner 				 * This piece is unwritten, or we're not
540830f712c9SDave Chinner 				 * using unwritten extents.  Skip over it.
540930f712c9SDave Chinner 				 */
54108280f6edSChristoph Hellwig 				ASSERT(end >= mod);
54118280f6edSChristoph Hellwig 				end -= mod > del.br_blockcount ?
541230f712c9SDave Chinner 					del.br_blockcount : mod;
5413b2b1712aSChristoph Hellwig 				if (end < got.br_startoff &&
5414b2b1712aSChristoph Hellwig 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5415b2b1712aSChristoph Hellwig 					done = true;
5416b2b1712aSChristoph Hellwig 					break;
541730f712c9SDave Chinner 				}
541830f712c9SDave Chinner 				continue;
541930f712c9SDave Chinner 			}
542030f712c9SDave Chinner 			/*
542130f712c9SDave Chinner 			 * It's written, turn it unwritten.
542230f712c9SDave Chinner 			 * This is better than zeroing it.
542330f712c9SDave Chinner 			 */
542430f712c9SDave Chinner 			ASSERT(del.br_state == XFS_EXT_NORM);
5425a7e5d03bSChristoph Hellwig 			ASSERT(tp->t_blk_res > 0);
542630f712c9SDave Chinner 			/*
542730f712c9SDave Chinner 			 * If this spans a realtime extent boundary,
542830f712c9SDave Chinner 			 * chop it back to the start of the one we end at.
542930f712c9SDave Chinner 			 */
543030f712c9SDave Chinner 			if (del.br_blockcount > mod) {
543130f712c9SDave Chinner 				del.br_startoff += del.br_blockcount - mod;
543230f712c9SDave Chinner 				del.br_startblock += del.br_blockcount - mod;
543330f712c9SDave Chinner 				del.br_blockcount = mod;
543430f712c9SDave Chinner 			}
543530f712c9SDave Chinner 			del.br_state = XFS_EXT_UNWRITTEN;
543630f712c9SDave Chinner 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5437b2b1712aSChristoph Hellwig 					whichfork, &icur, &cur, &del,
543892f9da30SBrian Foster 					&logflags);
543930f712c9SDave Chinner 			if (error)
544030f712c9SDave Chinner 				goto error0;
544130f712c9SDave Chinner 			goto nodelete;
544230f712c9SDave Chinner 		}
54430703a8e1SDave Chinner 		div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
54440703a8e1SDave Chinner 		if (mod) {
54450c4da70cSOmar Sandoval 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
54460c4da70cSOmar Sandoval 
544730f712c9SDave Chinner 			/*
544830f712c9SDave Chinner 			 * Realtime extent is lined up at the end but not
544930f712c9SDave Chinner 			 * at the front.  We'll get rid of full extents if
545030f712c9SDave Chinner 			 * we can.
545130f712c9SDave Chinner 			 */
54520c4da70cSOmar Sandoval 			if (del.br_blockcount > off) {
54530c4da70cSOmar Sandoval 				del.br_blockcount -= off;
54540c4da70cSOmar Sandoval 				del.br_startoff += off;
54550c4da70cSOmar Sandoval 				del.br_startblock += off;
5456daa79baeSChristoph Hellwig 			} else if (del.br_startoff == start &&
545730f712c9SDave Chinner 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5458daa79baeSChristoph Hellwig 				    tp->t_blk_res == 0)) {
545930f712c9SDave Chinner 				/*
546030f712c9SDave Chinner 				 * Can't make it unwritten.  There isn't
546130f712c9SDave Chinner 				 * a full extent here so just skip it.
546230f712c9SDave Chinner 				 */
54638280f6edSChristoph Hellwig 				ASSERT(end >= del.br_blockcount);
54648280f6edSChristoph Hellwig 				end -= del.br_blockcount;
5465b2b1712aSChristoph Hellwig 				if (got.br_startoff > end &&
5466b2b1712aSChristoph Hellwig 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5467b2b1712aSChristoph Hellwig 					done = true;
5468b2b1712aSChristoph Hellwig 					break;
5469b2b1712aSChristoph Hellwig 				}
547030f712c9SDave Chinner 				continue;
547130f712c9SDave Chinner 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
54727efc7945SChristoph Hellwig 				struct xfs_bmbt_irec	prev;
54730c4da70cSOmar Sandoval 				xfs_fileoff_t		unwrite_start;
54747efc7945SChristoph Hellwig 
547530f712c9SDave Chinner 				/*
547630f712c9SDave Chinner 				 * This one is already unwritten.
547730f712c9SDave Chinner 				 * It must have a written left neighbor.
547830f712c9SDave Chinner 				 * Unwrite the killed part of that one and
547930f712c9SDave Chinner 				 * try again.
548030f712c9SDave Chinner 				 */
5481b2b1712aSChristoph Hellwig 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5482b2b1712aSChristoph Hellwig 					ASSERT(0);
548330f712c9SDave Chinner 				ASSERT(prev.br_state == XFS_EXT_NORM);
548430f712c9SDave Chinner 				ASSERT(!isnullstartblock(prev.br_startblock));
548530f712c9SDave Chinner 				ASSERT(del.br_startblock ==
548630f712c9SDave Chinner 				       prev.br_startblock + prev.br_blockcount);
54870c4da70cSOmar Sandoval 				unwrite_start = max3(start,
54880c4da70cSOmar Sandoval 						     del.br_startoff - mod,
54890c4da70cSOmar Sandoval 						     prev.br_startoff);
54900c4da70cSOmar Sandoval 				mod = unwrite_start - prev.br_startoff;
54910c4da70cSOmar Sandoval 				prev.br_startoff = unwrite_start;
549230f712c9SDave Chinner 				prev.br_startblock += mod;
54930c4da70cSOmar Sandoval 				prev.br_blockcount -= mod;
549430f712c9SDave Chinner 				prev.br_state = XFS_EXT_UNWRITTEN;
549530f712c9SDave Chinner 				error = xfs_bmap_add_extent_unwritten_real(tp,
5496b2b1712aSChristoph Hellwig 						ip, whichfork, &icur, &cur,
549792f9da30SBrian Foster 						&prev, &logflags);
549830f712c9SDave Chinner 				if (error)
549930f712c9SDave Chinner 					goto error0;
550030f712c9SDave Chinner 				goto nodelete;
550130f712c9SDave Chinner 			} else {
550230f712c9SDave Chinner 				ASSERT(del.br_state == XFS_EXT_NORM);
550330f712c9SDave Chinner 				del.br_state = XFS_EXT_UNWRITTEN;
550430f712c9SDave Chinner 				error = xfs_bmap_add_extent_unwritten_real(tp,
5505b2b1712aSChristoph Hellwig 						ip, whichfork, &icur, &cur,
550692f9da30SBrian Foster 						&del, &logflags);
550730f712c9SDave Chinner 				if (error)
550830f712c9SDave Chinner 					goto error0;
550930f712c9SDave Chinner 				goto nodelete;
551030f712c9SDave Chinner 			}
551130f712c9SDave Chinner 		}
551230f712c9SDave Chinner 
55130703a8e1SDave Chinner delete:
5514e1d7553fSChristoph Hellwig 		if (wasdel) {
5515b2b1712aSChristoph Hellwig 			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5516e1d7553fSChristoph Hellwig 					&got, &del);
5517e1d7553fSChristoph Hellwig 		} else {
551881ba8f3eSBrian Foster 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
551981ba8f3eSBrian Foster 					&del, &tmp_logflags, whichfork,
5520e1d7553fSChristoph Hellwig 					flags);
552130f712c9SDave Chinner 			logflags |= tmp_logflags;
5522e1d7553fSChristoph Hellwig 		}
5523e1d7553fSChristoph Hellwig 
552430f712c9SDave Chinner 		if (error)
552530f712c9SDave Chinner 			goto error0;
5526b2706a05SBrian Foster 
55278280f6edSChristoph Hellwig 		end = del.br_startoff - 1;
552830f712c9SDave Chinner nodelete:
552930f712c9SDave Chinner 		/*
553030f712c9SDave Chinner 		 * If not done go on to the next (previous) record.
553130f712c9SDave Chinner 		 */
55328280f6edSChristoph Hellwig 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5533b2b1712aSChristoph Hellwig 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5534b2b1712aSChristoph Hellwig 			    (got.br_startoff > end &&
5535b2b1712aSChristoph Hellwig 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5536b2b1712aSChristoph Hellwig 				done = true;
5537b2b1712aSChristoph Hellwig 				break;
553830f712c9SDave Chinner 			}
553930f712c9SDave Chinner 			extno++;
554030f712c9SDave Chinner 		}
554130f712c9SDave Chinner 	}
5542b2b1712aSChristoph Hellwig 	if (done || end == (xfs_fileoff_t)-1 || end < start)
55434453593bSDarrick J. Wong 		*rlen = 0;
55444453593bSDarrick J. Wong 	else
55458280f6edSChristoph Hellwig 		*rlen = end - start + 1;
554630f712c9SDave Chinner 
554730f712c9SDave Chinner 	/*
554830f712c9SDave Chinner 	 * Convert to a btree if necessary.
554930f712c9SDave Chinner 	 */
555030f712c9SDave Chinner 	if (xfs_bmap_needs_btree(ip, whichfork)) {
555130f712c9SDave Chinner 		ASSERT(cur == NULL);
5552280253d2SBrian Foster 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5553280253d2SBrian Foster 				&tmp_logflags, whichfork);
555430f712c9SDave Chinner 		logflags |= tmp_logflags;
5555b101e334SChristoph Hellwig 	} else {
5556b101e334SChristoph Hellwig 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
555730f712c9SDave Chinner 			whichfork);
555830f712c9SDave Chinner 	}
5559b101e334SChristoph Hellwig 
556030f712c9SDave Chinner error0:
556130f712c9SDave Chinner 	/*
556230f712c9SDave Chinner 	 * Log everything.  Do this after conversion, there's no point in
556330f712c9SDave Chinner 	 * logging the extent records if we've converted to btree format.
556430f712c9SDave Chinner 	 */
556530f712c9SDave Chinner 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5566f7e67b20SChristoph Hellwig 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
556730f712c9SDave Chinner 		logflags &= ~xfs_ilog_fext(whichfork);
556830f712c9SDave Chinner 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5569f7e67b20SChristoph Hellwig 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
557030f712c9SDave Chinner 		logflags &= ~xfs_ilog_fbroot(whichfork);
557130f712c9SDave Chinner 	/*
557230f712c9SDave Chinner 	 * Log inode even in the error case, if the transaction
557330f712c9SDave Chinner 	 * is dirty we'll need to shut down the filesystem.
557430f712c9SDave Chinner 	 */
557530f712c9SDave Chinner 	if (logflags)
557630f712c9SDave Chinner 		xfs_trans_log_inode(tp, ip, logflags);
557730f712c9SDave Chinner 	if (cur) {
5578cf612de7SBrian Foster 		if (!error)
557992219c29SDave Chinner 			cur->bc_ino.allocated = 0;
55800b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
558130f712c9SDave Chinner 	}
558230f712c9SDave Chinner 	return error;
558330f712c9SDave Chinner }
558430f712c9SDave Chinner 
55854453593bSDarrick J. Wong /* Unmap a range of a file. */
55864453593bSDarrick J. Wong int
55874453593bSDarrick J. Wong xfs_bunmapi(
55884453593bSDarrick J. Wong 	xfs_trans_t		*tp,
55894453593bSDarrick J. Wong 	struct xfs_inode	*ip,
55904453593bSDarrick J. Wong 	xfs_fileoff_t		bno,
55914453593bSDarrick J. Wong 	xfs_filblks_t		len,
5592e7d410acSDave Chinner 	uint32_t		flags,
55934453593bSDarrick J. Wong 	xfs_extnum_t		nexts,
55944453593bSDarrick J. Wong 	int			*done)
55954453593bSDarrick J. Wong {
55964453593bSDarrick J. Wong 	int			error;
55974453593bSDarrick J. Wong 
55982af52842SBrian Foster 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
55994453593bSDarrick J. Wong 	*done = (len == 0);
56004453593bSDarrick J. Wong 	return error;
56014453593bSDarrick J. Wong }
56024453593bSDarrick J. Wong 
560330f712c9SDave Chinner /*
5604ddb19e31SBrian Foster  * Determine whether an extent shift can be accomplished by a merge with the
5605ddb19e31SBrian Foster  * extent that precedes the target hole of the shift.
5606ddb19e31SBrian Foster  */
5607ddb19e31SBrian Foster STATIC bool
5608ddb19e31SBrian Foster xfs_bmse_can_merge(
5609ddb19e31SBrian Foster 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5610ddb19e31SBrian Foster 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5611ddb19e31SBrian Foster 	xfs_fileoff_t		shift)	/* shift fsb */
5612ddb19e31SBrian Foster {
5613ddb19e31SBrian Foster 	xfs_fileoff_t		startoff;
5614ddb19e31SBrian Foster 
5615ddb19e31SBrian Foster 	startoff = got->br_startoff - shift;
5616ddb19e31SBrian Foster 
5617ddb19e31SBrian Foster 	/*
5618ddb19e31SBrian Foster 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5619ddb19e31SBrian Foster 	 * the preceding extent.
5620ddb19e31SBrian Foster 	 */
5621ddb19e31SBrian Foster 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5622ddb19e31SBrian Foster 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5623ddb19e31SBrian Foster 	    (left->br_state != got->br_state) ||
562495f0b95eSChandan Babu R 	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
5625ddb19e31SBrian Foster 		return false;
5626ddb19e31SBrian Foster 
5627ddb19e31SBrian Foster 	return true;
5628ddb19e31SBrian Foster }
5629ddb19e31SBrian Foster 
5630ddb19e31SBrian Foster /*
5631ddb19e31SBrian Foster  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5632ddb19e31SBrian Foster  * hole in the file. If an extent shift would result in the extent being fully
5633ddb19e31SBrian Foster  * adjacent to the extent that currently precedes the hole, we can merge with
5634ddb19e31SBrian Foster  * the preceding extent rather than do the shift.
5635ddb19e31SBrian Foster  *
5636ddb19e31SBrian Foster  * This function assumes the caller has verified a shift-by-merge is possible
5637ddb19e31SBrian Foster  * with the provided extents via xfs_bmse_can_merge().
5638ddb19e31SBrian Foster  */
5639ddb19e31SBrian Foster STATIC int
5640ddb19e31SBrian Foster xfs_bmse_merge(
56410f37d178SBrian Foster 	struct xfs_trans		*tp,
5642ddb19e31SBrian Foster 	struct xfs_inode		*ip,
5643ddb19e31SBrian Foster 	int				whichfork,
5644ddb19e31SBrian Foster 	xfs_fileoff_t			shift,		/* shift fsb */
5645b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor		*icur,
56464da6b514SChristoph Hellwig 	struct xfs_bmbt_irec		*got,		/* extent to shift */
56474da6b514SChristoph Hellwig 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5648ddb19e31SBrian Foster 	struct xfs_btree_cur		*cur,
56490f37d178SBrian Foster 	int				*logflags)	/* output */
5650ddb19e31SBrian Foster {
5651732436efSDarrick J. Wong 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
56524da6b514SChristoph Hellwig 	struct xfs_bmbt_irec		new;
5653ddb19e31SBrian Foster 	xfs_filblks_t			blockcount;
5654ddb19e31SBrian Foster 	int				error, i;
56555fb5aeeeSEric Sandeen 	struct xfs_mount		*mp = ip->i_mount;
5656ddb19e31SBrian Foster 
56574da6b514SChristoph Hellwig 	blockcount = left->br_blockcount + got->br_blockcount;
5658ddb19e31SBrian Foster 
5659ddb19e31SBrian Foster 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5660ddb19e31SBrian Foster 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
56614da6b514SChristoph Hellwig 	ASSERT(xfs_bmse_can_merge(left, got, shift));
5662ddb19e31SBrian Foster 
56634da6b514SChristoph Hellwig 	new = *left;
56644da6b514SChristoph Hellwig 	new.br_blockcount = blockcount;
5665ddb19e31SBrian Foster 
5666ddb19e31SBrian Foster 	/*
5667ddb19e31SBrian Foster 	 * Update the on-disk extent count, the btree if necessary and log the
5668ddb19e31SBrian Foster 	 * inode.
5669ddb19e31SBrian Foster 	 */
5670daf83964SChristoph Hellwig 	ifp->if_nextents--;
5671ddb19e31SBrian Foster 	*logflags |= XFS_ILOG_CORE;
5672ddb19e31SBrian Foster 	if (!cur) {
5673ddb19e31SBrian Foster 		*logflags |= XFS_ILOG_DEXT;
56744da6b514SChristoph Hellwig 		goto done;
5675ddb19e31SBrian Foster 	}
5676ddb19e31SBrian Foster 
5677ddb19e31SBrian Foster 	/* lookup and remove the extent to merge */
5678e16cf9b0SChristoph Hellwig 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5679ddb19e31SBrian Foster 	if (error)
56804db431f5SDave Chinner 		return error;
5681f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, i != 1))
5682f9e03706SDarrick J. Wong 		return -EFSCORRUPTED;
5683ddb19e31SBrian Foster 
5684ddb19e31SBrian Foster 	error = xfs_btree_delete(cur, &i);
5685ddb19e31SBrian Foster 	if (error)
56864db431f5SDave Chinner 		return error;
5687f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, i != 1))
5688f9e03706SDarrick J. Wong 		return -EFSCORRUPTED;
5689ddb19e31SBrian Foster 
5690ddb19e31SBrian Foster 	/* lookup and update size of the previous extent */
5691e16cf9b0SChristoph Hellwig 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5692ddb19e31SBrian Foster 	if (error)
56934db431f5SDave Chinner 		return error;
5694f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, i != 1))
5695f9e03706SDarrick J. Wong 		return -EFSCORRUPTED;
5696ddb19e31SBrian Foster 
5697a67d00a5SChristoph Hellwig 	error = xfs_bmbt_update(cur, &new);
56984da6b514SChristoph Hellwig 	if (error)
56994da6b514SChristoph Hellwig 		return error;
5700ddb19e31SBrian Foster 
5701e20e174cSBrian Foster 	/* change to extent format if required after extent removal */
5702e20e174cSBrian Foster 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5703e20e174cSBrian Foster 	if (error)
5704e20e174cSBrian Foster 		return error;
5705e20e174cSBrian Foster 
57064da6b514SChristoph Hellwig done:
5707c38ccf59SChristoph Hellwig 	xfs_iext_remove(ip, icur, 0);
5708daf83964SChristoph Hellwig 	xfs_iext_prev(ifp, icur);
5709b2b1712aSChristoph Hellwig 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5710b2b1712aSChristoph Hellwig 			&new);
57114da6b514SChristoph Hellwig 
57124cc1ee5eSDarrick J. Wong 	/* update reverse mapping. rmap functions merge the rmaps for us */
5713bc46ac64SDarrick J. Wong 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
57144cc1ee5eSDarrick J. Wong 	memcpy(&new, got, sizeof(new));
57154cc1ee5eSDarrick J. Wong 	new.br_startoff = left->br_startoff + left->br_blockcount;
5716bc46ac64SDarrick J. Wong 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5717bc46ac64SDarrick J. Wong 	return 0;
5718ddb19e31SBrian Foster }
5719ddb19e31SBrian Foster 
5720bf806280SChristoph Hellwig static int
5721bf806280SChristoph Hellwig xfs_bmap_shift_update_extent(
57220f37d178SBrian Foster 	struct xfs_trans	*tp,
5723a979bdfeSBrian Foster 	struct xfs_inode	*ip,
5724a979bdfeSBrian Foster 	int			whichfork,
5725b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	*icur,
57264da6b514SChristoph Hellwig 	struct xfs_bmbt_irec	*got,
5727a979bdfeSBrian Foster 	struct xfs_btree_cur	*cur,
5728a904b1caSNamjae Jeon 	int			*logflags,
5729bf806280SChristoph Hellwig 	xfs_fileoff_t		startoff)
5730a979bdfeSBrian Foster {
5731bf806280SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
573211f75b3bSChristoph Hellwig 	struct xfs_bmbt_irec	prev = *got;
5733bf806280SChristoph Hellwig 	int			error, i;
5734a979bdfeSBrian Foster 
5735a979bdfeSBrian Foster 	*logflags |= XFS_ILOG_CORE;
5736a979bdfeSBrian Foster 
573711f75b3bSChristoph Hellwig 	got->br_startoff = startoff;
57384da6b514SChristoph Hellwig 
57394da6b514SChristoph Hellwig 	if (cur) {
574011f75b3bSChristoph Hellwig 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5741a979bdfeSBrian Foster 		if (error)
5742a979bdfeSBrian Foster 			return error;
5743f9e03706SDarrick J. Wong 		if (XFS_IS_CORRUPT(mp, i != 1))
5744f9e03706SDarrick J. Wong 			return -EFSCORRUPTED;
5745a979bdfeSBrian Foster 
574611f75b3bSChristoph Hellwig 		error = xfs_bmbt_update(cur, got);
57479c194644SDarrick J. Wong 		if (error)
57489c194644SDarrick J. Wong 			return error;
57494da6b514SChristoph Hellwig 	} else {
57504da6b514SChristoph Hellwig 		*logflags |= XFS_ILOG_DEXT;
57514da6b514SChristoph Hellwig 	}
57529c194644SDarrick J. Wong 
5753b2b1712aSChristoph Hellwig 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5754b2b1712aSChristoph Hellwig 			got);
57554da6b514SChristoph Hellwig 
57569c194644SDarrick J. Wong 	/* update reverse mapping */
5757bc46ac64SDarrick J. Wong 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5758bc46ac64SDarrick J. Wong 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5759bc46ac64SDarrick J. Wong 	return 0;
5760a979bdfeSBrian Foster }
5761a979bdfeSBrian Foster 
576230f712c9SDave Chinner int
5763ecfea3f0SChristoph Hellwig xfs_bmap_collapse_extents(
576430f712c9SDave Chinner 	struct xfs_trans	*tp,
576530f712c9SDave Chinner 	struct xfs_inode	*ip,
5766a904b1caSNamjae Jeon 	xfs_fileoff_t		*next_fsb,
576730f712c9SDave Chinner 	xfs_fileoff_t		offset_shift_fsb,
5768333f950cSBrian Foster 	bool			*done)
576930f712c9SDave Chinner {
5770ecfea3f0SChristoph Hellwig 	int			whichfork = XFS_DATA_FORK;
5771ecfea3f0SChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
5772732436efSDarrick J. Wong 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5773ca446d88SBrian Foster 	struct xfs_btree_cur	*cur = NULL;
5774bf806280SChristoph Hellwig 	struct xfs_bmbt_irec	got, prev;
5775b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
5776bf806280SChristoph Hellwig 	xfs_fileoff_t		new_startoff;
577730f712c9SDave Chinner 	int			error = 0;
5778ca446d88SBrian Foster 	int			logflags = 0;
577930f712c9SDave Chinner 
5780f7e67b20SChristoph Hellwig 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5781a71895c5SDarrick J. Wong 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
57822451337dSDave Chinner 		return -EFSCORRUPTED;
578330f712c9SDave Chinner 	}
578430f712c9SDave Chinner 
578575c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
57862451337dSDave Chinner 		return -EIO;
578730f712c9SDave Chinner 
5788ecfea3f0SChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
578930f712c9SDave Chinner 
579030f712c9SDave Chinner 	error = xfs_iread_extents(tp, ip, whichfork);
579130f712c9SDave Chinner 	if (error)
579230f712c9SDave Chinner 		return error;
579330f712c9SDave Chinner 
5794ac1e0672SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5795ddb19e31SBrian Foster 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
579692219c29SDave Chinner 		cur->bc_ino.flags = 0;
5797ddb19e31SBrian Foster 	}
5798ddb19e31SBrian Foster 
5799b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5800ecfea3f0SChristoph Hellwig 		*done = true;
5801ecfea3f0SChristoph Hellwig 		goto del_cursor;
5802ecfea3f0SChristoph Hellwig 	}
5803f9e03706SDarrick J. Wong 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5804f9e03706SDarrick J. Wong 		error = -EFSCORRUPTED;
5805f9e03706SDarrick J. Wong 		goto del_cursor;
5806f9e03706SDarrick J. Wong 	}
5807ecfea3f0SChristoph Hellwig 
5808bf806280SChristoph Hellwig 	new_startoff = got.br_startoff - offset_shift_fsb;
5809b2b1712aSChristoph Hellwig 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5810bf806280SChristoph Hellwig 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5811bf806280SChristoph Hellwig 			error = -EINVAL;
5812bf806280SChristoph Hellwig 			goto del_cursor;
5813bf806280SChristoph Hellwig 		}
5814bf806280SChristoph Hellwig 
5815bf806280SChristoph Hellwig 		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
58160f37d178SBrian Foster 			error = xfs_bmse_merge(tp, ip, whichfork,
58170f37d178SBrian Foster 					offset_shift_fsb, &icur, &got, &prev,
58180f37d178SBrian Foster 					cur, &logflags);
5819ecfea3f0SChristoph Hellwig 			if (error)
5820ecfea3f0SChristoph Hellwig 				goto del_cursor;
5821bf806280SChristoph Hellwig 			goto done;
5822bf806280SChristoph Hellwig 		}
5823bf806280SChristoph Hellwig 	} else {
5824bf806280SChristoph Hellwig 		if (got.br_startoff < offset_shift_fsb) {
5825bf806280SChristoph Hellwig 			error = -EINVAL;
5826bf806280SChristoph Hellwig 			goto del_cursor;
5827bf806280SChristoph Hellwig 		}
5828bf806280SChristoph Hellwig 	}
5829bf806280SChristoph Hellwig 
58300f37d178SBrian Foster 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
58310f37d178SBrian Foster 			cur, &logflags, new_startoff);
5832bf806280SChristoph Hellwig 	if (error)
5833bf806280SChristoph Hellwig 		goto del_cursor;
583440591bdbSChristoph Hellwig 
583542630361SChristoph Hellwig done:
5836b2b1712aSChristoph Hellwig 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5837ecfea3f0SChristoph Hellwig 		*done = true;
5838ecfea3f0SChristoph Hellwig 		goto del_cursor;
5839ecfea3f0SChristoph Hellwig 	}
5840ecfea3f0SChristoph Hellwig 
5841ecfea3f0SChristoph Hellwig 	*next_fsb = got.br_startoff;
5842ecfea3f0SChristoph Hellwig del_cursor:
5843ecfea3f0SChristoph Hellwig 	if (cur)
58440b04b6b8SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
5845ecfea3f0SChristoph Hellwig 	if (logflags)
5846ecfea3f0SChristoph Hellwig 		xfs_trans_log_inode(tp, ip, logflags);
5847ecfea3f0SChristoph Hellwig 	return error;
5848ecfea3f0SChristoph Hellwig }
5849ecfea3f0SChristoph Hellwig 
5850f62cb48eSDarrick J. Wong /* Make sure we won't be right-shifting an extent past the maximum bound. */
5851f62cb48eSDarrick J. Wong int
5852f62cb48eSDarrick J. Wong xfs_bmap_can_insert_extents(
5853f62cb48eSDarrick J. Wong 	struct xfs_inode	*ip,
5854f62cb48eSDarrick J. Wong 	xfs_fileoff_t		off,
5855f62cb48eSDarrick J. Wong 	xfs_fileoff_t		shift)
5856f62cb48eSDarrick J. Wong {
5857f62cb48eSDarrick J. Wong 	struct xfs_bmbt_irec	got;
5858f62cb48eSDarrick J. Wong 	int			is_empty;
5859f62cb48eSDarrick J. Wong 	int			error = 0;
5860f62cb48eSDarrick J. Wong 
5861f62cb48eSDarrick J. Wong 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5862f62cb48eSDarrick J. Wong 
586375c8c50fSDave Chinner 	if (xfs_is_shutdown(ip->i_mount))
5864f62cb48eSDarrick J. Wong 		return -EIO;
5865f62cb48eSDarrick J. Wong 
5866f62cb48eSDarrick J. Wong 	xfs_ilock(ip, XFS_ILOCK_EXCL);
5867f62cb48eSDarrick J. Wong 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5868f62cb48eSDarrick J. Wong 	if (!error && !is_empty && got.br_startoff >= off &&
5869f62cb48eSDarrick J. Wong 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5870f62cb48eSDarrick J. Wong 		error = -EINVAL;
5871f62cb48eSDarrick J. Wong 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
5872f62cb48eSDarrick J. Wong 
5873f62cb48eSDarrick J. Wong 	return error;
5874f62cb48eSDarrick J. Wong }
5875f62cb48eSDarrick J. Wong 
/*
 * Shift one data-fork extent rightwards (to a higher file offset) by
 * offset_shift_fsb blocks, working from high offsets down towards stop_fsb.
 * One extent is shifted per call; *next_fsb carries the iteration state
 * between calls (NULLFSBLOCK on the first call means "start from the last
 * extent"), and *done is set once no extent above stop_fsb remains.
 * Returns 0 or a negative errno; -EINVAL if the shifted extent would
 * collide with its right neighbor.
 */
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	/* Bail on a corrupt fork format, or on injected format errors. */
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/* A btree-format fork needs a bmbt cursor to update the on-disk tree. */
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
	}

	/*
	 * Position on the extent to shift: the last extent in the fork on the
	 * first call, otherwise the extent covering (or following) *next_fsb.
	 * Nothing left above stop_fsb means we are done.
	 */
	if (*next_fsb == NULLFSBLOCK) {
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	/* Delalloc extents must not appear here; treat one as corruption. */
	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	/* The caller-supplied iteration state must still be above stop_fsb. */
	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
		error = -EFSCORRUPTED;
		goto del_cursor;
	}

	/* Refuse the shift if it would run into the next extent. */
	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way.  We should
		 * never find mergeable extents in this scenario.  Check anyways
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	/* Move the extent to its new offset in both incore and on-disk trees. */
	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
			cur, &logflags, new_startoff);
	if (error)
		goto del_cursor;

	/*
	 * Step to the previous (lower-offset) extent for the next call; stop
	 * once it ends at or below stop_fsb.
	 */
	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur, error);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
5973a904b1caSNamjae Jeon 
/*
 * Split an extent into two extents at split_fsb, so that split_fsb becomes
 * the first block of the second extent.  If split_fsb lies in a hole or at
 * the first block of an existing extent, there is nothing to split and we
 * return 0.
 */
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	int				whichfork = XFS_DATA_FORK;
	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		new; /* split extent */
	struct xfs_mount		*mp = ip->i_mount;
	xfs_fsblock_t			gotblkcnt; /* new block count for got */
	struct xfs_iext_cursor		icur;
	int				error = 0;
	int				logflags = 0;
	int				i = 0;

	/* Bail on a corrupt fork format, or on injected format errors. */
	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		return -EFSCORRUPTED;
	}

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Read in all the extents */
	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	/*
	 * If there are no extents, or split_fsb lies in a hole or at the
	 * start of an extent, we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
	    got.br_startoff >= split_fsb)
		return 0;

	/* Second half starts at split_fsb; first half keeps gotblkcnt blocks. */
	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_ino.flags = 0;
		/* The extent being split must exist in the bmbt (i == 1). */
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/* Shrink the original extent to the first half, incore... */
	got.br_blockcount = gotblkcnt;
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
			&got);

	logflags = XFS_ILOG_CORE;
	if (cur) {
		/* ...and in the on-disk btree. */
		error = xfs_bmbt_update(cur, &got);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	xfs_iext_next(ifp, &icur);
	xfs_iext_insert(ip, &icur, &new, 0);
	ifp->if_nextents++;

	if (cur) {
		/* The new record must not already exist (i == 0) before insert. */
		error = xfs_bmbt_lookup_eq(cur, &new, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto del_cursor;
		}
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
6092a904b1caSNamjae Jeon 
60939f3afb57SDarrick J. Wong /* Deferred mapping is only for real extents in the data fork. */
60949f3afb57SDarrick J. Wong static bool
60959f3afb57SDarrick J. Wong xfs_bmap_is_update_needed(
60969f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec	*bmap)
60979f3afb57SDarrick J. Wong {
60989f3afb57SDarrick J. Wong 	return  bmap->br_startblock != HOLESTARTBLOCK &&
60999f3afb57SDarrick J. Wong 		bmap->br_startblock != DELAYSTARTBLOCK;
61009f3afb57SDarrick J. Wong }
61019f3afb57SDarrick J. Wong 
61029f3afb57SDarrick J. Wong /* Record a bmap intent. */
61039f3afb57SDarrick J. Wong static int
61049f3afb57SDarrick J. Wong __xfs_bmap_add(
61050f37d178SBrian Foster 	struct xfs_trans		*tp,
61069f3afb57SDarrick J. Wong 	enum xfs_bmap_intent_type	type,
61079f3afb57SDarrick J. Wong 	struct xfs_inode		*ip,
61089f3afb57SDarrick J. Wong 	int				whichfork,
61099f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec		*bmap)
61109f3afb57SDarrick J. Wong {
61119f3afb57SDarrick J. Wong 	struct xfs_bmap_intent		*bi;
61129f3afb57SDarrick J. Wong 
61130f37d178SBrian Foster 	trace_xfs_bmap_defer(tp->t_mountp,
61140f37d178SBrian Foster 			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
61159f3afb57SDarrick J. Wong 			type,
61160f37d178SBrian Foster 			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
61179f3afb57SDarrick J. Wong 			ip->i_ino, whichfork,
61189f3afb57SDarrick J. Wong 			bmap->br_startoff,
61199f3afb57SDarrick J. Wong 			bmap->br_blockcount,
61209f3afb57SDarrick J. Wong 			bmap->br_state);
61219f3afb57SDarrick J. Wong 
6122f3c799c2SDarrick J. Wong 	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
61239f3afb57SDarrick J. Wong 	INIT_LIST_HEAD(&bi->bi_list);
61249f3afb57SDarrick J. Wong 	bi->bi_type = type;
61259f3afb57SDarrick J. Wong 	bi->bi_owner = ip;
61269f3afb57SDarrick J. Wong 	bi->bi_whichfork = whichfork;
61279f3afb57SDarrick J. Wong 	bi->bi_bmap = *bmap;
61289f3afb57SDarrick J. Wong 
61290f37d178SBrian Foster 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
61309f3afb57SDarrick J. Wong 	return 0;
61319f3afb57SDarrick J. Wong }
61329f3afb57SDarrick J. Wong 
61339f3afb57SDarrick J. Wong /* Map an extent into a file. */
61343e08f42aSDarrick J. Wong void
61359f3afb57SDarrick J. Wong xfs_bmap_map_extent(
61360f37d178SBrian Foster 	struct xfs_trans	*tp,
61379f3afb57SDarrick J. Wong 	struct xfs_inode	*ip,
61389f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec	*PREV)
61399f3afb57SDarrick J. Wong {
61409f3afb57SDarrick J. Wong 	if (!xfs_bmap_is_update_needed(PREV))
61413e08f42aSDarrick J. Wong 		return;
61429f3afb57SDarrick J. Wong 
61433e08f42aSDarrick J. Wong 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
61449f3afb57SDarrick J. Wong }
61459f3afb57SDarrick J. Wong 
61469f3afb57SDarrick J. Wong /* Unmap an extent out of a file. */
61473e08f42aSDarrick J. Wong void
61489f3afb57SDarrick J. Wong xfs_bmap_unmap_extent(
61490f37d178SBrian Foster 	struct xfs_trans	*tp,
61509f3afb57SDarrick J. Wong 	struct xfs_inode	*ip,
61519f3afb57SDarrick J. Wong 	struct xfs_bmbt_irec	*PREV)
61529f3afb57SDarrick J. Wong {
61539f3afb57SDarrick J. Wong 	if (!xfs_bmap_is_update_needed(PREV))
61543e08f42aSDarrick J. Wong 		return;
61559f3afb57SDarrick J. Wong 
61563e08f42aSDarrick J. Wong 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
61579f3afb57SDarrick J. Wong }
61589f3afb57SDarrick J. Wong 
/*
 * Process one of the deferred bmap operations.  We pass back the
 * btree cursor to maintain our lock on the bmapbt between calls.
 */
int
xfs_bmap_finish_one(
	struct xfs_trans		*tp,
	struct xfs_bmap_intent		*bi)
{
	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
	int				error = 0;

	/* We must not be carrying AG-allocation loop state into this op. */
	ASSERT(tp->t_highest_agno == NULLAGNUMBER);

	trace_xfs_bmap_deferred(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_type,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
			bi->bi_owner->i_ino, bi->bi_whichfork,
			bmap->br_startoff, bmap->br_blockcount,
			bmap->br_state);

	/* Only data-fork intents are ever queued; anything else is corrupt. */
	if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK))
		return -EFSCORRUPTED;

	/* Error-injection point for testing intent-recovery paths. */
	if (XFS_TEST_ERROR(false, tp->t_mountp,
			XFS_ERRTAG_BMAP_FINISH_ONE))
		return -EIO;

	switch (bi->bi_type) {
	case XFS_BMAP_MAP:
		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
				bmap->br_blockcount, bmap->br_startblock, 0);
		/*
		 * Zero the recorded length now that the mapping is done —
		 * presumably so a relogged/requeued intent has no remaining
		 * work; confirm against the bmap intent log item code.
		 */
		bmap->br_blockcount = 0;
		break;
	case XFS_BMAP_UNMAP:
		/*
		 * __xfs_bunmapi updates br_blockcount in place with the
		 * count still left to unmap (it is passed by reference).
		 */
		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
				&bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}

	return error;
}
620530b0984dSDarrick J. Wong 
620630b0984dSDarrick J. Wong /* Check that an inode's extent does not have invalid flags or bad ranges. */
620730b0984dSDarrick J. Wong xfs_failaddr_t
620830b0984dSDarrick J. Wong xfs_bmap_validate_extent(
620930b0984dSDarrick J. Wong 	struct xfs_inode	*ip,
621030b0984dSDarrick J. Wong 	int			whichfork,
621130b0984dSDarrick J. Wong 	struct xfs_bmbt_irec	*irec)
621230b0984dSDarrick J. Wong {
621330b0984dSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
621430b0984dSDarrick J. Wong 
621533005fd0SDarrick J. Wong 	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6216acf104c2SDarrick J. Wong 		return __this_address;
6217acf104c2SDarrick J. Wong 
621818695ad4SDarrick J. Wong 	if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
621918695ad4SDarrick J. Wong 		if (!xfs_verify_rtext(mp, irec->br_startblock,
622018695ad4SDarrick J. Wong 					  irec->br_blockcount))
622130b0984dSDarrick J. Wong 			return __this_address;
622230b0984dSDarrick J. Wong 	} else {
622367457eb0SDarrick J. Wong 		if (!xfs_verify_fsbext(mp, irec->br_startblock,
622467457eb0SDarrick J. Wong 					   irec->br_blockcount))
622530b0984dSDarrick J. Wong 			return __this_address;
622630b0984dSDarrick J. Wong 	}
6227daa79baeSChristoph Hellwig 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
622830b0984dSDarrick J. Wong 		return __this_address;
622930b0984dSDarrick J. Wong 	return NULL;
623030b0984dSDarrick J. Wong }
6231f3c799c2SDarrick J. Wong 
6232f3c799c2SDarrick J. Wong int __init
6233f3c799c2SDarrick J. Wong xfs_bmap_intent_init_cache(void)
6234f3c799c2SDarrick J. Wong {
6235f3c799c2SDarrick J. Wong 	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6236f3c799c2SDarrick J. Wong 			sizeof(struct xfs_bmap_intent),
6237f3c799c2SDarrick J. Wong 			0, 0, NULL);
6238f3c799c2SDarrick J. Wong 
6239f3c799c2SDarrick J. Wong 	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6240f3c799c2SDarrick J. Wong }
6241f3c799c2SDarrick J. Wong 
/* Tear down the slab cache for deferred bmap work items. */
void
xfs_bmap_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_bmap_intent_cache);
	/* Clear the pointer so any late user faults on NULL, not a freed cache. */
	xfs_bmap_intent_cache = NULL;
}
6248