// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"


kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count,
	 * either a signed 32-bit number for the data fork, or a signed 16-bit
	 * number for the attr fork.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
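
/*
 * Rough worked example of the loop above (illustrative numbers, not
 * exact for every geometry): with 4k blocks a bmbt block holds roughly
 * 250 records, so minleafrecs/minnoderecs are roughly 125.  For the
 * data fork, maxleafents = MAXEXTNUM (2^31 - 1), so the first division
 * leaves on the order of 17M leaf blocks, and each subsequent level
 * divides by ~125 again until the survivors fit in the inode root.
 * The loop therefore converges after only a handful of iterations,
 * which is why m_bm_maxlevels stays small even for maximally
 * fragmented files.
 */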

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
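
/*
 * Illustrative sketch of the computation above: a delayed extent of
 * "len" blocks needs at worst ceil(len / maxrecs) leaf blocks, then
 * ceil(that / maxrecs) level-1 blocks, and so on, plus one block for
 * each remaining level once a single block suffices.  Assuming (for
 * the sake of the example) maxrecs = 125 at every level and 5 max
 * levels, len = 10000 costs 80 + 1 + (5 - 2) = 84 indirect blocks.
 */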

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256)
		offset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	else
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);

	ASSERT(offset < XFS_LITINO(mp));
	return offset;
}

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i + 1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
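
/*
 * Note: most callers go through the xfs_bmap_add_free() wrapper
 * (defined in xfs_bmap.h), which passes skip_discard = false; e.g.
 * xfs_bmap_btree_to_extents() below frees the old child block with
 * xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo).
 */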

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_ino.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
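
/*
 * Sketch of the resulting layout after a successful conversion:
 *
 *	inode fork (if_broot):	root block, bb_level = 1,
 *				one key/pointer pair
 *		|
 *		v
 *	new block (abp):	leaf, bb_level = 0, one record per
 *				real (non-delalloc) incore extent
 */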

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}


STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
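
/*
 * The init_fn callout lets each local-format flavour initialise the new
 * block its own way; e.g. xfs_symlink_local_to_remote() (used by
 * xfs_bmap_add_attrfork_local() below) copies the symlink target into
 * the block, sets the buffer log item type and logs the buffer.
 */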

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Set an inode attr fork offset based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
	ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
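
/*
 * Typical caller pattern (sketch -- see xfs_attr_set() for the real
 * thing); note this function rolls its own transaction, so it must be
 * called before the caller's own transaction is allocated:
 *
 *	if (XFS_IFORK_Q(ip) == 0) {
 *		error = xfs_bmap_add_attrfork(ip, size, rsvd);
 *		if (error)
 *			return error;
 *	}
 */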

/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};

/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return -EFSCORRUPTED;
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_CORRUPT(mp, ifp->if_format != XFS_DINODE_FMT_BTREE)) {
		error = -EFSCORRUPTED;
		goto out;
	}

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
out:
	xfs_iext_destroy(ifp);
	return error;
}

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free.  This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork.  Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
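
/*
 * Example of the search above (illustrative): with extents at file
 * offsets [0, 10) and [15, 20), len = 4 and *first_unused = 0, the
 * first iteration sets max = 10; the second sees that the gap before
 * the extent at 15 is 15 - 10 >= 4 and breaks, so *first_unused
 * becomes 10 -- the start of the hole at [10, 15).
 */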

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty, as any new write will be
 * at or past EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	struct xfs_inode	*ip,		/* incore inode */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			rval;		/* return value */
	struct xfs_bmbt_irec	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (ifp->if_nextents != 1)
		return 0;
	if (ifp->if_format != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp = 0; /* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
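	/*
	 * The four bits BMAP_{LEFT,RIGHT}_FILLING and
	 * BMAP_{LEFT,RIGHT}_CONTIG give sixteen combinations, but a
	 * neighbour can only be contiguous on a side that is being
	 * filled, so only a subset of the cases can actually occur and
	 * each gets its own case below.
	 */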
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1892 		xfs_iext_next(ifp, &bma->icur);
1893 		break;
1894 
1895 	case 0:
1896 		/*
1897 		 * Filling in the middle part of a previous delayed allocation.
1898 		 * Contiguity is impossible here.
1899 		 * This case is avoided almost all the time.
1900 		 *
1901 		 * We start with a delayed allocation:
1902 		 *
1903 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1904 		 *  PREV @ idx
1905 		 *
1906 		 * and we are allocating:
1907 		 *                     +rrrrrrrrrrrrrrrrr+
1908 		 *			      new
1909 		 *
1910 		 * and we set it up for insertion as:
1911 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1912 		 *                            new
1913 		 *  PREV @ idx          LEFT              RIGHT
1914 		 *                      inserted at idx + 1
1915 		 */
1916 		old = PREV;
1917 
1918 		/* LEFT is the new middle */
1919 		LEFT = *new;
1920 
1921 		/* RIGHT is the new right */
1922 		RIGHT.br_state = PREV.br_state;
1923 		RIGHT.br_startoff = new_endoff;
1924 		RIGHT.br_blockcount =
1925 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
1926 		RIGHT.br_startblock =
1927 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1928 					RIGHT.br_blockcount));
1929 
1930 		/* truncate PREV */
1931 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1932 		PREV.br_startblock =
1933 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1934 					PREV.br_blockcount));
1935 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1936 
1937 		xfs_iext_next(ifp, &bma->icur);
1938 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1939 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1940 		ifp->if_nextents++;
1941 
1942 		if (bma->cur == NULL)
1943 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1944 		else {
1945 			rval = XFS_ILOG_CORE;
1946 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1947 			if (error)
1948 				goto done;
1949 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1950 				error = -EFSCORRUPTED;
1951 				goto done;
1952 			}
1953 			error = xfs_btree_insert(bma->cur, &i);
1954 			if (error)
1955 				goto done;
1956 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1957 				error = -EFSCORRUPTED;
1958 				goto done;
1959 			}
1960 		}
1961 
1962 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1963 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1964 					&bma->cur, 1, &tmp_rval, whichfork);
1965 			rval |= tmp_rval;
1966 			if (error)
1967 				goto done;
1968 		}
1969 
1970 		da_new = startblockval(PREV.br_startblock) +
1971 			 startblockval(RIGHT.br_startblock);
1972 		break;
1973 
1974 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1975 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1976 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1977 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1978 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1979 	case BMAP_LEFT_CONTIG:
1980 	case BMAP_RIGHT_CONTIG:
1981 		/*
1982 		 * These cases are all impossible.
1983 		 */
1984 		ASSERT(0);
1985 	}
1986 
1987 	/* add reverse mapping unless caller opted out */
1988 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1989 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1990 
1991 	/* convert to a btree if necessary */
1992 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1993 		int	tmp_logflags;	/* partial log flag return val */
1994 
1995 		ASSERT(bma->cur == NULL);
1996 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1997 				&bma->cur, da_old > 0, &tmp_logflags,
1998 				whichfork);
1999 		bma->logflags |= tmp_logflags;
2000 		if (error)
2001 			goto done;
2002 	}
2003 
2004 	if (da_new != da_old)
2005 		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
2006 
2007 	if (bma->cur) {
2008 		da_new += bma->cur->bc_ino.allocated;
2009 		bma->cur->bc_ino.allocated = 0;
2010 	}
2011 
2012 	/* adjust for changes in reserved delayed indirect blocks */
2013 	if (da_new != da_old) {
2014 		ASSERT(state == 0 || da_new < da_old);
2015 		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
2016 				false);
2017 	}
2018 
2019 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2020 done:
2021 	if (whichfork != XFS_COW_FORK)
2022 		bma->logflags |= rval;
2023 	return error;
2024 #undef	LEFT
2025 #undef	RIGHT
2026 #undef	PREV
2027 }
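/*
 * Illustrative sketch, not part of the original source: the sizing rule
 * the cases above apply whenever a delalloc record is split and a
 * remainder stays delayed.  The helper name is hypothetical;
 * xfs_bmap_worst_indlen(), startblockval() and XFS_FILBLKS_MIN() are the
 * real interfaces used above.  (Some cases additionally subtract the
 * blocks just consumed by the bmap btree cursor.)
 */
static inline xfs_filblks_t
example_trim_indlen(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*prev,		/* delalloc record being split */
	xfs_filblks_t		remaining)	/* blocks that stay delalloc */
{
	/*
	 * The remainder keeps at most the reservation the original record
	 * carried; the worst-case estimate for the shorter range is an
	 * upper bound, never a license to take fresh blocks.
	 */
	return XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, remaining),
			       startblockval(prev->br_startblock));
}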
2028 
2029 /*
2030  * Convert an unwritten allocation to a real allocation or vice versa.
2031  */
2032 int					/* error */
2033 xfs_bmap_add_extent_unwritten_real(
2034 	struct xfs_trans	*tp,
2035 	xfs_inode_t		*ip,	/* incore inode pointer */
2036 	int			whichfork,
2037 	struct xfs_iext_cursor	*icur,
2038 	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
2039 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
2040 	int			*logflagsp) /* inode logging flags */
2041 {
2042 	xfs_btree_cur_t		*cur;	/* btree cursor */
2043 	int			error;	/* error return value */
2044 	int			i;	/* temp state */
2045 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2046 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
2047 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
2048 					/* left is 0, right is 1, prev is 2 */
2049 	int			rval = 0;	/* return value (logging flags) */
2050 	int			state = xfs_bmap_fork_to_state(whichfork);
2051 	struct xfs_mount	*mp = ip->i_mount;
2052 	struct xfs_bmbt_irec	old;
2053 
2054 	*logflagsp = 0;
2055 
2056 	cur = *curp;
2057 	ifp = XFS_IFORK_PTR(ip, whichfork);
2058 
2059 	ASSERT(!isnullstartblock(new->br_startblock));
2060 
2061 	XFS_STATS_INC(mp, xs_add_exlist);
2062 
2063 #define	LEFT		r[0]
2064 #define	RIGHT		r[1]
2065 #define	PREV		r[2]
2066 
2067 	/*
2068 	 * Set up a bunch of variables to make the tests simpler.
2069 	 */
2070 	error = 0;
2071 	xfs_iext_get_extent(ifp, icur, &PREV);
2072 	ASSERT(new->br_state != PREV.br_state);
2073 	new_endoff = new->br_startoff + new->br_blockcount;
2074 	ASSERT(PREV.br_startoff <= new->br_startoff);
2075 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2076 
2077 	/*
2078 	 * Set flags determining what part of the previous oldext allocation
2079 	 * extent is being replaced by a newext allocation.
2080 	 */
2081 	if (PREV.br_startoff == new->br_startoff)
2082 		state |= BMAP_LEFT_FILLING;
2083 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2084 		state |= BMAP_RIGHT_FILLING;
2085 
2086 	/*
2087 	 * Check and set flags if this segment has a left neighbor.
2088 	 * Don't set contiguous if the combined extent would be too large.
2089 	 */
2090 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2091 		state |= BMAP_LEFT_VALID;
2092 		if (isnullstartblock(LEFT.br_startblock))
2093 			state |= BMAP_LEFT_DELAY;
2094 	}
2095 
2096 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2097 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2098 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2099 	    LEFT.br_state == new->br_state &&
2100 	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2101 		state |= BMAP_LEFT_CONTIG;
2102 
2103 	/*
2104 	 * Check and set flags if this segment has a right neighbor.
2105 	 * Don't set contiguous if the combined extent would be too large.
2106 	 * Also check for all-three-contiguous being too large.
2107 	 */
2108 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2109 		state |= BMAP_RIGHT_VALID;
2110 		if (isnullstartblock(RIGHT.br_startblock))
2111 			state |= BMAP_RIGHT_DELAY;
2112 	}
2113 
2114 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2115 	    new_endoff == RIGHT.br_startoff &&
2116 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2117 	    new->br_state == RIGHT.br_state &&
2118 	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2119 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2120 		       BMAP_RIGHT_FILLING)) !=
2121 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2122 		       BMAP_RIGHT_FILLING) ||
2123 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2124 			<= MAXEXTLEN))
2125 		state |= BMAP_RIGHT_CONTIG;
2126 
2127 	/*
2128 	 * Switch out based on the FILLING and CONTIG state bits.
2129 	 */
2130 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2131 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2132 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2133 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2134 		/*
2135 		 * Setting all of a previous oldext extent to newext.
2136 		 * The left and right neighbors are both contiguous with new.
2137 		 */
2138 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2139 
2140 		xfs_iext_remove(ip, icur, state);
2141 		xfs_iext_remove(ip, icur, state);
2142 		xfs_iext_prev(ifp, icur);
2143 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2144 		ifp->if_nextents -= 2;
2145 		if (cur == NULL)
2146 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2147 		else {
2148 			rval = XFS_ILOG_CORE;
2149 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2150 			if (error)
2151 				goto done;
2152 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2153 				error = -EFSCORRUPTED;
2154 				goto done;
2155 			}
2156 			if ((error = xfs_btree_delete(cur, &i)))
2157 				goto done;
2158 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2159 				error = -EFSCORRUPTED;
2160 				goto done;
2161 			}
2162 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2163 				goto done;
2164 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2165 				error = -EFSCORRUPTED;
2166 				goto done;
2167 			}
2168 			if ((error = xfs_btree_delete(cur, &i)))
2169 				goto done;
2170 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2171 				error = -EFSCORRUPTED;
2172 				goto done;
2173 			}
2174 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2175 				goto done;
2176 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2177 				error = -EFSCORRUPTED;
2178 				goto done;
2179 			}
2180 			error = xfs_bmbt_update(cur, &LEFT);
2181 			if (error)
2182 				goto done;
2183 		}
2184 		break;
2185 
2186 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2187 		/*
2188 		 * Setting all of a previous oldext extent to newext.
2189 		 * The left neighbor is contiguous, the right is not.
2190 		 */
2191 		LEFT.br_blockcount += PREV.br_blockcount;
2192 
2193 		xfs_iext_remove(ip, icur, state);
2194 		xfs_iext_prev(ifp, icur);
2195 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2196 		ifp->if_nextents--;
2197 		if (cur == NULL)
2198 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2199 		else {
2200 			rval = XFS_ILOG_CORE;
2201 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2202 			if (error)
2203 				goto done;
2204 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2205 				error = -EFSCORRUPTED;
2206 				goto done;
2207 			}
2208 			if ((error = xfs_btree_delete(cur, &i)))
2209 				goto done;
2210 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2211 				error = -EFSCORRUPTED;
2212 				goto done;
2213 			}
2214 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2215 				goto done;
2216 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2217 				error = -EFSCORRUPTED;
2218 				goto done;
2219 			}
2220 			error = xfs_bmbt_update(cur, &LEFT);
2221 			if (error)
2222 				goto done;
2223 		}
2224 		break;
2225 
2226 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2227 		/*
2228 		 * Setting all of a previous oldext extent to newext.
2229 		 * The right neighbor is contiguous, the left is not.
2230 		 */
2231 		PREV.br_blockcount += RIGHT.br_blockcount;
2232 		PREV.br_state = new->br_state;
2233 
2234 		xfs_iext_next(ifp, icur);
2235 		xfs_iext_remove(ip, icur, state);
2236 		xfs_iext_prev(ifp, icur);
2237 		xfs_iext_update_extent(ip, state, icur, &PREV);
2238 		ifp->if_nextents--;
2239 
2240 		if (cur == NULL)
2241 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2242 		else {
2243 			rval = XFS_ILOG_CORE;
2244 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2245 			if (error)
2246 				goto done;
2247 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2248 				error = -EFSCORRUPTED;
2249 				goto done;
2250 			}
2251 			if ((error = xfs_btree_delete(cur, &i)))
2252 				goto done;
2253 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2254 				error = -EFSCORRUPTED;
2255 				goto done;
2256 			}
2257 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2258 				goto done;
2259 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2260 				error = -EFSCORRUPTED;
2261 				goto done;
2262 			}
2263 			error = xfs_bmbt_update(cur, &PREV);
2264 			if (error)
2265 				goto done;
2266 		}
2267 		break;
2268 
2269 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2270 		/*
2271 		 * Setting all of a previous oldext extent to newext.
2272 		 * Neither the left nor right neighbors are contiguous with
2273 		 * the new one.
2274 		 */
2275 		PREV.br_state = new->br_state;
2276 		xfs_iext_update_extent(ip, state, icur, &PREV);
2277 
2278 		if (cur == NULL)
2279 			rval = XFS_ILOG_DEXT;
2280 		else {
2281 			rval = 0;
2282 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2283 			if (error)
2284 				goto done;
2285 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2286 				error = -EFSCORRUPTED;
2287 				goto done;
2288 			}
2289 			error = xfs_bmbt_update(cur, &PREV);
2290 			if (error)
2291 				goto done;
2292 		}
2293 		break;
2294 
2295 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2296 		/*
2297 		 * Setting the first part of a previous oldext extent to newext.
2298 		 * The left neighbor is contiguous.
2299 		 */
2300 		LEFT.br_blockcount += new->br_blockcount;
2301 
2302 		old = PREV;
2303 		PREV.br_startoff += new->br_blockcount;
2304 		PREV.br_startblock += new->br_blockcount;
2305 		PREV.br_blockcount -= new->br_blockcount;
2306 
2307 		xfs_iext_update_extent(ip, state, icur, &PREV);
2308 		xfs_iext_prev(ifp, icur);
2309 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2310 
2311 		if (cur == NULL)
2312 			rval = XFS_ILOG_DEXT;
2313 		else {
2314 			rval = 0;
2315 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2316 			if (error)
2317 				goto done;
2318 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2319 				error = -EFSCORRUPTED;
2320 				goto done;
2321 			}
2322 			error = xfs_bmbt_update(cur, &PREV);
2323 			if (error)
2324 				goto done;
2325 			error = xfs_btree_decrement(cur, 0, &i);
2326 			if (error)
2327 				goto done;
2328 			error = xfs_bmbt_update(cur, &LEFT);
2329 			if (error)
2330 				goto done;
2331 		}
2332 		break;
2333 
2334 	case BMAP_LEFT_FILLING:
2335 		/*
2336 		 * Setting the first part of a previous oldext extent to newext.
2337 		 * The left neighbor is not contiguous.
2338 		 */
2339 		old = PREV;
2340 		PREV.br_startoff += new->br_blockcount;
2341 		PREV.br_startblock += new->br_blockcount;
2342 		PREV.br_blockcount -= new->br_blockcount;
2343 
2344 		xfs_iext_update_extent(ip, state, icur, &PREV);
2345 		xfs_iext_insert(ip, icur, new, state);
2346 		ifp->if_nextents++;
2347 
2348 		if (cur == NULL)
2349 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2350 		else {
2351 			rval = XFS_ILOG_CORE;
2352 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2353 			if (error)
2354 				goto done;
2355 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2356 				error = -EFSCORRUPTED;
2357 				goto done;
2358 			}
2359 			error = xfs_bmbt_update(cur, &PREV);
2360 			if (error)
2361 				goto done;
2362 			cur->bc_rec.b = *new;
2363 			if ((error = xfs_btree_insert(cur, &i)))
2364 				goto done;
2365 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2366 				error = -EFSCORRUPTED;
2367 				goto done;
2368 			}
2369 		}
2370 		break;
2371 
2372 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2373 		/*
2374 		 * Setting the last part of a previous oldext extent to newext.
2375 		 * The right neighbor is contiguous with the new allocation.
2376 		 */
2377 		old = PREV;
2378 		PREV.br_blockcount -= new->br_blockcount;
2379 
2380 		RIGHT.br_startoff = new->br_startoff;
2381 		RIGHT.br_startblock = new->br_startblock;
2382 		RIGHT.br_blockcount += new->br_blockcount;
2383 
2384 		xfs_iext_update_extent(ip, state, icur, &PREV);
2385 		xfs_iext_next(ifp, icur);
2386 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
2387 
2388 		if (cur == NULL)
2389 			rval = XFS_ILOG_DEXT;
2390 		else {
2391 			rval = 0;
2392 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2393 			if (error)
2394 				goto done;
2395 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2396 				error = -EFSCORRUPTED;
2397 				goto done;
2398 			}
2399 			error = xfs_bmbt_update(cur, &PREV);
2400 			if (error)
2401 				goto done;
2402 			error = xfs_btree_increment(cur, 0, &i);
2403 			if (error)
2404 				goto done;
2405 			error = xfs_bmbt_update(cur, &RIGHT);
2406 			if (error)
2407 				goto done;
2408 		}
2409 		break;
2410 
2411 	case BMAP_RIGHT_FILLING:
2412 		/*
2413 		 * Setting the last part of a previous oldext extent to newext.
2414 		 * The right neighbor is not contiguous.
2415 		 */
2416 		old = PREV;
2417 		PREV.br_blockcount -= new->br_blockcount;
2418 
2419 		xfs_iext_update_extent(ip, state, icur, &PREV);
2420 		xfs_iext_next(ifp, icur);
2421 		xfs_iext_insert(ip, icur, new, state);
2422 		ifp->if_nextents++;
2423 
2424 		if (cur == NULL)
2425 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2426 		else {
2427 			rval = XFS_ILOG_CORE;
2428 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2429 			if (error)
2430 				goto done;
2431 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2432 				error = -EFSCORRUPTED;
2433 				goto done;
2434 			}
2435 			error = xfs_bmbt_update(cur, &PREV);
2436 			if (error)
2437 				goto done;
2438 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2439 			if (error)
2440 				goto done;
2441 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2442 				error = -EFSCORRUPTED;
2443 				goto done;
2444 			}
2445 			if ((error = xfs_btree_insert(cur, &i)))
2446 				goto done;
2447 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2448 				error = -EFSCORRUPTED;
2449 				goto done;
2450 			}
2451 		}
2452 		break;
2453 
2454 	case 0:
2455 		/*
2456 		 * Setting the middle part of a previous oldext extent to
2457 		 * newext.  Contiguity is impossible here.
2458 		 * One extent becomes three extents.
2459 		 */
2460 		old = PREV;
2461 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2462 
2463 		r[0] = *new;
2464 		r[1].br_startoff = new_endoff;
2465 		r[1].br_blockcount =
2466 			old.br_startoff + old.br_blockcount - new_endoff;
2467 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
2468 		r[1].br_state = PREV.br_state;
2469 
2470 		xfs_iext_update_extent(ip, state, icur, &PREV);
2471 		xfs_iext_next(ifp, icur);
2472 		xfs_iext_insert(ip, icur, &r[1], state);
2473 		xfs_iext_insert(ip, icur, &r[0], state);
2474 		ifp->if_nextents += 2;
2475 
2476 		if (cur == NULL)
2477 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2478 		else {
2479 			rval = XFS_ILOG_CORE;
2480 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2481 			if (error)
2482 				goto done;
2483 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2484 				error = -EFSCORRUPTED;
2485 				goto done;
2486 			}
2487 			/* new right extent - oldext */
2488 			error = xfs_bmbt_update(cur, &r[1]);
2489 			if (error)
2490 				goto done;
2491 			/* new left extent - oldext */
2492 			cur->bc_rec.b = PREV;
2493 			if ((error = xfs_btree_insert(cur, &i)))
2494 				goto done;
2495 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2496 				error = -EFSCORRUPTED;
2497 				goto done;
2498 			}
2499 			/*
2500 			 * Reset the cursor to the position of the new extent
2501 			 * we are about to insert as we can't trust it after
2502 			 * the previous insert.
2503 			 */
2504 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2505 			if (error)
2506 				goto done;
2507 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2508 				error = -EFSCORRUPTED;
2509 				goto done;
2510 			}
2511 			/* new middle extent - newext */
2512 			if ((error = xfs_btree_insert(cur, &i)))
2513 				goto done;
2514 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2515 				error = -EFSCORRUPTED;
2516 				goto done;
2517 			}
2518 		}
2519 		break;
2520 
2521 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2522 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2523 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2524 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2525 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2526 	case BMAP_LEFT_CONTIG:
2527 	case BMAP_RIGHT_CONTIG:
2528 		/*
2529 		 * These cases are all impossible.
2530 		 */
2531 		ASSERT(0);
2532 	}
2533 
2534 	/* update reverse mappings */
2535 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2536 
2537 	/* convert to a btree if necessary */
2538 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2539 		int	tmp_logflags;	/* partial log flag return val */
2540 
2541 		ASSERT(cur == NULL);
2542 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2543 				&tmp_logflags, whichfork);
2544 		*logflagsp |= tmp_logflags;
2545 		if (error)
2546 			goto done;
2547 	}
2548 
2549 	/* clear out the allocated field, done with it now in any case. */
2550 	if (cur) {
2551 		cur->bc_ino.allocated = 0;
2552 		*curp = cur;
2553 	}
2554 
2555 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2556 done:
2557 	*logflagsp |= rval;
2558 	return error;
2559 #undef	LEFT
2560 #undef	RIGHT
2561 #undef	PREV
2562 }
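/*
 * Illustrative sketch, not part of the original source: the record
 * arithmetic behind the "case 0" three-way split above.  The helper name
 * is hypothetical; the field math matches the real code.
 */
static inline void
example_split_for_convert(
	const struct xfs_bmbt_irec	*old,	/* oldext record being split */
	const struct xfs_bmbt_irec	*new,	/* newext range inside it */
	struct xfs_bmbt_irec		*left,
	struct xfs_bmbt_irec		*middle,
	struct xfs_bmbt_irec		*right)
{
	xfs_fileoff_t			new_end;

	new_end = new->br_startoff + new->br_blockcount;

	/* The left piece keeps the old state and is truncated in place. */
	*left = *old;
	left->br_blockcount = new->br_startoff - old->br_startoff;

	/* The middle piece is exactly the converted range. */
	*middle = *new;

	/*
	 * The right piece covers whatever of the old record remains; the
	 * whole extent is contiguous on disk, so it starts right after
	 * the converted range.
	 */
	right->br_startoff = new_end;
	right->br_startblock = new->br_startblock + new->br_blockcount;
	right->br_blockcount =
		old->br_startoff + old->br_blockcount - new_end;
	right->br_state = old->br_state;
}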
2563 
2564 /*
2565  * Convert a hole to a delayed allocation.
2566  */
2567 STATIC void
2568 xfs_bmap_add_extent_hole_delay(
2569 	xfs_inode_t		*ip,	/* incore inode pointer */
2570 	int			whichfork,
2571 	struct xfs_iext_cursor	*icur,
2572 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
2573 {
2574 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2575 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2576 	xfs_filblks_t		newlen = 0;	/* new indirect size */
2577 	xfs_filblks_t		oldlen = 0;	/* old indirect size */
2578 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2579 	int			state = xfs_bmap_fork_to_state(whichfork);
2580 	xfs_filblks_t		temp;	 /* temp for indirect calculations */
2581 
2582 	ifp = XFS_IFORK_PTR(ip, whichfork);
2583 	ASSERT(isnullstartblock(new->br_startblock));
2584 
2585 	/*
2586 	 * Check and set flags if this segment has a left neighbor
2587 	 */
2588 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2589 		state |= BMAP_LEFT_VALID;
2590 		if (isnullstartblock(left.br_startblock))
2591 			state |= BMAP_LEFT_DELAY;
2592 	}
2593 
2594 	/*
2595 	 * Check and set flags if the current (right) segment exists.
2596 	 * If it doesn't exist, we're converting the hole at end-of-file.
2597 	 */
2598 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2599 		state |= BMAP_RIGHT_VALID;
2600 		if (isnullstartblock(right.br_startblock))
2601 			state |= BMAP_RIGHT_DELAY;
2602 	}
2603 
2604 	/*
2605 	 * Set contiguity flags on the left and right neighbors.
2606 	 * Don't let extents get too large, even if the pieces are contiguous.
2607 	 */
2608 	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2609 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2610 	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2611 		state |= BMAP_LEFT_CONTIG;
2612 
2613 	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2614 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2615 	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2616 	    (!(state & BMAP_LEFT_CONTIG) ||
2617 	     (left.br_blockcount + new->br_blockcount +
2618 	      right.br_blockcount <= MAXEXTLEN)))
2619 		state |= BMAP_RIGHT_CONTIG;
2620 
2621 	/*
2622 	 * Switch out based on the contiguity flags.
2623 	 */
2624 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2625 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2626 		/*
2627 		 * New allocation is contiguous with delayed allocations
2628 		 * on the left and on the right.
2629 		 * Merge all three into a single extent record.
2630 		 */
2631 		temp = left.br_blockcount + new->br_blockcount +
2632 			right.br_blockcount;
2633 
2634 		oldlen = startblockval(left.br_startblock) +
2635 			startblockval(new->br_startblock) +
2636 			startblockval(right.br_startblock);
2637 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2638 					 oldlen);
2639 		left.br_startblock = nullstartblock(newlen);
2640 		left.br_blockcount = temp;
2641 
2642 		xfs_iext_remove(ip, icur, state);
2643 		xfs_iext_prev(ifp, icur);
2644 		xfs_iext_update_extent(ip, state, icur, &left);
2645 		break;
2646 
2647 	case BMAP_LEFT_CONTIG:
2648 		/*
2649 		 * New allocation is contiguous with a delayed allocation
2650 		 * on the left.
2651 		 * Merge the new allocation with the left neighbor.
2652 		 */
2653 		temp = left.br_blockcount + new->br_blockcount;
2654 
2655 		oldlen = startblockval(left.br_startblock) +
2656 			startblockval(new->br_startblock);
2657 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2658 					 oldlen);
2659 		left.br_blockcount = temp;
2660 		left.br_startblock = nullstartblock(newlen);
2661 
2662 		xfs_iext_prev(ifp, icur);
2663 		xfs_iext_update_extent(ip, state, icur, &left);
2664 		break;
2665 
2666 	case BMAP_RIGHT_CONTIG:
2667 		/*
2668 		 * New allocation is contiguous with a delayed allocation
2669 		 * on the right.
2670 		 * Merge the new allocation with the right neighbor.
2671 		 */
2672 		temp = new->br_blockcount + right.br_blockcount;
2673 		oldlen = startblockval(new->br_startblock) +
2674 			startblockval(right.br_startblock);
2675 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2676 					 oldlen);
2677 		right.br_startoff = new->br_startoff;
2678 		right.br_startblock = nullstartblock(newlen);
2679 		right.br_blockcount = temp;
2680 		xfs_iext_update_extent(ip, state, icur, &right);
2681 		break;
2682 
2683 	case 0:
2684 		/*
2685 		 * New allocation is not contiguous with another
2686 		 * delayed allocation.
2687 		 * Insert a new entry.
2688 		 */
2689 		oldlen = newlen = 0;
2690 		xfs_iext_insert(ip, icur, new, state);
2691 		break;
2692 	}
2693 	if (oldlen != newlen) {
2694 		ASSERT(oldlen > newlen);
2695 		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2696 				 false);
2697 		/*
2698 		 * Nothing to do for disk quota accounting here.
2699 		 */
2700 		xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
2701 	}
2702 }
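/*
 * Illustrative sketch, not part of the original source: the reservation
 * arithmetic shared by the three merge cases above.  Merging delalloc
 * records never consumes new blocks: the combined record keeps at most
 * the sum of the old reservations, and the tail of the function hands
 * any surplus back via xfs_mod_fdblocks().  Helper name is hypothetical.
 */
static inline xfs_filblks_t
example_merged_indlen(
	struct xfs_inode	*ip,
	xfs_filblks_t		total,	/* merged br_blockcount */
	xfs_filblks_t		oldlen)	/* sum of the old reservations */
{
	xfs_filblks_t		newlen;

	newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, total), oldlen);
	ASSERT(newlen <= oldlen);
	return newlen;
}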
2703 
2704 /*
2705  * Convert a hole to a real allocation.
2706  */
2707 STATIC int				/* error */
2708 xfs_bmap_add_extent_hole_real(
2709 	struct xfs_trans	*tp,
2710 	struct xfs_inode	*ip,
2711 	int			whichfork,
2712 	struct xfs_iext_cursor	*icur,
2713 	struct xfs_btree_cur	**curp,
2714 	struct xfs_bmbt_irec	*new,
2715 	int			*logflagsp,
2716 	int			flags)
2717 {
2718 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
2719 	struct xfs_mount	*mp = ip->i_mount;
2720 	struct xfs_btree_cur	*cur = *curp;
2721 	int			error;	/* error return value */
2722 	int			i;	/* temp state */
2723 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2724 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2725 	int			rval = 0;	/* return value (logging flags) */
2726 	int			state = xfs_bmap_fork_to_state(whichfork);
2727 	struct xfs_bmbt_irec	old;
2728 
2729 	ASSERT(!isnullstartblock(new->br_startblock));
2730 	ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
2731 
2732 	XFS_STATS_INC(mp, xs_add_exlist);
2733 
2734 	/*
2735 	 * Check and set flags if this segment has a left neighbor.
2736 	 */
2737 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2738 		state |= BMAP_LEFT_VALID;
2739 		if (isnullstartblock(left.br_startblock))
2740 			state |= BMAP_LEFT_DELAY;
2741 	}
2742 
2743 	/*
2744 	 * Check and set flags if this segment has a current value.
2745 	 * Not true if we're inserting into the "hole" at eof.
2746 	 */
2747 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2748 		state |= BMAP_RIGHT_VALID;
2749 		if (isnullstartblock(right.br_startblock))
2750 			state |= BMAP_RIGHT_DELAY;
2751 	}
2752 
2753 	/*
2754 	 * We're inserting a real allocation between "left" and "right".
2755 	 * Set the contiguity flags.  Don't let extents get too large.
2756 	 */
2757 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2758 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2759 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
2760 	    left.br_state == new->br_state &&
2761 	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2762 		state |= BMAP_LEFT_CONTIG;
2763 
2764 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2765 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2766 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
2767 	    new->br_state == right.br_state &&
2768 	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2769 	    (!(state & BMAP_LEFT_CONTIG) ||
2770 	     left.br_blockcount + new->br_blockcount +
2771 	     right.br_blockcount <= MAXEXTLEN))
2772 		state |= BMAP_RIGHT_CONTIG;
2773 
2774 	error = 0;
2775 	/*
2776 	 * Select which case we're in here, and implement it.
2777 	 */
2778 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2779 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2780 		/*
2781 		 * New allocation is contiguous with real allocations on the
2782 		 * left and on the right.
2783 		 * Merge all three into a single extent record.
2784 		 */
2785 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
2786 
2787 		xfs_iext_remove(ip, icur, state);
2788 		xfs_iext_prev(ifp, icur);
2789 		xfs_iext_update_extent(ip, state, icur, &left);
2790 		ifp->if_nextents--;
2791 
2792 		if (cur == NULL) {
2793 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2794 		} else {
2795 			rval = XFS_ILOG_CORE;
2796 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
2797 			if (error)
2798 				goto done;
2799 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2800 				error = -EFSCORRUPTED;
2801 				goto done;
2802 			}
2803 			error = xfs_btree_delete(cur, &i);
2804 			if (error)
2805 				goto done;
2806 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2807 				error = -EFSCORRUPTED;
2808 				goto done;
2809 			}
2810 			error = xfs_btree_decrement(cur, 0, &i);
2811 			if (error)
2812 				goto done;
2813 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2814 				error = -EFSCORRUPTED;
2815 				goto done;
2816 			}
2817 			error = xfs_bmbt_update(cur, &left);
2818 			if (error)
2819 				goto done;
2820 		}
2821 		break;
2822 
2823 	case BMAP_LEFT_CONTIG:
2824 		/*
2825 		 * New allocation is contiguous with a real allocation
2826 		 * on the left.
2827 		 * Merge the new allocation with the left neighbor.
2828 		 */
2829 		old = left;
2830 		left.br_blockcount += new->br_blockcount;
2831 
2832 		xfs_iext_prev(ifp, icur);
2833 		xfs_iext_update_extent(ip, state, icur, &left);
2834 
2835 		if (cur == NULL) {
2836 			rval = xfs_ilog_fext(whichfork);
2837 		} else {
2838 			rval = 0;
2839 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2840 			if (error)
2841 				goto done;
2842 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2843 				error = -EFSCORRUPTED;
2844 				goto done;
2845 			}
2846 			error = xfs_bmbt_update(cur, &left);
2847 			if (error)
2848 				goto done;
2849 		}
2850 		break;
2851 
2852 	case BMAP_RIGHT_CONTIG:
2853 		/*
2854 		 * New allocation is contiguous with a real allocation
2855 		 * on the right.
2856 		 * Merge the new allocation with the right neighbor.
2857 		 */
2858 		old = right;
2859 
2860 		right.br_startoff = new->br_startoff;
2861 		right.br_startblock = new->br_startblock;
2862 		right.br_blockcount += new->br_blockcount;
2863 		xfs_iext_update_extent(ip, state, icur, &right);
2864 
2865 		if (cur == NULL) {
2866 			rval = xfs_ilog_fext(whichfork);
2867 		} else {
2868 			rval = 0;
2869 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2870 			if (error)
2871 				goto done;
2872 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2873 				error = -EFSCORRUPTED;
2874 				goto done;
2875 			}
2876 			error = xfs_bmbt_update(cur, &right);
2877 			if (error)
2878 				goto done;
2879 		}
2880 		break;
2881 
2882 	case 0:
2883 		/*
2884 		 * New allocation is not contiguous with another
2885 		 * real allocation.
2886 		 * Insert a new entry.
2887 		 */
2888 		xfs_iext_insert(ip, icur, new, state);
2889 		ifp->if_nextents++;
2890 
2891 		if (cur == NULL) {
2892 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2893 		} else {
2894 			rval = XFS_ILOG_CORE;
2895 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2896 			if (error)
2897 				goto done;
2898 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2899 				error = -EFSCORRUPTED;
2900 				goto done;
2901 			}
2902 			error = xfs_btree_insert(cur, &i);
2903 			if (error)
2904 				goto done;
2905 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2906 				error = -EFSCORRUPTED;
2907 				goto done;
2908 			}
2909 		}
2910 		break;
2911 	}
2912 
2913 	/* add reverse mapping unless caller opted out */
2914 	if (!(flags & XFS_BMAPI_NORMAP))
2915 		xfs_rmap_map_extent(tp, ip, whichfork, new);
2916 
2917 	/* convert to a btree if necessary */
2918 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2919 		int	tmp_logflags;	/* partial log flag return val */
2920 
2921 		ASSERT(cur == NULL);
2922 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2923 				&tmp_logflags, whichfork);
2924 		*logflagsp |= tmp_logflags;
2925 		cur = *curp;
2926 		if (error)
2927 			goto done;
2928 	}
2929 
2930 	/* clear out the allocated field, done with it now in any case. */
2931 	if (cur)
2932 		cur->bc_ino.allocated = 0;
2933 
2934 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2935 done:
2936 	*logflagsp |= rval;
2937 	return error;
2938 }
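/*
 * Illustrative sketch, not part of the original source: the mergeability
 * test applied to each written neighbor above, pulled out as a predicate.
 * Hypothetical name; delalloc records (BMAP_*_DELAY) are excluded by the
 * caller before this test applies.
 */
static inline bool
example_extents_can_merge(
	const struct xfs_bmbt_irec	*left,
	const struct xfs_bmbt_irec	*right)
{
	/* Adjacent in file offset space... */
	if (left->br_startoff + left->br_blockcount != right->br_startoff)
		return false;
	/* ...and adjacent on disk... */
	if (left->br_startblock + left->br_blockcount != right->br_startblock)
		return false;
	/* ...with matching written/unwritten state... */
	if (left->br_state != right->br_state)
		return false;
	/* ...and a merged length that still fits in one record. */
	return left->br_blockcount + right->br_blockcount <= MAXEXTLEN;
}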
2939 
2940 /*
2941  * Functions used in the extent read, allocate and remove paths
2942  */
2943 
2944 /*
2945  * Adjust the size of the new extent based on di_extsize and rt extsize.
2946  */
2947 int
2948 xfs_bmap_extsize_align(
2949 	xfs_mount_t	*mp,
2950 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
2951 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
2952 	xfs_extlen_t	extsz,		/* align to this extent size */
2953 	int		rt,		/* is this a realtime inode? */
2954 	int		eof,		/* is extent at end-of-file? */
2955 	int		delay,		/* creating delalloc extent? */
2956 	int		convert,	/* overwriting unwritten extent? */
2957 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
2958 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
2959 {
2960 	xfs_fileoff_t	orig_off;	/* original offset */
2961 	xfs_extlen_t	orig_alen;	/* original length */
2962 	xfs_fileoff_t	orig_end;	/* original off+len */
2963 	xfs_fileoff_t	nexto;		/* next file offset */
2964 	xfs_fileoff_t	prevo;		/* previous file offset */
2965 	xfs_fileoff_t	align_off;	/* temp for offset */
2966 	xfs_extlen_t	align_alen;	/* temp for length */
2967 	xfs_extlen_t	temp;		/* temp for calculations */
2968 
2969 	if (convert)
2970 		return 0;
2971 
2972 	orig_off = align_off = *offp;
2973 	orig_alen = align_alen = *lenp;
2974 	orig_end = orig_off + orig_alen;
2975 
2976 	/*
2977 	 * If this request overlaps an existing extent, then don't
2978 	 * attempt to perform any additional alignment.
2979 	 */
2980 	if (!delay && !eof &&
2981 	    (orig_off >= gotp->br_startoff) &&
2982 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2983 		return 0;
2984 	}
2985 
2986 	/*
2987 	 * If the file offset is unaligned vs. the extent size
2988 	 * we need to align it.  This will be possible unless
2989 	 * the file was previously written with a kernel that didn't
2990 	 * perform this alignment, or if a truncate shot us in the
2991 	 * foot.
2992 	 */
2993 	div_u64_rem(orig_off, extsz, &temp);
2994 	if (temp) {
2995 		align_alen += temp;
2996 		align_off -= temp;
2997 	}
2998 
2999 	/* Same adjustment for the end of the requested area. */
3000 	temp = (align_alen % extsz);
3001 	if (temp)
3002 		align_alen += extsz - temp;
3003 
3004 	/*
3005 	 * For large extent hint sizes, the aligned extent might be larger than
3006 	 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3007 	 * the length back under MAXEXTLEN. The outer allocation loops handle
3008 	 * short allocation just fine, so it is safe to do this. We only want to
3009 	 * short allocations just fine, so it is safe to do this. We only want to
3010 	 * operations are required.
3011 	 */
3012 	while (align_alen > MAXEXTLEN)
3013 		align_alen -= extsz;
3014 	ASSERT(align_alen <= MAXEXTLEN);
3015 
3016 	/*
3017 	 * If the previous block overlaps with this proposed allocation
3018 	 * then move the start forward without adjusting the length.
3019 	 */
3020 	if (prevp->br_startoff != NULLFILEOFF) {
3021 		if (prevp->br_startblock == HOLESTARTBLOCK)
3022 			prevo = prevp->br_startoff;
3023 		else
3024 			prevo = prevp->br_startoff + prevp->br_blockcount;
3025 	} else
3026 		prevo = 0;
3027 	if (align_off != orig_off && align_off < prevo)
3028 		align_off = prevo;
3029 	/*
3030 	 * If the next block overlaps with this proposed allocation
3031 	 * then move the start back without adjusting the length,
3032 	 * but not before offset 0.
3033 	 * This may of course make the start overlap the previous block,
3034 	 * and if we hit the offset 0 limit then the next block
3035 	 * can still overlap too.
3036 	 */
3037 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
3038 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3039 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3040 			nexto = gotp->br_startoff + gotp->br_blockcount;
3041 		else
3042 			nexto = gotp->br_startoff;
3043 	} else
3044 		nexto = NULLFILEOFF;
3045 	if (!eof &&
3046 	    align_off + align_alen != orig_end &&
3047 	    align_off + align_alen > nexto)
3048 		align_off = nexto > align_alen ? nexto - align_alen : 0;
3049 	/*
3050 	 * If we're now overlapping the next or previous extent, that
3051 	 * means we can't fit an extsz piece in this hole.  Just move
3052 	 * the start forward to the first valid spot and set
3053 	 * the length so we hit the end.
3054 	 */
3055 	if (align_off != orig_off && align_off < prevo)
3056 		align_off = prevo;
3057 	if (align_off + align_alen != orig_end &&
3058 	    align_off + align_alen > nexto &&
3059 	    nexto != NULLFILEOFF) {
3060 		ASSERT(nexto > prevo);
3061 		align_alen = nexto - align_off;
3062 	}
3063 
3064 	/*
3065 	 * If realtime, and the result isn't a multiple of the realtime
3066 	 * extent size we need to remove blocks until it is.
3067 	 */
3068 	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3069 		/*
3070 		 * We're not covering the original request, or
3071 		 * we won't be able to once we fix the length.
3072 		 */
3073 		if (orig_off < align_off ||
3074 		    orig_end > align_off + align_alen ||
3075 		    align_alen - temp < orig_alen)
3076 			return -EINVAL;
3077 		/*
3078 		 * Try to fix it by moving the start up.
3079 		 */
3080 		if (align_off + temp <= orig_off) {
3081 			align_alen -= temp;
3082 			align_off += temp;
3083 		}
3084 		/*
3085 		 * Try to fix it by moving the end in.
3086 		 */
3087 		else if (align_off + align_alen - temp >= orig_end)
3088 			align_alen -= temp;
3089 		/*
3090 		 * Set the start to the minimum then trim the length.
3091 		 */
3092 		else {
3093 			align_alen -= orig_off - align_off;
3094 			align_off = orig_off;
3095 			align_alen -= align_alen % mp->m_sb.sb_rextsize;
3096 		}
3097 		/*
3098 		 * Result doesn't cover the request, fail it.
3099 		 */
3100 		if (orig_off < align_off || orig_end > align_off + align_alen)
3101 			return -EINVAL;
3102 	} else {
3103 		ASSERT(orig_off >= align_off);
3104 		/* see MAXEXTLEN handling above */
3105 		ASSERT(orig_end <= align_off + align_alen ||
3106 		       align_alen + extsz > MAXEXTLEN);
3107 	}
3108 
3109 #ifdef DEBUG
3110 	if (!eof && gotp->br_startoff != NULLFILEOFF)
3111 		ASSERT(align_off + align_alen <= gotp->br_startoff);
3112 	if (prevp->br_startoff != NULLFILEOFF)
3113 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3114 #endif
3115 
3116 	*lenp = align_alen;
3117 	*offp = align_off;
3118 	return 0;
3119 }
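/*
 * Illustrative sketch, not part of the original source: the basic
 * rounding step performed above before the neighbor trimming kicks in.
 * Hypothetical name; div_u64_rem() is the helper the real code uses.
 * For example, off = 5, len = 4, extsz = 4 yields off = 4, len = 8,
 * i.e. the aligned range [4, 12) still covers the requested [5, 9).
 */
static inline void
example_round_to_extsz(
	xfs_extlen_t	extsz,
	xfs_fileoff_t	*off,	/* in/out: start, rounded down */
	xfs_extlen_t	*len)	/* in/out: length, rounded up */
{
	xfs_extlen_t	temp;

	/* Pull the start back to an extsz boundary, growing the length. */
	div_u64_rem(*off, extsz, &temp);
	if (temp) {
		*len += temp;
		*off -= temp;
	}

	/* Push the end out to the next extsz boundary. */
	temp = *len % extsz;
	if (temp)
		*len += extsz - temp;
}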
3120 
3121 #define XFS_ALLOC_GAP_UNITS	4
3122 
3123 void
3124 xfs_bmap_adjacent(
3125 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
3126 {
3127 	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
3128 	xfs_agnumber_t	fb_agno;	/* ag number of tp->t_firstblock */
3129 	xfs_mount_t	*mp;		/* mount point structure */
3130 	int		nullfb;		/* true if tp->t_firstblock isn't set */
3131 	int		rt;		/* true if inode is realtime */
3132 
3133 #define	ISVALID(x,y)	\
3134 	(rt ? \
3135 		(x) < mp->m_sb.sb_rblocks : \
3136 		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3137 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3138 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3139 
3140 	mp = ap->ip->i_mount;
3141 	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3142 	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3143 		(ap->datatype & XFS_ALLOC_USERDATA);
3144 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3145 							ap->tp->t_firstblock);
3146 	/*
3147 	 * If allocating at eof, and there's a previous real block,
3148 	 * try to use its last block as our starting point.
3149 	 */
3150 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3151 	    !isnullstartblock(ap->prev.br_startblock) &&
3152 	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3153 		    ap->prev.br_startblock)) {
3154 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3155 		/*
3156 		 * Adjust for the gap between prevp and us.
3157 		 */
3158 		adjust = ap->offset -
3159 			(ap->prev.br_startoff + ap->prev.br_blockcount);
3160 		if (adjust &&
3161 		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3162 			ap->blkno += adjust;
3163 	}
3164 	/*
3165 	 * If not at eof, then compare the two neighbor blocks.
3166 	 * Figure out whether either one gives us a good starting point,
3167 	 * and pick the better one.
3168 	 */
3169 	else if (!ap->eof) {
3170 		xfs_fsblock_t	gotbno;		/* right side block number */
3171 		xfs_fsblock_t	gotdiff = 0;	/* right side difference */
3172 		xfs_fsblock_t	prevbno;	/* left side block number */
3173 		xfs_fsblock_t	prevdiff = 0;	/* left side difference */
3174 
3175 		/*
3176 		 * If there's a previous (left) block, select a requested
3177 		 * start block based on it.
3178 		 */
3179 		if (ap->prev.br_startoff != NULLFILEOFF &&
3180 		    !isnullstartblock(ap->prev.br_startblock) &&
3181 		    (prevbno = ap->prev.br_startblock +
3182 			       ap->prev.br_blockcount) &&
3183 		    ISVALID(prevbno, ap->prev.br_startblock)) {
3184 			/*
3185 			 * Calculate gap to end of previous block.
3186 			 */
3187 			adjust = prevdiff = ap->offset -
3188 				(ap->prev.br_startoff +
3189 				 ap->prev.br_blockcount);
3190 			/*
3191 			 * Figure the startblock based on the previous block's
3192 			 * end and the gap size.
3193 			 * Heuristic!
3194 			 * If the gap is large relative to the piece we're
3195 			 * allocating, or using it gives us an invalid block
3196 			 * number, then just use the end of the previous block.
3197 			 */
3198 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3199 			    ISVALID(prevbno + prevdiff,
3200 				    ap->prev.br_startblock))
3201 				prevbno += adjust;
3202 			else
3203 				prevdiff += adjust;
3204 			/*
3205 			 * If the firstblock forbids it, can't use it,
3206 			 * must use default.
3207 			 */
3208 			if (!rt && !nullfb &&
3209 			    XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3210 				prevbno = NULLFSBLOCK;
3211 		}
3212 		/*
3213 		 * No previous block or can't follow it, just default.
3214 		 */
3215 		else
3216 			prevbno = NULLFSBLOCK;
3217 		/*
3218 		 * If there's a following (right) block, select a requested
3219 		 * start block based on it.
3220 		 */
3221 		if (!isnullstartblock(ap->got.br_startblock)) {
3222 			/*
3223 			 * Calculate gap to start of next block.
3224 			 */
3225 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
3226 			/*
3227 			 * Figure the startblock based on the next block's
3228 			 * start and the gap size.
3229 			 */
3230 			gotbno = ap->got.br_startblock;
3231 			/*
3232 			 * Heuristic!
3233 			 * If the gap is large relative to the piece we're
3234 			 * allocating, or using it gives us an invalid block
3235 			 * number, then just use the start of the next block
3236 			 * offset by our length.
3237 			 */
3238 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3239 			    ISVALID(gotbno - gotdiff, gotbno))
3240 				gotbno -= adjust;
3241 			else if (ISVALID(gotbno - ap->length, gotbno)) {
3242 				gotbno -= ap->length;
3243 				gotdiff += adjust - ap->length;
3244 			} else
3245 				gotdiff += adjust;
3246 			/*
3247 			 * If the firstblock forbids it, can't use it,
3248 			 * must use default.
3249 			 */
3250 			if (!rt && !nullfb &&
3251 			    XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3252 				gotbno = NULLFSBLOCK;
3253 		}
3254 		/*
3255 		 * No next block, just default.
3256 		 */
3257 		else
3258 			gotbno = NULLFSBLOCK;
3259 		/*
3260 		 * If both valid, pick the better one, else the only good
3261 		 * one, else ap->blkno is already set (to 0 or the inode block).
3262 		 */
3263 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3264 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3265 		else if (prevbno != NULLFSBLOCK)
3266 			ap->blkno = prevbno;
3267 		else if (gotbno != NULLFSBLOCK)
3268 			ap->blkno = gotbno;
3269 	}
3270 #undef ISVALID
3271 }
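/*
 * Illustrative sketch, not part of the original source: the ISVALID()
 * macro above written out as a function.  A candidate block is usable
 * only if it lies inside the realtime device (rt case) or inside the
 * same AG as the block it is being placed next to (y).
 */
static inline bool
example_adjacent_blkno_valid(
	struct xfs_mount	*mp,
	bool			rt,	/* realtime allocation? */
	xfs_fsblock_t		x,	/* candidate block number */
	xfs_fsblock_t		y)	/* neighbor it must stay near */
{
	if (rt)
		return x < mp->m_sb.sb_rblocks;
	return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
	       XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
	       XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
}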
3272 
3273 static int
3274 xfs_bmap_longest_free_extent(
3275 	struct xfs_trans	*tp,
3276 	xfs_agnumber_t		ag,
3277 	xfs_extlen_t		*blen,
3278 	int			*notinit)
3279 {
3280 	struct xfs_mount	*mp = tp->t_mountp;
3281 	struct xfs_perag	*pag;
3282 	xfs_extlen_t		longest;
3283 	int			error = 0;
3284 
3285 	pag = xfs_perag_get(mp, ag);
3286 	if (!pag->pagf_init) {
3287 		error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3288 		if (error) {
3289 			/* Couldn't lock the AGF, so skip this AG. */
3290 			if (error == -EAGAIN) {
3291 				*notinit = 1;
3292 				error = 0;
3293 			}
3294 			goto out;
3295 		}
3296 	}
3297 
3298 	longest = xfs_alloc_longest_free_extent(pag,
3299 				xfs_alloc_min_freelist(mp, pag),
3300 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3301 	if (*blen < longest)
3302 		*blen = longest;
3303 
3304 out:
3305 	xfs_perag_put(pag);
3306 	return error;
3307 }
3308 
3309 static void
3310 xfs_bmap_select_minlen(
3311 	struct xfs_bmalloca	*ap,
3312 	struct xfs_alloc_arg	*args,
3313 	xfs_extlen_t		*blen,
3314 	int			notinit)
3315 {
3316 	if (notinit || *blen < ap->minlen) {
3317 		/*
3318 		 * Since we only read the AGF with a trylock, it is still
3319 		 * possible that there is space for this request.
3320 		 */
3321 		args->minlen = ap->minlen;
3322 	} else if (*blen < args->maxlen) {
3323 		/*
3324 		 * If the best seen length is less than the request length,
3325 		 * use the best as the minimum.
3326 		 */
3327 		args->minlen = *blen;
3328 	} else {
3329 		/*
3330 		 * Otherwise we've seen an extent as big as maxlen, use that
3331 		 * as the minimum.
3332 		 */
3333 		args->minlen = args->maxlen;
3334 	}
3335 }
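/*
 * Illustrative summary, not in the original source, of the minlen
 * choice above:
 *
 *	notinit or *blen < ap->minlen	-> ap->minlen (the trylock may
 *					   have hidden usable space)
 *	ap->minlen <= *blen < maxlen	-> *blen (best length actually seen)
 *	*blen >= maxlen			-> maxlen (a full-size extent exists)
 */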
3336 
3337 STATIC int
3338 xfs_bmap_btalloc_nullfb(
3339 	struct xfs_bmalloca	*ap,
3340 	struct xfs_alloc_arg	*args,
3341 	xfs_extlen_t		*blen)
3342 {
3343 	struct xfs_mount	*mp = ap->ip->i_mount;
3344 	xfs_agnumber_t		ag, startag;
3345 	int			notinit = 0;
3346 	int			error;
3347 
3348 	args->type = XFS_ALLOCTYPE_START_BNO;
3349 	args->total = ap->total;
3350 
3351 	startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3352 	if (startag == NULLAGNUMBER)
3353 		startag = ag = 0;
3354 
3355 	while (*blen < args->maxlen) {
3356 		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3357 						     &notinit);
3358 		if (error)
3359 			return error;
3360 
3361 		if (++ag == mp->m_sb.sb_agcount)
3362 			ag = 0;
3363 		if (ag == startag)
3364 			break;
3365 	}
3366 
3367 	xfs_bmap_select_minlen(ap, args, blen, notinit);
3368 	return 0;
3369 }
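/*
 * Illustrative sketch, not part of the original source: the wrap-around
 * AG walk used above, shown as a stand-alone loop.  Hypothetical name;
 * the real loop additionally stops early once *blen reaches maxlen.
 */
static inline int
example_scan_all_ags(
	struct xfs_trans	*tp,
	xfs_agnumber_t		startag,
	xfs_extlen_t		*blen,
	int			*notinit)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_agnumber_t		ag = startag;
	int			error;

	do {
		error = xfs_bmap_longest_free_extent(tp, ag, blen, notinit);
		if (error)
			return error;
		/* Wrap to AG 0 after the last AG so every AG is visited. */
		if (++ag == mp->m_sb.sb_agcount)
			ag = 0;
	} while (ag != startag);
	return 0;
}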
3370 
3371 STATIC int
3372 xfs_bmap_btalloc_filestreams(
3373 	struct xfs_bmalloca	*ap,
3374 	struct xfs_alloc_arg	*args,
3375 	xfs_extlen_t		*blen)
3376 {
3377 	struct xfs_mount	*mp = ap->ip->i_mount;
3378 	xfs_agnumber_t		ag;
3379 	int			notinit = 0;
3380 	int			error;
3381 
3382 	args->type = XFS_ALLOCTYPE_NEAR_BNO;
3383 	args->total = ap->total;
3384 
3385 	ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3386 	if (ag == NULLAGNUMBER)
3387 		ag = 0;
3388 
3389 	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3390 	if (error)
3391 		return error;
3392 
3393 	if (*blen < args->maxlen) {
3394 		error = xfs_filestream_new_ag(ap, &ag);
3395 		if (error)
3396 			return error;
3397 
3398 		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3399 						     &notinit);
3400 		if (error)
3401 			return error;
3402 
3403 	}
3404 
3405 	xfs_bmap_select_minlen(ap, args, blen, notinit);
3406 
3407 	/*
3408 	 * Set the failure fallback case to look in the selected AG as stream
3409 	 * may have moved.
3410 	 */
3411 	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3412 	return 0;
3413 }
3414 
3415 /* Update all inode and quota accounting for the allocation we just did. */
3416 static void
3417 xfs_bmap_btalloc_accounting(
3418 	struct xfs_bmalloca	*ap,
3419 	struct xfs_alloc_arg	*args)
3420 {
3421 	if (ap->flags & XFS_BMAPI_COWFORK) {
3422 		/*
3423 		 * COW fork blocks are in-core only and thus are treated as
3424 		 * in-core quota reservation (like delalloc blocks) even when
3425 		 * converted to real blocks. The quota reservation is not
3426 		 * accounted to disk until blocks are remapped to the data
3427 		 * fork. So if these blocks were previously delalloc, we
3428 		 * already have quota reservation and there's nothing to do
3429 		 * yet.
3430 		 */
3431 		if (ap->wasdel) {
3432 			xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3433 			return;
3434 		}
3435 
3436 		/*
3437 		 * Otherwise, we've allocated blocks in a hole. The transaction
3438 		 * has acquired in-core quota reservation for this extent.
3439 		 * Rather than account these as real blocks, however, we reduce
3440 		 * the transaction quota reservation based on the allocation.
3441 		 * This essentially transfers the transaction quota reservation
3442 		 * to that of a delalloc extent.
3443 		 */
3444 		ap->ip->i_delayed_blks += args->len;
3445 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3446 				-(long)args->len);
3447 		return;
3448 	}
3449 
3450 	/* data/attr fork only */
3451 	ap->ip->i_d.di_nblocks += args->len;
3452 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3453 	if (ap->wasdel) {
3454 		ap->ip->i_delayed_blks -= args->len;
3455 		xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3456 	}
3457 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3458 		ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3459 		args->len);
3460 }
3461 
3462 STATIC int
3463 xfs_bmap_btalloc(
3464 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
3465 {
3466 	xfs_mount_t	*mp;		/* mount point structure */
3467 	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
3468 	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
3469 	xfs_agnumber_t	fb_agno;	/* ag number of tp->t_firstblock */
3470 	xfs_agnumber_t	ag;
3471 	xfs_alloc_arg_t	args;
3472 	xfs_fileoff_t	orig_offset;
3473 	xfs_extlen_t	orig_length;
3474 	xfs_extlen_t	blen;
3475 	xfs_extlen_t	nextminlen = 0;
3476 	int		nullfb;		/* true if tp->t_firstblock isn't set */
3477 	int		isaligned;
3478 	int		tryagain;
3479 	int		error;
3480 	int		stripe_align;
3481 
3482 	ASSERT(ap->length);
3483 	orig_offset = ap->offset;
3484 	orig_length = ap->length;
3485 
3486 	mp = ap->ip->i_mount;
3487 
3488 	/* stripe alignment for allocation is determined by mount parameters */
3489 	stripe_align = 0;
3490 	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3491 		stripe_align = mp->m_swidth;
3492 	else if (mp->m_dalign)
3493 		stripe_align = mp->m_dalign;
3494 
3495 	if (ap->flags & XFS_BMAPI_COWFORK)
3496 		align = xfs_get_cowextsz_hint(ap->ip);
3497 	else if (ap->datatype & XFS_ALLOC_USERDATA)
3498 		align = xfs_get_extsz_hint(ap->ip);
3499 	if (align) {
3500 		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3501 						align, 0, ap->eof, 0, ap->conv,
3502 						&ap->offset, &ap->length);
3503 		ASSERT(!error);
3504 		ASSERT(ap->length);
3505 	}
3506 
3507 
3508 	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3509 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3510 							ap->tp->t_firstblock);
3511 	if (nullfb) {
3512 		if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3513 		    xfs_inode_is_filestream(ap->ip)) {
3514 			ag = xfs_filestream_lookup_ag(ap->ip);
3515 			ag = (ag != NULLAGNUMBER) ? ag : 0;
3516 			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3517 		} else {
3518 			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3519 		}
3520 	} else
3521 		ap->blkno = ap->tp->t_firstblock;
3522 
3523 	xfs_bmap_adjacent(ap);
3524 
3525 	/*
3526 	 * If allowed, use ap->blkno; otherwise must use firstblock since
3527 	 * it's in the right allocation group.
3528 	 */
3529 	if (!nullfb && XFS_FSB_TO_AGNO(mp, ap->blkno) != fb_agno)
3530 		ap->blkno = ap->tp->t_firstblock;
3533 	/*
3534 	 * Normal allocation, done through xfs_alloc_vextent.
3535 	 */
3536 	tryagain = isaligned = 0;
3537 	memset(&args, 0, sizeof(args));
3538 	args.tp = ap->tp;
3539 	args.mp = mp;
3540 	args.fsbno = ap->blkno;
3541 	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3542 
3543 	/* Trim the allocation back to the maximum an AG can fit. */
3544 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
3545 	blen = 0;
3546 	if (nullfb) {
3547 		/*
3548 		 * Search for an allocation group with a single extent large
3549 		 * enough for the request.  If one isn't found, then adjust
3550 		 * the minimum allocation size to the largest space found.
3551 		 */
3552 		if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3553 		    xfs_inode_is_filestream(ap->ip))
3554 			error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3555 		else
3556 			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3557 		if (error)
3558 			return error;
3559 	} else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3560 		if (xfs_inode_is_filestream(ap->ip))
3561 			args.type = XFS_ALLOCTYPE_FIRST_AG;
3562 		else
3563 			args.type = XFS_ALLOCTYPE_START_BNO;
3564 		args.total = args.minlen = ap->minlen;
3565 	} else {
3566 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
3567 		args.total = ap->total;
3568 		args.minlen = ap->minlen;
3569 	}
3570 	/* apply extent size hints if obtained earlier */
3571 	if (align) {
3572 		args.prod = align;
3573 		div_u64_rem(ap->offset, args.prod, &args.mod);
3574 		if (args.mod)
3575 			args.mod = args.prod - args.mod;
3576 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3577 		args.prod = 1;
3578 		args.mod = 0;
3579 	} else {
3580 		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3581 		div_u64_rem(ap->offset, args.prod, &args.mod);
3582 		if (args.mod)
3583 			args.mod = args.prod - args.mod;
3584 	}
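	/*
	 * The mod/prod pair asks the allocator for a length with
	 * len % prod == mod so that offset + len ends up aligned to prod.
	 * For example, with 1k blocks and 4k pages, prod = 4; an allocation
	 * at file offset 6 gets mod = 4 - (6 % 4) = 2, so the trimmed
	 * length ends on a page boundary.
	 */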
3585 	/*
3586 	 * If we are not low on available data blocks, and the underlying
3587 	 * logical volume manager is a stripe, and the file offset is zero then
3588 	 * try to allocate data blocks on stripe unit boundary. NOTE: ap->aeof
3589 	 * is only set if the allocation length is >= the stripe unit and the
3590 	 * allocation offset is at the end of file.
3591 	 */
3592 	if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
3593 		if (!ap->offset) {
3594 			args.alignment = stripe_align;
3595 			atype = args.type;
3596 			isaligned = 1;
3597 			/*
3598 			 * Adjust minlen to try and preserve alignment if we
3599 			 * can't guarantee an aligned maxlen extent.
3600 			 */
3601 			if (blen > args.alignment &&
3602 			    blen <= args.maxlen + args.alignment)
3603 				args.minlen = blen - args.alignment;
3604 			args.minalignslop = 0;
3605 		} else {
3606 			/*
3607 			 * First try an exact bno allocation.
3608 			 * If it fails then do a near or start bno
3609 			 * allocation with alignment turned on.
3610 			 */
3611 			atype = args.type;
3612 			tryagain = 1;
3613 			args.type = XFS_ALLOCTYPE_THIS_BNO;
3614 			args.alignment = 1;
3615 			/*
3616 			 * Compute the minlen+alignment for the
3617 			 * next case.  Set slop so that the value
3618 			 * of minlen+alignment+slop doesn't go up
3619 			 * between the calls.
3620 			 */
3621 			if (blen > stripe_align && blen <= args.maxlen)
3622 				nextminlen = blen - stripe_align;
3623 			else
3624 				nextminlen = args.minlen;
3625 			if (nextminlen + stripe_align > args.minlen + 1)
3626 				args.minalignslop =
3627 					nextminlen + stripe_align -
3628 					args.minlen - 1;
3629 			else
3630 				args.minalignslop = 0;
3631 		}
3632 	} else {
3633 		args.alignment = 1;
3634 		args.minalignslop = 0;
3635 	}
3636 	args.minleft = ap->minleft;
3637 	args.wasdel = ap->wasdel;
3638 	args.resv = XFS_AG_RESV_NONE;
3639 	args.datatype = ap->datatype;
3640 
3641 	error = xfs_alloc_vextent(&args);
3642 	if (error)
3643 		return error;
3644 
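	/*
	 * Fallback ladder: a failed exact-bno attempt is retried with stripe
	 * alignment, then with alignment turned off; then, if no first block
	 * has been allocated yet, with the caller's smaller minlen, and
	 * finally anywhere in the filesystem with the transaction dropped
	 * into low-space mode.
	 */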
3645 	if (tryagain && args.fsbno == NULLFSBLOCK) {
3646 		/*
3647 		 * Exact allocation failed. Now try with alignment
3648 		 * turned on.
3649 		 */
3650 		args.type = atype;
3651 		args.fsbno = ap->blkno;
3652 		args.alignment = stripe_align;
3653 		args.minlen = nextminlen;
3654 		args.minalignslop = 0;
3655 		isaligned = 1;
3656 		if ((error = xfs_alloc_vextent(&args)))
3657 			return error;
3658 	}
3659 	if (isaligned && args.fsbno == NULLFSBLOCK) {
3660 		/*
3661 		 * allocation failed, so turn off alignment and
3662 		 * try again.
3663 		 */
3664 		args.type = atype;
3665 		args.fsbno = ap->blkno;
3666 		args.alignment = 0;
3667 		if ((error = xfs_alloc_vextent(&args)))
3668 			return error;
3669 	}
3670 	if (args.fsbno == NULLFSBLOCK && nullfb &&
3671 	    args.minlen > ap->minlen) {
3672 		args.minlen = ap->minlen;
3673 		args.type = XFS_ALLOCTYPE_START_BNO;
3674 		args.fsbno = ap->blkno;
3675 		if ((error = xfs_alloc_vextent(&args)))
3676 			return error;
3677 	}
3678 	if (args.fsbno == NULLFSBLOCK && nullfb) {
3679 		args.fsbno = 0;
3680 		args.type = XFS_ALLOCTYPE_FIRST_AG;
3681 		args.total = ap->minlen;
3682 		if ((error = xfs_alloc_vextent(&args)))
3683 			return error;
3684 		ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3685 	}
3686 	if (args.fsbno != NULLFSBLOCK) {
3687 		/*
3688 		 * Check that the allocation happened in the same or a higher
3689 		 * AG than the first block that was allocated.
3690 		 */
3691 		ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
3692 		       XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
3693 		       XFS_FSB_TO_AGNO(mp, args.fsbno));
3694 
3695 		ap->blkno = args.fsbno;
3696 		if (ap->tp->t_firstblock == NULLFSBLOCK)
3697 			ap->tp->t_firstblock = args.fsbno;
3698 		ASSERT(nullfb || fb_agno <= args.agno);
3699 		ap->length = args.len;
3700 		/*
3701 		 * If the extent size hint is active, we tried to round the
3702 		 * caller's allocation request offset down to extsz and the
3703 		 * length up to another extsz boundary.  If we found a free
3704 		 * extent we mapped it in starting at this new offset.  If the
3705 		 * newly mapped space isn't long enough to cover any of the
3706 		 * range of offsets that was originally requested, move the
3707 		 * mapping up so that we can fill as much of the caller's
3708 		 * original request as possible.  Free space is apparently
3709 		 * very fragmented so we're unlikely to be able to satisfy the
3710 		 * hints anyway.
3711 		 */
3712 		if (ap->length <= orig_length)
3713 			ap->offset = orig_offset;
3714 		else if (ap->offset + ap->length < orig_offset + orig_length)
3715 			ap->offset = orig_offset + orig_length - ap->length;
3716 		xfs_bmap_btalloc_accounting(ap, &args);
3717 	} else {
3718 		ap->blkno = NULLFSBLOCK;
3719 		ap->length = 0;
3720 	}
3721 	return 0;
3722 }
3723 
3724 /* Trim extent to fit a logical block range. */
3725 void
3726 xfs_trim_extent(
3727 	struct xfs_bmbt_irec	*irec,
3728 	xfs_fileoff_t		bno,
3729 	xfs_filblks_t		len)
3730 {
3731 	xfs_fileoff_t		distance;
3732 	xfs_fileoff_t		end = bno + len;
3733 
3734 	if (irec->br_startoff + irec->br_blockcount <= bno ||
3735 	    irec->br_startoff >= end) {
3736 		irec->br_blockcount = 0;
3737 		return;
3738 	}
3739 
3740 	if (irec->br_startoff < bno) {
3741 		distance = bno - irec->br_startoff;
3742 		if (isnullstartblock(irec->br_startblock))
3743 			irec->br_startblock = DELAYSTARTBLOCK;
3744 		if (irec->br_startblock != DELAYSTARTBLOCK &&
3745 		    irec->br_startblock != HOLESTARTBLOCK)
3746 			irec->br_startblock += distance;
3747 		irec->br_startoff += distance;
3748 		irec->br_blockcount -= distance;
3749 	}
3750 
3751 	if (end < irec->br_startoff + irec->br_blockcount) {
3752 		distance = irec->br_startoff + irec->br_blockcount - end;
3753 		irec->br_blockcount -= distance;
3754 	}
3755 }
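
/*
 * For example, trimming the extent [startoff 10, 20 blocks) to the range
 * bno = 15, len = 10 first strips 5 blocks off the front (startoff and
 * startblock both move up by 5) and then 5 blocks off the back, leaving
 * [startoff 15, 10 blocks).
 */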
3756 
3757 /*
3758  * Trim the returned map to the required bounds
3759  */
3760 STATIC void
3761 xfs_bmapi_trim_map(
3762 	struct xfs_bmbt_irec	*mval,
3763 	struct xfs_bmbt_irec	*got,
3764 	xfs_fileoff_t		*bno,
3765 	xfs_filblks_t		len,
3766 	xfs_fileoff_t		obno,
3767 	xfs_fileoff_t		end,
3768 	int			n,
3769 	int			flags)
3770 {
3771 	if ((flags & XFS_BMAPI_ENTIRE) ||
3772 	    got->br_startoff + got->br_blockcount <= obno) {
3773 		*mval = *got;
3774 		if (isnullstartblock(got->br_startblock))
3775 			mval->br_startblock = DELAYSTARTBLOCK;
3776 		return;
3777 	}
3778 
3779 	if (obno > *bno)
3780 		*bno = obno;
3781 	ASSERT((*bno >= obno) || (n == 0));
3782 	ASSERT(*bno < end);
3783 	mval->br_startoff = *bno;
3784 	if (isnullstartblock(got->br_startblock))
3785 		mval->br_startblock = DELAYSTARTBLOCK;
3786 	else
3787 		mval->br_startblock = got->br_startblock +
3788 					(*bno - got->br_startoff);
3789 	/*
3790 	 * Return the minimum of what we got and what we asked for as
3791 	 * the length.  We can use the len variable here because it is
3792 	 * modified below, and we could have been through here before
3793 	 * if the first part of the allocation didn't overlap what was
3794 	 * asked for.
3795 	 */
3796 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3797 			got->br_blockcount - (*bno - got->br_startoff));
3798 	mval->br_state = got->br_state;
3799 	ASSERT(mval->br_blockcount <= len);
3800 	return;
3801 }
3802 
3803 /*
3804  * Update and validate the extent map to return
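 * to the caller, merging the new mapping into the previous one when the
 * two are contiguous (real or delalloc), and advancing the map pointer
 * and count otherwise.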
3805  */
3806 STATIC void
3807 xfs_bmapi_update_map(
3808 	struct xfs_bmbt_irec	**map,
3809 	xfs_fileoff_t		*bno,
3810 	xfs_filblks_t		*len,
3811 	xfs_fileoff_t		obno,
3812 	xfs_fileoff_t		end,
3813 	int			*n,
3814 	int			flags)
3815 {
3816 	xfs_bmbt_irec_t	*mval = *map;
3817 
3818 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3819 	       ((mval->br_startoff + mval->br_blockcount) <= end));
3820 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3821 	       (mval->br_startoff < obno));
3822 
3823 	*bno = mval->br_startoff + mval->br_blockcount;
3824 	*len = end - *bno;
3825 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3826 		/* update previous map with new information */
3827 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
3828 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3829 		ASSERT(mval->br_state == mval[-1].br_state);
3830 		mval[-1].br_blockcount = mval->br_blockcount;
3831 		mval[-1].br_state = mval->br_state;
3832 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3833 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
3834 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
3835 		   mval->br_startblock == mval[-1].br_startblock +
3836 					  mval[-1].br_blockcount &&
3837 		   mval[-1].br_state == mval->br_state) {
3838 		ASSERT(mval->br_startoff ==
3839 		       mval[-1].br_startoff + mval[-1].br_blockcount);
3840 		mval[-1].br_blockcount += mval->br_blockcount;
3841 	} else if (*n > 0 &&
3842 		   mval->br_startblock == DELAYSTARTBLOCK &&
3843 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
3844 		   mval->br_startoff ==
3845 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
3846 		mval[-1].br_blockcount += mval->br_blockcount;
3847 		mval[-1].br_state = mval->br_state;
3848 	} else if (!((*n == 0) &&
3849 		     ((mval->br_startoff + mval->br_blockcount) <=
3850 		      obno))) {
3851 		mval++;
3852 		(*n)++;
3853 	}
3854 	*map = mval;
3855 }
3856 
3857 /*
3858  * Map file blocks to filesystem blocks without allocation.
3859  */
3860 int
3861 xfs_bmapi_read(
3862 	struct xfs_inode	*ip,
3863 	xfs_fileoff_t		bno,
3864 	xfs_filblks_t		len,
3865 	struct xfs_bmbt_irec	*mval,
3866 	int			*nmap,
3867 	int			flags)
3868 {
3869 	struct xfs_mount	*mp = ip->i_mount;
3870 	int			whichfork = xfs_bmapi_whichfork(flags);
3871 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
3872 	struct xfs_bmbt_irec	got;
3873 	xfs_fileoff_t		obno;
3874 	xfs_fileoff_t		end;
3875 	struct xfs_iext_cursor	icur;
3876 	int			error;
3877 	bool			eof = false;
3878 	int			n = 0;
3879 
3880 	ASSERT(*nmap >= 1);
3881 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3882 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3883 
3884 	if (WARN_ON_ONCE(!ifp))
3885 		return -EFSCORRUPTED;
3886 
3887 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3888 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
3889 		return -EFSCORRUPTED;
3890 
3891 	if (XFS_FORCED_SHUTDOWN(mp))
3892 		return -EIO;
3893 
3894 	XFS_STATS_INC(mp, xs_blk_mapr);
3895 
3896 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3897 		error = xfs_iread_extents(NULL, ip, whichfork);
3898 		if (error)
3899 			return error;
3900 	}
3901 
3902 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3903 		eof = true;
3904 	end = bno + len;
3905 	obno = bno;
3906 
3907 	while (bno < end && n < *nmap) {
3908 		/* Reading past eof, act as though there's a hole up to end. */
3909 		if (eof)
3910 			got.br_startoff = end;
3911 		if (got.br_startoff > bno) {
3912 			/* Reading in a hole.  */
3913 			mval->br_startoff = bno;
3914 			mval->br_startblock = HOLESTARTBLOCK;
3915 			mval->br_blockcount =
3916 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3917 			mval->br_state = XFS_EXT_NORM;
3918 			bno += mval->br_blockcount;
3919 			len -= mval->br_blockcount;
3920 			mval++;
3921 			n++;
3922 			continue;
3923 		}
3924 
3925 		/* set up the extent map to return. */
3926 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3927 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3928 
3929 		/* If we're done, stop now. */
3930 		if (bno >= end || n >= *nmap)
3931 			break;
3932 
3933 		/* Else go on to the next record. */
3934 		if (!xfs_iext_next_extent(ifp, &icur, &got))
3935 			eof = true;
3936 	}
3937 	*nmap = n;
3938 	return 0;
3939 }
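
/*
 * A minimal lookup sketch (hypothetical caller; offset_fsb and count_fsb
 * are assumed to have been computed elsewhere):
 *
 *	struct xfs_bmbt_irec	map;
 *	int			nmaps = 1;
 *	int			error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &map, &nmaps, 0);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */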
3940 
3941 /*
3942  * Add a delayed allocation extent to an inode. Blocks are reserved from the
3943  * global pool and the extent inserted into the inode in-core extent tree.
3944  *
3945  * On entry, got refers to the first extent beyond the offset of the extent to
3946  * allocate or eof is specified if no such extent exists. On return, got refers
3947  * to the extent record that was inserted to the inode fork.
3948  *
3949  * Note that the allocated extent may have been merged with contiguous extents
3950  * during insertion into the inode fork. Thus, got does not reflect the current
3951  * state of the inode fork on return. If necessary, the caller can use icur to
3952  * look up the updated record in the inode fork.
3953  */
3954 int
3955 xfs_bmapi_reserve_delalloc(
3956 	struct xfs_inode	*ip,
3957 	int			whichfork,
3958 	xfs_fileoff_t		off,
3959 	xfs_filblks_t		len,
3960 	xfs_filblks_t		prealloc,
3961 	struct xfs_bmbt_irec	*got,
3962 	struct xfs_iext_cursor	*icur,
3963 	int			eof)
3964 {
3965 	struct xfs_mount	*mp = ip->i_mount;
3966 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
3967 	xfs_extlen_t		alen;
3968 	xfs_extlen_t		indlen;
3969 	int			error;
3970 	xfs_fileoff_t		aoff = off;
3971 
3972 	/*
3973 	 * Cap the alloc length. Keep track of prealloc so we know whether to
3974 	 * tag the inode before we return.
3975 	 */
3976 	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3977 	if (!eof)
3978 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3979 	if (prealloc && alen >= len)
3980 		prealloc = alen - len;
3981 
3982 	/* Figure out the extent size, adjust alen */
3983 	if (whichfork == XFS_COW_FORK) {
3984 		struct xfs_bmbt_irec	prev;
3985 		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);
3986 
3987 		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3988 			prev.br_startoff = NULLFILEOFF;
3989 
3990 		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3991 					       1, 0, &aoff, &alen);
3992 		ASSERT(!error);
3993 	}
3994 
3995 	/*
3996 	 * Make a transaction-less quota reservation for delayed allocation
3997 	 * blocks.  This number gets adjusted later.  If the reservation
3998 	 * fails we can return straight away, as no blocks have been allocated yet.
3999 	 */
4000 	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4001 						XFS_QMOPT_RES_REGBLKS);
4002 	if (error)
4003 		return error;
4004 
4005 	/*
4006 	 * Update the superblock counters for alen and indlen separately,
4007 	 * since they could be accounted from different places.
4008 	 */
4009 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4010 	ASSERT(indlen > 0);
4011 
4012 	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4013 	if (error)
4014 		goto out_unreserve_quota;
4015 
4016 	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4017 	if (error)
4018 		goto out_unreserve_blocks;
4019 
4021 	ip->i_delayed_blks += alen;
4022 	xfs_mod_delalloc(ip->i_mount, alen + indlen);
4023 
4024 	got->br_startoff = aoff;
4025 	got->br_startblock = nullstartblock(indlen);
4026 	got->br_blockcount = alen;
4027 	got->br_state = XFS_EXT_NORM;
4028 
4029 	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4030 
4031 	/*
4032 	 * Tag the inode if blocks were preallocated. Note that COW fork
4033 	 * preallocation can occur at the start or end of the extent, even when
4034 	 * prealloc == 0, so we must also check the aligned offset and length.
4035 	 */
4036 	if (whichfork == XFS_DATA_FORK && prealloc)
4037 		xfs_inode_set_eofblocks_tag(ip);
4038 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4039 		xfs_inode_set_cowblocks_tag(ip);
4040 
4041 	return 0;
4042 
4043 out_unreserve_blocks:
4044 	xfs_mod_fdblocks(mp, alen, false);
4045 out_unreserve_quota:
4046 	if (XFS_IS_QUOTA_ON(mp))
4047 		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
4048 						XFS_QMOPT_RES_REGBLKS);
4049 	return error;
4050 }
4051 
4052 static int
4053 xfs_bmap_alloc_userdata(
4054 	struct xfs_bmalloca	*bma)
4055 {
4056 	struct xfs_mount	*mp = bma->ip->i_mount;
4057 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4058 	int			error;
4059 
4060 	/*
4061 	 * Set the data type being allocated. For the data fork, the first data
4062 	 * in the file is treated differently to all other allocations. For the
4063 	 * attribute fork, we only need to ensure the allocated range is not on
4064 	 * the busy list.
4065 	 */
4066 	bma->datatype = XFS_ALLOC_NOBUSY;
4067 	if (whichfork == XFS_DATA_FORK) {
4068 		bma->datatype |= XFS_ALLOC_USERDATA;
4069 		if (bma->offset == 0)
4070 			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4071 
4072 		if (mp->m_dalign && bma->length >= mp->m_dalign) {
4073 			error = xfs_bmap_isaeof(bma, whichfork);
4074 			if (error)
4075 				return error;
4076 		}
4077 
4078 		if (XFS_IS_REALTIME_INODE(bma->ip))
4079 			return xfs_bmap_rtalloc(bma);
4080 	}
4081 
4082 	return xfs_bmap_btalloc(bma);
4083 }
4084 
4085 static int
4086 xfs_bmapi_allocate(
4087 	struct xfs_bmalloca	*bma)
4088 {
4089 	struct xfs_mount	*mp = bma->ip->i_mount;
4090 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4091 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4092 	int			tmp_logflags = 0;
4093 	int			error;
4094 
4095 	ASSERT(bma->length > 0);
4096 
4097 	/*
4098 	 * For the wasdelay case, we could just allocate the blocks asked for
4099 	 * in this bmap call, but allocating the whole delalloc extent is better.
4100 	 */
4101 	if (bma->wasdel) {
4102 		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4103 		bma->offset = bma->got.br_startoff;
4104 		if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
4105 			bma->prev.br_startoff = NULLFILEOFF;
4106 	} else {
4107 		bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4108 		if (!bma->eof)
4109 			bma->length = XFS_FILBLKS_MIN(bma->length,
4110 					bma->got.br_startoff - bma->offset);
4111 	}
4112 
4113 	if (bma->flags & XFS_BMAPI_CONTIG)
4114 		bma->minlen = bma->length;
4115 	else
4116 		bma->minlen = 1;
4117 
4118 	if (bma->flags & XFS_BMAPI_METADATA)
4119 		error = xfs_bmap_btalloc(bma);
4120 	else
4121 		error = xfs_bmap_alloc_userdata(bma);
4122 	if (error || bma->blkno == NULLFSBLOCK)
4123 		return error;
4124 
4125 	if (bma->flags & XFS_BMAPI_ZERO) {
4126 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4127 		if (error)
4128 			return error;
4129 	}
4130 
4131 	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
4132 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4133 	/*
4134 	 * Bump the number of extents we've allocated
4135 	 * in this call.
4136 	 */
4137 	bma->nallocs++;
4138 
4139 	if (bma->cur)
4140 		bma->cur->bc_ino.flags =
4141 			bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
4142 
4143 	bma->got.br_startoff = bma->offset;
4144 	bma->got.br_startblock = bma->blkno;
4145 	bma->got.br_blockcount = bma->length;
4146 	bma->got.br_state = XFS_EXT_NORM;
4147 
4148 	if (bma->flags & XFS_BMAPI_PREALLOC)
4149 		bma->got.br_state = XFS_EXT_UNWRITTEN;
4150 
4151 	if (bma->wasdel)
4152 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4153 	else
4154 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4155 				whichfork, &bma->icur, &bma->cur, &bma->got,
4156 				&bma->logflags, bma->flags);
4157 
4158 	bma->logflags |= tmp_logflags;
4159 	if (error)
4160 		return error;
4161 
4162 	/*
4163 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4164 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4165 	 * the neighbouring ones.
4166 	 */
4167 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4168 
4169 	ASSERT(bma->got.br_startoff <= bma->offset);
4170 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4171 	       bma->offset + bma->length);
4172 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4173 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
4174 	return 0;
4175 }
4176 
4177 STATIC int
4178 xfs_bmapi_convert_unwritten(
4179 	struct xfs_bmalloca	*bma,
4180 	struct xfs_bmbt_irec	*mval,
4181 	xfs_filblks_t		len,
4182 	int			flags)
4183 {
4184 	int			whichfork = xfs_bmapi_whichfork(flags);
4185 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4186 	int			tmp_logflags = 0;
4187 	int			error;
4188 
4189 	/* check if we need to do unwritten->real conversion */
4190 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
4191 	    (flags & XFS_BMAPI_PREALLOC))
4192 		return 0;
4193 
4194 	/* check if we need to do real->unwritten conversion */
4195 	if (mval->br_state == XFS_EXT_NORM &&
4196 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4197 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4198 		return 0;
4199 
4200 	/*
4201 	 * Toggle the extent state between written and unwritten.
4202 	 */
4203 	ASSERT(mval->br_blockcount <= len);
4204 	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4205 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4206 					bma->ip, whichfork);
4207 	}
4208 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4209 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4210 
4211 	/*
4212 	 * Before insertion into the bmbt, zero the range being converted
4213 	 * if required.
4214 	 */
4215 	if (flags & XFS_BMAPI_ZERO) {
4216 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
4217 					mval->br_blockcount);
4218 		if (error)
4219 			return error;
4220 	}
4221 
4222 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4223 			&bma->icur, &bma->cur, mval, &tmp_logflags);
4224 	/*
4225 	 * Log the inode core unconditionally in the unwritten extent conversion
4226 	 * path because the conversion might not have done so (e.g., if the
4227 	 * extent count hasn't changed). We need to make sure the inode is dirty
4228 	 * in the transaction for the sake of fsync(), even if nothing has
4229 	 * changed, because fsync() will not force the log for this transaction
4230 	 * unless it sees the inode pinned.
4231 	 *
4232 	 * Note: If we're only converting cow fork extents, there aren't
4233 	 * any on-disk updates to make, so we don't need to log anything.
4234 	 */
4235 	if (whichfork != XFS_COW_FORK)
4236 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4237 	if (error)
4238 		return error;
4239 
4240 	/*
4241 	 * Update our extent pointer, given that
4242 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4243 	 * of the neighbouring ones.
4244 	 */
4245 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4246 
4247 	/*
4248 	 * We may have combined previously unwritten space with written space,
4249 	 * so generate another request.
4250 	 */
4251 	if (mval->br_blockcount < len)
4252 		return -EAGAIN;
4253 	return 0;
4254 }
4255 
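/*
 * Estimate the minimum free blocks to leave in the chosen AG: nothing if
 * the transaction has already allocated from an AG (t_firstblock is set),
 * one block for an extent-format fork, otherwise enough to split every
 * level of the bmbt plus add a new one.
 */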
4256 static inline xfs_extlen_t
4257 xfs_bmapi_minleft(
4258 	struct xfs_trans	*tp,
4259 	struct xfs_inode	*ip,
4260 	int			fork)
4261 {
4262 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, fork);
4263 
4264 	if (tp && tp->t_firstblock != NULLFSBLOCK)
4265 		return 0;
4266 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4267 		return 1;
4268 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4269 }
4270 
4271 /*
4272  * Log whatever the flags say, even on error.  Otherwise we might miss detecting
4273  * a case where the data is changed, there's an error, and it's not logged, so we
4274  * don't shut down when we should.  Don't bother logging extents/btree changes if
4275  * we converted to the other format.
4276  */
4277 static void
4278 xfs_bmapi_finish(
4279 	struct xfs_bmalloca	*bma,
4280 	int			whichfork,
4281 	int			error)
4282 {
4283 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4284 
4285 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4286 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4287 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4288 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4289 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4290 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4291 
4292 	if (bma->logflags)
4293 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4294 	if (bma->cur)
4295 		xfs_btree_del_cursor(bma->cur, error);
4296 }
4297 
4298 /*
4299  * Map file blocks to filesystem blocks, and allocate blocks or convert the
4300  * extent state if necessary.  Detailed behaviour is controlled by the flags
4301  * parameter.  Only allocates blocks from a single allocation group, to avoid
4302  * locking problems.
4303  */
4304 int
4305 xfs_bmapi_write(
4306 	struct xfs_trans	*tp,		/* transaction pointer */
4307 	struct xfs_inode	*ip,		/* incore inode */
4308 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
4309 	xfs_filblks_t		len,		/* length to map in file */
4310 	int			flags,		/* XFS_BMAPI_... */
4311 	xfs_extlen_t		total,		/* total blocks needed */
4312 	struct xfs_bmbt_irec	*mval,		/* output: map values */
4313 	int			*nmap)		/* i/o: mval size/count */
4314 {
4315 	struct xfs_bmalloca	bma = {
4316 		.tp		= tp,
4317 		.ip		= ip,
4318 		.total		= total,
4319 	};
4320 	struct xfs_mount	*mp = ip->i_mount;
4321 	int			whichfork = xfs_bmapi_whichfork(flags);
4322 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
4323 	xfs_fileoff_t		end;		/* end of mapped file region */
4324 	bool			eof = false;	/* after the end of extents */
4325 	int			error;		/* error return */
4326 	int			n;		/* current extent index */
4327 	xfs_fileoff_t		obno;		/* old block number (offset) */
4328 
4329 #ifdef DEBUG
4330 	xfs_fileoff_t		orig_bno;	/* original block number value */
4331 	int			orig_flags;	/* original flags arg value */
4332 	xfs_filblks_t		orig_len;	/* original value of len arg */
4333 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
4334 	int			orig_nmap;	/* original value of *nmap */
4335 
4336 	orig_bno = bno;
4337 	orig_len = len;
4338 	orig_flags = flags;
4339 	orig_mval = mval;
4340 	orig_nmap = *nmap;
4341 #endif
4342 
4343 	ASSERT(*nmap >= 1);
4344 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4345 	ASSERT(tp != NULL);
4346 	ASSERT(len > 0);
4347 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4348 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4349 	ASSERT(!(flags & XFS_BMAPI_REMAP));
4350 
4351 	/* zeroing is currently only for data extents, not metadata */
4352 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4353 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4354 	/*
4355 	 * we can allocate unwritten extents or pre-zero allocated blocks,
4356 	 * but it makes no sense to do both at once.  This would result in
4357 	 * zeroing the range twice while it still remained an unwritten
4358 	 * extent....
4359 	 */
4360 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4361 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4362 
4363 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4364 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4365 		return -EFSCORRUPTED;
4366 	}
4367 
4368 	if (XFS_FORCED_SHUTDOWN(mp))
4369 		return -EIO;
4370 
4371 	XFS_STATS_INC(mp, xs_blk_mapw);
4372 
4373 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4374 		error = xfs_iread_extents(tp, ip, whichfork);
4375 		if (error)
4376 			goto error0;
4377 	}
4378 
4379 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4380 		eof = true;
4381 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4382 		bma.prev.br_startoff = NULLFILEOFF;
4383 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4384 
4385 	n = 0;
4386 	end = bno + len;
4387 	obno = bno;
4388 	while (bno < end && n < *nmap) {
4389 		bool			need_alloc = false, wasdelay = false;
4390 
4391 		/* in hole or beyond EOF? */
4392 		if (eof || bma.got.br_startoff > bno) {
4393 			/*
4394 			 * CoW fork conversions should /never/ hit EOF or
4395 			 * holes.  There should always be something for us
4396 			 * to work on.
4397 			 */
4398 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4399 			         (flags & XFS_BMAPI_COWFORK)));
4400 
4401 			need_alloc = true;
4402 		} else if (isnullstartblock(bma.got.br_startblock)) {
4403 			wasdelay = true;
4404 		}
4405 
4406 		/*
4407 		 * First, deal with the hole before the allocated space
4408 		 * that we found, if any.
4409 		 */
4410 		if (need_alloc || wasdelay) {
4411 			bma.eof = eof;
4412 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4413 			bma.wasdel = wasdelay;
4414 			bma.offset = bno;
4415 			bma.flags = flags;
4416 
4417 			/*
4418 			 * There's a 32/64 bit type mismatch between the
4419 			 * allocation length request (which can be 64 bits in
4420 			 * length) and the bma length request, which is
4421 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
4422 			 * check for 32-bit overflows and handle them here.
4423 			 */
4424 			if (len > (xfs_filblks_t)MAXEXTLEN)
4425 				bma.length = MAXEXTLEN;
4426 			else
4427 				bma.length = len;
4428 
4429 			ASSERT(len > 0);
4430 			ASSERT(bma.length > 0);
4431 			error = xfs_bmapi_allocate(&bma);
4432 			if (error)
4433 				goto error0;
4434 			if (bma.blkno == NULLFSBLOCK)
4435 				break;
4436 
4437 			/*
4438 			 * If this is a CoW allocation, record the data in
4439 			 * the refcount btree for orphan recovery.
4440 			 */
4441 			if (whichfork == XFS_COW_FORK)
4442 				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4443 						bma.length);
4444 		}
4445 
4446 		/* Deal with the allocated space we found.  */
4447 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4448 							end, n, flags);
4449 
4450 		/* Execute unwritten extent conversion if necessary */
4451 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4452 		if (error == -EAGAIN)
4453 			continue;
4454 		if (error)
4455 			goto error0;
4456 
4457 		/* update the extent map to return */
4458 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4459 
4460 		/*
4461 		 * If we're done, stop now.  Stop when we've allocated
4462 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
4463 		 * the transaction may get too big.
4464 		 */
4465 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4466 			break;
4467 
4468 		/* Else go on to the next record. */
4469 		bma.prev = bma.got;
4470 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4471 			eof = true;
4472 	}
4473 	*nmap = n;
4474 
4475 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4476 			whichfork);
4477 	if (error)
4478 		goto error0;
4479 
4480 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4481 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4482 	xfs_bmapi_finish(&bma, whichfork, 0);
4483 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4484 		orig_nmap, *nmap);
4485 	return 0;
4486 error0:
4487 	xfs_bmapi_finish(&bma, whichfork, error);
4488 	return error;
4489 }
4490 
4491 /*
4492  * Convert an existing delalloc extent to real blocks based on file offset. This
4493  * attempts to allocate the entire delalloc extent and may require multiple
4494  * invocations to allocate the target offset if a large enough physical extent
4495  * is not available.
4496  */
4497 int
4498 xfs_bmapi_convert_delalloc(
4499 	struct xfs_inode	*ip,
4500 	int			whichfork,
4501 	xfs_off_t		offset,
4502 	struct iomap		*iomap,
4503 	unsigned int		*seq)
4504 {
4505 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
4506 	struct xfs_mount	*mp = ip->i_mount;
4507 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4508 	struct xfs_bmalloca	bma = { NULL };
4509 	uint16_t		flags = 0;
4510 	struct xfs_trans	*tp;
4511 	int			error;
4512 
4513 	if (whichfork == XFS_COW_FORK)
4514 		flags |= IOMAP_F_SHARED;
4515 
4516 	/*
4517 	 * Space for the extent and indirect blocks was reserved when the
4518 	 * delalloc extent was created so there's no need to do so here.
4519 	 */
4520 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4521 				XFS_TRANS_RESERVE, &tp);
4522 	if (error)
4523 		return error;
4524 
4525 	xfs_ilock(ip, XFS_ILOCK_EXCL);
4526 	xfs_trans_ijoin(tp, ip, 0);
4527 
4528 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4529 	    bma.got.br_startoff > offset_fsb) {
4530 		/*
4531 		 * No extent found in the range we are trying to convert.  This
4532 		 * should only happen for the COW fork, where another thread
4533 		 * might have moved the extent to the data fork in the meantime.
4534 		 */
4535 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4536 		error = -EAGAIN;
4537 		goto out_trans_cancel;
4538 	}
4539 
4540 	/*
4541 	 * If we find a real extent here we raced with another thread converting
4542 	 * the extent.  Just return the real extent at this offset.
4543 	 */
4544 	if (!isnullstartblock(bma.got.br_startblock)) {
4545 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4546 		*seq = READ_ONCE(ifp->if_seq);
4547 		goto out_trans_cancel;
4548 	}
4549 
4550 	bma.tp = tp;
4551 	bma.ip = ip;
4552 	bma.wasdel = true;
4553 	bma.offset = bma.got.br_startoff;
4554 	bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
4555 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4556 
4557 	/*
4558 	 * When we're converting the delalloc reservations backing dirty pages
4559 	 * in the page cache, we must be careful about how we create the new
4560 	 * extents:
4561 	 *
4562 	 * New CoW fork extents are created unwritten, turned into real extents
4563 	 * when we're about to write the data to disk, and mapped into the data
4564 	 * fork after the write finishes.  End of story.
4565 	 *
4566 	 * New data fork extents must be mapped in as unwritten and converted
4567 	 * to real extents after the write succeeds to avoid exposing stale
4568 	 * disk contents if we crash.
4569 	 */
4570 	bma.flags = XFS_BMAPI_PREALLOC;
4571 	if (whichfork == XFS_COW_FORK)
4572 		bma.flags |= XFS_BMAPI_COWFORK;
4573 
4574 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4575 		bma.prev.br_startoff = NULLFILEOFF;
4576 
4577 	error = xfs_bmapi_allocate(&bma);
4578 	if (error)
4579 		goto out_finish;
4580 
4581 	error = -ENOSPC;
4582 	if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4583 		goto out_finish;
4584 	error = -EFSCORRUPTED;
4585 	if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4586 		goto out_finish;
4587 
4588 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4589 	XFS_STATS_INC(mp, xs_xstrat_quick);
4590 
4591 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4592 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
4593 	*seq = READ_ONCE(ifp->if_seq);
4594 
4595 	if (whichfork == XFS_COW_FORK)
4596 		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4597 
4598 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4599 			whichfork);
4600 	if (error)
4601 		goto out_finish;
4602 
4603 	xfs_bmapi_finish(&bma, whichfork, 0);
4604 	error = xfs_trans_commit(tp);
4605 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4606 	return error;
4607 
4608 out_finish:
4609 	xfs_bmapi_finish(&bma, whichfork, error);
4610 out_trans_cancel:
4611 	xfs_trans_cancel(tp);
4612 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4613 	return error;
4614 }
4615 
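/*
 * Map an already-allocated physical extent into a hole in the given fork
 * without allocating new blocks, e.g. when remapping reflinked blocks.
 */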
4616 int
4617 xfs_bmapi_remap(
4618 	struct xfs_trans	*tp,
4619 	struct xfs_inode	*ip,
4620 	xfs_fileoff_t		bno,
4621 	xfs_filblks_t		len,
4622 	xfs_fsblock_t		startblock,
4623 	int			flags)
4624 {
4625 	struct xfs_mount	*mp = ip->i_mount;
4626 	struct xfs_ifork	*ifp;
4627 	struct xfs_btree_cur	*cur = NULL;
4628 	struct xfs_bmbt_irec	got;
4629 	struct xfs_iext_cursor	icur;
4630 	int			whichfork = xfs_bmapi_whichfork(flags);
4631 	int			logflags = 0, error;
4632 
4633 	ifp = XFS_IFORK_PTR(ip, whichfork);
4634 	ASSERT(len > 0);
4635 	ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4636 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4637 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4638 			   XFS_BMAPI_NORMAP)));
4639 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4640 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4641 
4642 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4643 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4644 		return -EFSCORRUPTED;
4645 	}
4646 
4647 	if (XFS_FORCED_SHUTDOWN(mp))
4648 		return -EIO;
4649 
4650 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4651 		error = xfs_iread_extents(tp, ip, whichfork);
4652 		if (error)
4653 			return error;
4654 	}
4655 
4656 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4657 		/* make sure we only reflink into a hole. */
4658 		ASSERT(got.br_startoff > bno);
4659 		ASSERT(got.br_startoff - bno >= len);
4660 	}
4661 
4662 	ip->i_d.di_nblocks += len;
4663 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4664 
4665 	if (ifp->if_flags & XFS_IFBROOT) {
4666 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4667 		cur->bc_ino.flags = 0;
4668 	}
4669 
4670 	got.br_startoff = bno;
4671 	got.br_startblock = startblock;
4672 	got.br_blockcount = len;
4673 	if (flags & XFS_BMAPI_PREALLOC)
4674 		got.br_state = XFS_EXT_UNWRITTEN;
4675 	else
4676 		got.br_state = XFS_EXT_NORM;
4677 
4678 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4679 			&cur, &got, &logflags, flags);
4680 	if (error)
4681 		goto error0;
4682 
4683 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4684 
4685 error0:
4686 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4687 		logflags &= ~XFS_ILOG_DEXT;
4688 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4689 		logflags &= ~XFS_ILOG_DBROOT;
4690 
4691 	if (logflags)
4692 		xfs_trans_log_inode(tp, ip, logflags);
4693 	if (cur)
4694 		xfs_btree_del_cursor(cur, error);
4695 	return error;
4696 }
4697 
4698 /*
4699  * When a delalloc extent is split (e.g., due to a hole punch), the original
4700  * indlen reservation must be shared across the two new extents that are left
4701  * behind.
4702  *
4703  * Given the original reservation and the worst case indlen for the two new
4704  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4705  * reservation fairly across the two new extents. If necessary, steal available
4706  * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4707  * ores == 1). The number of stolen blocks is returned. The availability and
4708  * subsequent accounting of stolen blocks is the responsibility of the caller.
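 *
 * Worked example: with ores = 10, *indlen1 = 8, *indlen2 = 6 and avail = 2,
 * two blocks are stolen (nres = 14), both requests are scaled by 12/14 down
 * to 6 and 5, the one remaining block then goes to the first extent, and we
 * end up with *indlen1 = 7, *indlen2 = 5 and a return value of 2.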
4709  */
4710 static xfs_filblks_t
4711 xfs_bmap_split_indlen(
4712 	xfs_filblks_t			ores,		/* original res. */
4713 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4714 	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
4715 	xfs_filblks_t			avail)		/* stealable blocks */
4716 {
4717 	xfs_filblks_t			len1 = *indlen1;
4718 	xfs_filblks_t			len2 = *indlen2;
4719 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4720 	xfs_filblks_t			stolen = 0;
4721 	xfs_filblks_t			resfactor;
4722 
4723 	/*
4724 	 * Steal as many blocks as we can to try and satisfy the worst case
4725 	 * indlen for both new extents.
4726 	 */
4727 	if (ores < nres && avail)
4728 		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4729 	ores += stolen;
4730 
4731 	 /* nothing else to do if we've satisfied the new reservation */
4732 	if (ores >= nres)
4733 		return stolen;
4734 
4735 	/*
4736 	 * We can't meet the total required reservation for the two extents.
4737 	 * Calculate the percent of the overall shortage between both extents
4738 	 * and apply this percentage to each of the requested indlen values.
4739 	 * This distributes the shortage fairly and reduces the chances that one
4740 	 * of the two extents is left with nothing when extents are repeatedly
4741 	 * split.
4742 	 */
4743 	resfactor = (ores * 100);
4744 	do_div(resfactor, nres);
4745 	len1 *= resfactor;
4746 	do_div(len1, 100);
4747 	len2 *= resfactor;
4748 	do_div(len2, 100);
4749 	ASSERT(len1 + len2 <= ores);
4750 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
4751 
4752 	/*
4753 	 * Hand out the remainder to each extent. If one of the two reservations
4754 	 * is zero, we want to make sure that one gets a block first. The loop
4755 	 * below starts with len1, so hand len2 a block right off the bat if it
4756 	 * is zero.
4757 	 */
4758 	ores -= (len1 + len2);
4759 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4760 	if (ores && !len2 && *indlen2) {
4761 		len2++;
4762 		ores--;
4763 	}
4764 	while (ores) {
4765 		if (len1 < *indlen1) {
4766 			len1++;
4767 			ores--;
4768 		}
4769 		if (!ores)
4770 			break;
4771 		if (len2 < *indlen2) {
4772 			len2++;
4773 			ores--;
4774 		}
4775 	}
4776 
4777 	*indlen1 = len1;
4778 	*indlen2 = len2;
4779 
4780 	return stolen;
4781 }
4782 
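/*
 * Remove the given range from a delalloc extent, redistributing the indlen
 * reservation across any remaining pieces and returning freed blocks and
 * quota reservations to the relevant pools.
 */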
4783 int
4784 xfs_bmap_del_extent_delay(
4785 	struct xfs_inode	*ip,
4786 	int			whichfork,
4787 	struct xfs_iext_cursor	*icur,
4788 	struct xfs_bmbt_irec	*got,
4789 	struct xfs_bmbt_irec	*del)
4790 {
4791 	struct xfs_mount	*mp = ip->i_mount;
4792 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
4793 	struct xfs_bmbt_irec	new;
4794 	int64_t			da_old, da_new, da_diff = 0;
4795 	xfs_fileoff_t		del_endoff, got_endoff;
4796 	xfs_filblks_t		got_indlen, new_indlen, stolen;
4797 	int			state = xfs_bmap_fork_to_state(whichfork);
4798 	int			error = 0;
4799 	bool			isrt;
4800 
4801 	XFS_STATS_INC(mp, xs_del_exlist);
4802 
4803 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4804 	del_endoff = del->br_startoff + del->br_blockcount;
4805 	got_endoff = got->br_startoff + got->br_blockcount;
4806 	da_old = startblockval(got->br_startblock);
4807 	da_new = 0;
4808 
4809 	ASSERT(del->br_blockcount > 0);
4810 	ASSERT(got->br_startoff <= del->br_startoff);
4811 	ASSERT(got_endoff >= del_endoff);
4812 
4813 	if (isrt) {
4814 		uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4815 
4816 		do_div(rtexts, mp->m_sb.sb_rextsize);
4817 		xfs_mod_frextents(mp, rtexts);
4818 	}
4819 
4820 	/*
4821 	 * Update the inode delalloc counter now and wait to update the
4822 	 * sb counters as we might have to borrow some blocks for the
4823 	 * indirect block accounting.
4824 	 */
4825 	error = xfs_trans_reserve_quota_nblks(NULL, ip,
4826 			-((long)del->br_blockcount), 0,
4827 			isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4828 	if (error)
4829 		return error;
4830 	ip->i_delayed_blks -= del->br_blockcount;
4831 
4832 	if (got->br_startoff == del->br_startoff)
4833 		state |= BMAP_LEFT_FILLING;
4834 	if (got_endoff == del_endoff)
4835 		state |= BMAP_RIGHT_FILLING;
4836 
4837 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4838 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4839 		/*
4840 		 * Matches the whole extent.  Delete the entry.
4841 		 */
4842 		xfs_iext_remove(ip, icur, state);
4843 		xfs_iext_prev(ifp, icur);
4844 		break;
4845 	case BMAP_LEFT_FILLING:
4846 		/*
4847 		 * Deleting the first part of the extent.
4848 		 */
4849 		got->br_startoff = del_endoff;
4850 		got->br_blockcount -= del->br_blockcount;
4851 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4852 				got->br_blockcount), da_old);
4853 		got->br_startblock = nullstartblock((int)da_new);
4854 		xfs_iext_update_extent(ip, state, icur, got);
4855 		break;
4856 	case BMAP_RIGHT_FILLING:
4857 		/*
4858 		 * Deleting the last part of the extent.
4859 		 */
4860 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
4861 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4862 				got->br_blockcount), da_old);
4863 		got->br_startblock = nullstartblock((int)da_new);
4864 		xfs_iext_update_extent(ip, state, icur, got);
4865 		break;
4866 	case 0:
4867 		/*
4868 		 * Deleting the middle of the extent.
4869 		 *
4870 		 * Distribute the original indlen reservation across the two new
4871 		 * extents.  Steal blocks from the deleted extent if necessary.
4872 		 * Stealing blocks simply fudges the fdblocks accounting below.
4873 		 * Warn if either of the new indlen reservations is zero as this
4874 		 * can lead to delalloc problems.
4875 		 */
4876 		got->br_blockcount = del->br_startoff - got->br_startoff;
4877 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4878 
4879 		new.br_blockcount = got_endoff - del_endoff;
4880 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4881 
4882 		WARN_ON_ONCE(!got_indlen || !new_indlen);
4883 		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4884 						       del->br_blockcount);
4885 
4886 		got->br_startblock = nullstartblock((int)got_indlen);
4887 
4888 		new.br_startoff = del_endoff;
4889 		new.br_state = got->br_state;
4890 		new.br_startblock = nullstartblock((int)new_indlen);
4891 
4892 		xfs_iext_update_extent(ip, state, icur, got);
4893 		xfs_iext_next(ifp, icur);
4894 		xfs_iext_insert(ip, icur, &new, state);
4895 
4896 		da_new = got_indlen + new_indlen - stolen;
4897 		del->br_blockcount -= stolen;
4898 		break;
4899 	}
4900 
4901 	ASSERT(da_old >= da_new);
4902 	da_diff = da_old - da_new;
4903 	if (!isrt)
4904 		da_diff += del->br_blockcount;
4905 	if (da_diff) {
4906 		xfs_mod_fdblocks(mp, da_diff, false);
4907 		xfs_mod_delalloc(mp, -da_diff);
4908 	}
4909 	return error;
4910 }
4911 
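/*
 * Remove the given range from a real extent in the COW fork, updating only
 * the in-core extent list; the caller remains responsible for freeing the
 * blocks themselves.
 */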
4912 void
4913 xfs_bmap_del_extent_cow(
4914 	struct xfs_inode	*ip,
4915 	struct xfs_iext_cursor	*icur,
4916 	struct xfs_bmbt_irec	*got,
4917 	struct xfs_bmbt_irec	*del)
4918 {
4919 	struct xfs_mount	*mp = ip->i_mount;
4920 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4921 	struct xfs_bmbt_irec	new;
4922 	xfs_fileoff_t		del_endoff, got_endoff;
4923 	int			state = BMAP_COWFORK;
4924 
4925 	XFS_STATS_INC(mp, xs_del_exlist);
4926 
4927 	del_endoff = del->br_startoff + del->br_blockcount;
4928 	got_endoff = got->br_startoff + got->br_blockcount;
4929 
4930 	ASSERT(del->br_blockcount > 0);
4931 	ASSERT(got->br_startoff <= del->br_startoff);
4932 	ASSERT(got_endoff >= del_endoff);
4933 	ASSERT(!isnullstartblock(got->br_startblock));
4934 
4935 	if (got->br_startoff == del->br_startoff)
4936 		state |= BMAP_LEFT_FILLING;
4937 	if (got_endoff == del_endoff)
4938 		state |= BMAP_RIGHT_FILLING;
4939 
4940 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4941 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4942 		/*
4943 		 * Matches the whole extent.  Delete the entry.
4944 		 */
4945 		xfs_iext_remove(ip, icur, state);
4946 		xfs_iext_prev(ifp, icur);
4947 		break;
4948 	case BMAP_LEFT_FILLING:
4949 		/*
4950 		 * Deleting the first part of the extent.
4951 		 */
4952 		got->br_startoff = del_endoff;
4953 		got->br_blockcount -= del->br_blockcount;
4954 		got->br_startblock = del->br_startblock + del->br_blockcount;
4955 		xfs_iext_update_extent(ip, state, icur, got);
4956 		break;
4957 	case BMAP_RIGHT_FILLING:
4958 		/*
4959 		 * Deleting the last part of the extent.
4960 		 */
4961 		got->br_blockcount -= del->br_blockcount;
4962 		xfs_iext_update_extent(ip, state, icur, got);
4963 		break;
4964 	case 0:
4965 		/*
4966 		 * Deleting the middle of the extent.
4967 		 */
4968 		got->br_blockcount = del->br_startoff - got->br_startoff;
4969 
4970 		new.br_startoff = del_endoff;
4971 		new.br_blockcount = got_endoff - del_endoff;
4972 		new.br_state = got->br_state;
4973 		new.br_startblock = del->br_startblock + del->br_blockcount;
4974 
4975 		xfs_iext_update_extent(ip, state, icur, got);
4976 		xfs_iext_next(ifp, icur);
4977 		xfs_iext_insert(ip, icur, &new, state);
4978 		break;
4979 	}
4980 	ip->i_delayed_blks -= del->br_blockcount;
4981 }
4982 
4983 /*
4984  * Called by __xfs_bunmapi to update file extent records and the btree
4985  * after removing space.
4986  */
4987 STATIC int				/* error */
4988 xfs_bmap_del_extent_real(
4989 	xfs_inode_t		*ip,	/* incore inode pointer */
4990 	xfs_trans_t		*tp,	/* current transaction pointer */
4991 	struct xfs_iext_cursor	*icur,
4992 	xfs_btree_cur_t		*cur,	/* if null, not a btree */
4993 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
4994 	int			*logflagsp, /* inode logging flags */
4995 	int			whichfork, /* data or attr fork */
4996 	int			bflags)	/* bmapi flags */
4997 {
4998 	xfs_fsblock_t		del_endblock=0;	/* first block past del */
4999 	xfs_fileoff_t		del_endoff;	/* first offset past del */
5000 	int			do_fx;	/* free extent at end of routine */
5001 	int			error;	/* error return value */
5002 	int			flags = 0;/* inode logging flags */
5003 	struct xfs_bmbt_irec	got;	/* current extent entry */
5004 	xfs_fileoff_t		got_endoff;	/* first offset past got */
5005 	int			i;	/* temp state */
5006 	struct xfs_ifork	*ifp;	/* inode fork pointer */
5007 	xfs_mount_t		*mp;	/* mount structure */
5008 	xfs_filblks_t		nblks;	/* quota/sb block count */
5009 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
5010 	/* REFERENCED */
5011 	uint			qfield;	/* quota field to update */
5012 	int			state = xfs_bmap_fork_to_state(whichfork);
5013 	struct xfs_bmbt_irec	old;
5014 
5015 	mp = ip->i_mount;
5016 	XFS_STATS_INC(mp, xs_del_exlist);
5017 
5018 	ifp = XFS_IFORK_PTR(ip, whichfork);
5019 	ASSERT(del->br_blockcount > 0);
5020 	xfs_iext_get_extent(ifp, icur, &got);
5021 	ASSERT(got.br_startoff <= del->br_startoff);
5022 	del_endoff = del->br_startoff + del->br_blockcount;
5023 	got_endoff = got.br_startoff + got.br_blockcount;
5024 	ASSERT(got_endoff >= del_endoff);
5025 	ASSERT(!isnullstartblock(got.br_startblock));
5026 	qfield = 0;
5027 	error = 0;
5028 
5029 	/*
5030 	 * If it's the case where the directory code is running with no block
5031 	 * reservation, and the deleted block is in the middle of its extent,
5032 	 * and the resulting insert of an extent would cause transformation to
5033 	 * btree format, then reject it.  The calling code will then swap blocks
5034 	 * around instead.  We have to do this now, rather than waiting for the
5035 	 * conversion to btree format, since the transaction will be dirty then.
5036 	 */
5037 	if (tp->t_blk_res == 0 &&
5038 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5039 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5040 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5041 		return -ENOSPC;
5042 
5043 	flags = XFS_ILOG_CORE;
5044 	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5045 		xfs_fsblock_t	bno;
5046 		xfs_filblks_t	len;
5047 		xfs_extlen_t	mod;
5048 
5049 		bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
5050 				  &mod);
5051 		ASSERT(mod == 0);
5052 		len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
5053 				  &mod);
5054 		ASSERT(mod == 0);
5055 
5056 		error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5057 		if (error)
5058 			goto done;
5059 		do_fx = 0;
5060 		nblks = len * mp->m_sb.sb_rextsize;
5061 		qfield = XFS_TRANS_DQ_RTBCOUNT;
5062 	} else {
5063 		do_fx = 1;
5064 		nblks = del->br_blockcount;
5065 		qfield = XFS_TRANS_DQ_BCOUNT;
5066 	}
5067 
5068 	del_endblock = del->br_startblock + del->br_blockcount;
5069 	if (cur) {
5070 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5071 		if (error)
5072 			goto done;
5073 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5074 			error = -EFSCORRUPTED;
5075 			goto done;
5076 		}
5077 	}
5078 
5079 	if (got.br_startoff == del->br_startoff)
5080 		state |= BMAP_LEFT_FILLING;
5081 	if (got_endoff == del_endoff)
5082 		state |= BMAP_RIGHT_FILLING;
5083 
5084 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5085 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5086 		/*
5087 		 * Matches the whole extent.  Delete the entry.
5088 		 */
5089 		xfs_iext_remove(ip, icur, state);
5090 		xfs_iext_prev(ifp, icur);
5091 		ifp->if_nextents--;
5092 
5093 		flags |= XFS_ILOG_CORE;
5094 		if (!cur) {
5095 			flags |= xfs_ilog_fext(whichfork);
5096 			break;
5097 		}
5098 		if ((error = xfs_btree_delete(cur, &i)))
5099 			goto done;
5100 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5101 			error = -EFSCORRUPTED;
5102 			goto done;
5103 		}
5104 		break;
5105 	case BMAP_LEFT_FILLING:
5106 		/*
5107 		 * Deleting the first part of the extent.
5108 		 */
5109 		got.br_startoff = del_endoff;
5110 		got.br_startblock = del_endblock;
5111 		got.br_blockcount -= del->br_blockcount;
5112 		xfs_iext_update_extent(ip, state, icur, &got);
5113 		if (!cur) {
5114 			flags |= xfs_ilog_fext(whichfork);
5115 			break;
5116 		}
5117 		error = xfs_bmbt_update(cur, &got);
5118 		if (error)
5119 			goto done;
5120 		break;
5121 	case BMAP_RIGHT_FILLING:
5122 		/*
5123 		 * Deleting the last part of the extent.
5124 		 */
5125 		got.br_blockcount -= del->br_blockcount;
5126 		xfs_iext_update_extent(ip, state, icur, &got);
5127 		if (!cur) {
5128 			flags |= xfs_ilog_fext(whichfork);
5129 			break;
5130 		}
5131 		error = xfs_bmbt_update(cur, &got);
5132 		if (error)
5133 			goto done;
5134 		break;
5135 	case 0:
5136 		/*
5137 		 * Deleting the middle of the extent.
5138 		 */
5139 		old = got;
5140 
5141 		got.br_blockcount = del->br_startoff - got.br_startoff;
5142 		xfs_iext_update_extent(ip, state, icur, &got);
5143 
5144 		new.br_startoff = del_endoff;
5145 		new.br_blockcount = got_endoff - del_endoff;
5146 		new.br_state = got.br_state;
5147 		new.br_startblock = del_endblock;
5148 
5149 		flags |= XFS_ILOG_CORE;
5150 		if (cur) {
5151 			error = xfs_bmbt_update(cur, &got);
5152 			if (error)
5153 				goto done;
5154 			error = xfs_btree_increment(cur, 0, &i);
5155 			if (error)
5156 				goto done;
5157 			cur->bc_rec.b = new;
5158 			error = xfs_btree_insert(cur, &i);
5159 			if (error && error != -ENOSPC)
5160 				goto done;
5161 			/*
5162 			 * If we get no-space back from btree insert, it tried a
5163 			 * split, and we have a zero block reservation.  Fix up
5164 			 * our state and return the error.
5165 			 */
5166 			if (error == -ENOSPC) {
5167 				/*
5168 				 * Reset the cursor, don't trust it after any
5169 				 * insert operation.
5170 				 */
5171 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5172 				if (error)
5173 					goto done;
5174 				if (XFS_IS_CORRUPT(mp, i != 1)) {
5175 					error = -EFSCORRUPTED;
5176 					goto done;
5177 				}
5178 				/*
5179 				 * Update the btree record back
5180 				 * to the original value.
5181 				 */
5182 				error = xfs_bmbt_update(cur, &old);
5183 				if (error)
5184 					goto done;
5185 				/*
5186 				 * Reset the extent record back
5187 				 * to the original value.
5188 				 */
5189 				xfs_iext_update_extent(ip, state, icur, &old);
5190 				flags = 0;
5191 				error = -ENOSPC;
5192 				goto done;
5193 			}
5194 			if (XFS_IS_CORRUPT(mp, i != 1)) {
5195 				error = -EFSCORRUPTED;
5196 				goto done;
5197 			}
5198 		} else
5199 			flags |= xfs_ilog_fext(whichfork);
5200 
5201 		ifp->if_nextents++;
5202 		xfs_iext_next(ifp, icur);
5203 		xfs_iext_insert(ip, icur, &new, state);
5204 		break;
5205 	}
5206 
5207 	/* remove reverse mapping */
5208 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5209 
5210 	/*
5211 	 * If we need to, add to list of extents to delete.
5212 	 */
5213 	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5214 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5215 			xfs_refcount_decrease_extent(tp, del);
5216 		} else {
5217 			__xfs_bmap_add_free(tp, del->br_startblock,
5218 					del->br_blockcount, NULL,
5219 					(bflags & XFS_BMAPI_NODISCARD) ||
5220 					del->br_state == XFS_EXT_UNWRITTEN);
5221 		}
5222 	}
5223 
5224 	/*
5225 	 * Adjust inode # blocks in the file.
5226 	 */
5227 	if (nblks)
5228 		ip->i_d.di_nblocks -= nblks;
5229 	/*
5230 	 * Adjust quota data.
5231 	 */
5232 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
5233 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5234 
5235 done:
5236 	*logflagsp = flags;
5237 	return error;
5238 }
5239 
5240 /*
5241  * Unmap (remove) blocks from a file.
5242  * If nexts is nonzero then the number of extents to remove is limited to
5243  * that value.  If not all extents in the block range can be removed then
5244  * *done is set.
5245  */
5246 int						/* error */
5247 __xfs_bunmapi(
5248 	struct xfs_trans	*tp,		/* transaction pointer */
5249 	struct xfs_inode	*ip,		/* incore inode */
5250 	xfs_fileoff_t		start,		/* first file offset deleted */
5251 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5252 	int			flags,		/* misc flags */
5253 	xfs_extnum_t		nexts)		/* number of extents max */
5254 {
5255 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5256 	struct xfs_bmbt_irec	del;		/* extent being deleted */
5257 	int			error;		/* error return value */
5258 	xfs_extnum_t		extno;		/* extent number in list */
5259 	struct xfs_bmbt_irec	got;		/* current extent record */
5260 	struct xfs_ifork	*ifp;		/* inode fork pointer */
5261 	int			isrt;		/* freeing in rt area */
5262 	int			logflags;	/* transaction logging flags */
5263 	xfs_extlen_t		mod;		/* rt extent offset */
5264 	struct xfs_mount	*mp = ip->i_mount;
5265 	int			tmp_logflags;	/* partial logging flags */
5266 	int			wasdel;		/* was a delayed alloc extent */
5267 	int			whichfork;	/* data or attribute fork */
5268 	xfs_fsblock_t		sum;
5269 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
5270 	xfs_fileoff_t		max_len;
5271 	xfs_agnumber_t		prev_agno = NULLAGNUMBER, agno;
5272 	xfs_fileoff_t		end;
5273 	struct xfs_iext_cursor	icur;
5274 	bool			done = false;
5275 
5276 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5277 
5278 	whichfork = xfs_bmapi_whichfork(flags);
5279 	ASSERT(whichfork != XFS_COW_FORK);
5280 	ifp = XFS_IFORK_PTR(ip, whichfork);
5281 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
5282 		return -EFSCORRUPTED;
5283 	if (XFS_FORCED_SHUTDOWN(mp))
5284 		return -EIO;
5285 
5286 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5287 	ASSERT(len > 0);
5288 	ASSERT(nexts >= 0);
5289 
5290 	/*
5291 	 * Guesstimate how many blocks we can unmap without running the risk of
5292 	 * blowing out the transaction with a mix of EFIs and reflink
5293 	 * adjustments.
5294 	 */
5295 	if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5296 		max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5297 	else
5298 		max_len = len;
5299 
5300 	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5301 	    (error = xfs_iread_extents(tp, ip, whichfork)))
5302 		return error;
5303 	if (xfs_iext_count(ifp) == 0) {
5304 		*rlen = 0;
5305 		return 0;
5306 	}
5307 	XFS_STATS_INC(mp, xs_blk_unmap);
5308 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5309 	end = start + len;
5310 
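	/*
	 * Look up the extent at or before the last block of the range; if
	 * there is none, the whole range is already a hole.
	 */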
5311 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5312 		*rlen = 0;
5313 		return 0;
5314 	}
5315 	end--;
5316 
5317 	logflags = 0;
5318 	if (ifp->if_flags & XFS_IFBROOT) {
5319 		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
5320 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5321 		cur->bc_ino.flags = 0;
5322 	} else
5323 		cur = NULL;
5324 
5325 	if (isrt) {
5326 		/*
5327 		 * Synchronize by locking the rt bitmap and summary inodes.
5328 		 */
5329 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5330 		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5331 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5332 		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5333 	}
5334 
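	/*
	 * Walk the extents backwards from end towards start, unmapping as
	 * we go, until we run out of range, extents, or unmap budget.
	 */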
5335 	extno = 0;
5336 	while (end != (xfs_fileoff_t)-1 && end >= start &&
5337 	       (nexts == 0 || extno < nexts) && max_len > 0) {
5338 		/*
5339 		 * Does the found extent start after the hole in which end
5340 		 * lives?  If so, just back up to the previous extent.
5341 		 */
5342 		if (got.br_startoff > end &&
5343 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5344 			done = true;
5345 			break;
5346 		}
5347 		/*
5348 		 * Is the last block of this extent before the range
5349 		 * we're supposed to delete?  If so, we're done.
5350 		 */
5351 		end = XFS_FILEOFF_MIN(end,
5352 			got.br_startoff + got.br_blockcount - 1);
5353 		if (end < start)
5354 			break;
5355 		/*
5356 		 * Then deal with the (possibly delayed) allocated space
5357 		 * we found.
5358 		 */
5359 		del = got;
5360 		wasdel = isnullstartblock(del.br_startblock);
5361 
5362 		/*
5363 		 * Make sure we don't touch multiple AGF headers out of order
5364 		 * in a single transaction, as that could cause AB-BA deadlocks.
5365 		 */
5366 		if (!wasdel && !isrt) {
5367 			agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5368 			if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5369 				break;
5370 			prev_agno = agno;
5371 		}
5372 		if (got.br_startoff < start) {
5373 			del.br_startoff = start;
5374 			del.br_blockcount -= start - got.br_startoff;
5375 			if (!wasdel)
5376 				del.br_startblock += start - got.br_startoff;
5377 		}
5378 		if (del.br_startoff + del.br_blockcount > end + 1)
5379 			del.br_blockcount = end + 1 - del.br_startoff;
5380 
5381 		/* How much can we safely unmap? */
5382 		if (max_len < del.br_blockcount) {
5383 			del.br_startoff += del.br_blockcount - max_len;
5384 			if (!wasdel)
5385 				del.br_startblock += del.br_blockcount - max_len;
5386 			del.br_blockcount = max_len;
5387 		}
5388 
5389 		if (!isrt)
5390 			goto delete;
5391 
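		/*
		 * Check whether the end of this extent is aligned to a
		 * realtime extent boundary.
		 */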
5392 		sum = del.br_startblock + del.br_blockcount;
5393 		div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
5394 		if (mod) {
5395 			/*
5396 			 * Realtime extent not lined up at the end.
5397 			 * The extent could have been split into written
5398 			 * and unwritten pieces, or we could just be
5399 			 * unmapping part of it.  But we can't really
5400 			 * get rid of part of a realtime extent.
5401 			 */
5402 			if (del.br_state == XFS_EXT_UNWRITTEN) {
5403 				/*
5404 				 * This piece is unwritten; there is nothing
5405 				 * to convert, so skip over it.
5406 				 */
5407 				ASSERT(end >= mod);
5408 				end -= mod > del.br_blockcount ?
5409 					del.br_blockcount : mod;
5410 				if (end < got.br_startoff &&
5411 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5412 					done = true;
5413 					break;
5414 				}
5415 				continue;
5416 			}
5417 			/*
5418 			 * It's written; turn it unwritten.
5419 			 * This is better than zeroing it.
5420 			 */
5421 			ASSERT(del.br_state == XFS_EXT_NORM);
5422 			ASSERT(tp->t_blk_res > 0);
5423 			/*
5424 			 * If this spans a realtime extent boundary,
5425 			 * chop it back to the start of the one we end at.
5426 			 */
5427 			if (del.br_blockcount > mod) {
5428 				del.br_startoff += del.br_blockcount - mod;
5429 				del.br_startblock += del.br_blockcount - mod;
5430 				del.br_blockcount = mod;
5431 			}
5432 			del.br_state = XFS_EXT_UNWRITTEN;
5433 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5434 					whichfork, &icur, &cur, &del,
5435 					&logflags);
5436 			if (error)
5437 				goto error0;
5438 			goto nodelete;
5439 		}
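		/* Now check the alignment at the front of the extent. */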
5440 		div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
5441 		if (mod) {
5442 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5443 
5444 			/*
5445 			 * Realtime extent is lined up at the end but not
5446 			 * at the front.  We'll get rid of full extents if
5447 			 * we can.
5448 			 */
5449 			if (del.br_blockcount > off) {
5450 				del.br_blockcount -= off;
5451 				del.br_startoff += off;
5452 				del.br_startblock += off;
5453 			} else if (del.br_startoff == start &&
5454 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5455 				    tp->t_blk_res == 0)) {
5456 				/*
5457 				 * Can't make it unwritten.  There isn't
5458 				 * a full extent here so just skip it.
5459 				 */
5460 				ASSERT(end >= del.br_blockcount);
5461 				end -= del.br_blockcount;
5462 				if (got.br_startoff > end &&
5463 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5464 					done = true;
5465 					break;
5466 				}
5467 				continue;
5468 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
5469 				struct xfs_bmbt_irec	prev;
5470 				xfs_fileoff_t		unwrite_start;
5471 
5472 				/*
5473 				 * This one is already unwritten.
5474 				 * It must have a written left neighbor.
5475 				 * Convert the killed part of that neighbor
5476 				 * to unwritten and try again.
5477 				 */
5478 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5479 					ASSERT(0);
5480 				ASSERT(prev.br_state == XFS_EXT_NORM);
5481 				ASSERT(!isnullstartblock(prev.br_startblock));
5482 				ASSERT(del.br_startblock ==
5483 				       prev.br_startblock + prev.br_blockcount);
5484 				unwrite_start = max3(start,
5485 						     del.br_startoff - mod,
5486 						     prev.br_startoff);
5487 				mod = unwrite_start - prev.br_startoff;
5488 				prev.br_startoff = unwrite_start;
5489 				prev.br_startblock += mod;
5490 				prev.br_blockcount -= mod;
5491 				prev.br_state = XFS_EXT_UNWRITTEN;
5492 				error = xfs_bmap_add_extent_unwritten_real(tp,
5493 						ip, whichfork, &icur, &cur,
5494 						&prev, &logflags);
5495 				if (error)
5496 					goto error0;
5497 				goto nodelete;
5498 			} else {
5499 				ASSERT(del.br_state == XFS_EXT_NORM);
5500 				del.br_state = XFS_EXT_UNWRITTEN;
5501 				error = xfs_bmap_add_extent_unwritten_real(tp,
5502 						ip, whichfork, &icur, &cur,
5503 						&del, &logflags);
5504 				if (error)
5505 					goto error0;
5506 				goto nodelete;
5507 			}
5508 		}
5509 
5510 delete:
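		/* Remove the (possibly trimmed) extent described by del. */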
5511 		if (wasdel) {
5512 			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5513 					&got, &del);
5514 		} else {
5515 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5516 					&del, &tmp_logflags, whichfork,
5517 					flags);
5518 			logflags |= tmp_logflags;
5519 		}
5520 
5521 		if (error)
5522 			goto error0;
5523 
5524 		max_len -= del.br_blockcount;
5525 		end = del.br_startoff - 1;
5526 nodelete:
5527 		/*
5528 		 * If we're not done, move on to the next (previous) record.
5529 		 */
5530 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5531 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5532 			    (got.br_startoff > end &&
5533 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5534 				done = true;
5535 				break;
5536 			}
5537 			extno++;
5538 		}
5539 	}
5540 	if (done || end == (xfs_fileoff_t)-1 || end < start)
5541 		*rlen = 0;
5542 	else
5543 		*rlen = end - start + 1;
5544 
5545 	/*
5546 	 * Convert to a btree if necessary.
5547 	 */
5548 	if (xfs_bmap_needs_btree(ip, whichfork)) {
5549 		ASSERT(cur == NULL);
5550 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5551 				&tmp_logflags, whichfork);
5552 		logflags |= tmp_logflags;
5553 	} else {
5554 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5555 			whichfork);
5556 	}
5557 
5558 error0:
5559 	/*
5560 	 * Log everything.  Do this after the conversion; there's no point in
5561 	 * logging the extent records if we've converted to btree format.
5562 	 */
5563 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5564 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5565 		logflags &= ~xfs_ilog_fext(whichfork);
5566 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5567 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
5568 		logflags &= ~xfs_ilog_fbroot(whichfork);
5569 	/*
5570 	 * Log the inode even in the error case; if the transaction
5571 	 * is dirty we'll need to shut down the filesystem.
5572 	 */
5573 	if (logflags)
5574 		xfs_trans_log_inode(tp, ip, logflags);
5575 	if (cur) {
5576 		if (!error)
5577 			cur->bc_ino.allocated = 0;
5578 		xfs_btree_del_cursor(cur, error);
5579 	}
5580 	return error;
5581 }
5582 
5583 /* Unmap a range of a file. */
5584 int
5585 xfs_bunmapi(
5586 	xfs_trans_t		*tp,
5587 	struct xfs_inode	*ip,
5588 	xfs_fileoff_t		bno,
5589 	xfs_filblks_t		len,
5590 	int			flags,
5591 	xfs_extnum_t		nexts,
5592 	int			*done)
5593 {
5594 	int			error;
5595 
5596 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5597 	*done = (len == 0);
5598 	return error;
5599 }
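
/*
 * Typical usage is a loop that unmaps a bounded number of extents per
 * transaction and finishes the deferred work in between.  A sketch only;
 * the exact transaction handling depends on the caller, and first_fsb and
 * unmap_len are placeholder names:
 *
 *	int done = 0;
 *
 *	while (!done) {
 *		error = xfs_bunmapi(tp, ip, first_fsb, unmap_len, 0, 1,
 *				    &done);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp);
 *		if (error)
 *			break;
 *	}
 */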
5600 
5601 /*
5602  * Determine whether an extent shift can be accomplished by a merge with the
5603  * extent that precedes the target hole of the shift.
5604  */
5605 STATIC bool
5606 xfs_bmse_can_merge(
5607 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5608 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5609 	xfs_fileoff_t		shift)	/* shift fsb */
5610 {
5611 	xfs_fileoff_t		startoff;
5612 
5613 	startoff = got->br_startoff - shift;
5614 
5615 	/*
5616 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5617 	 * the preceding extent.
5618 	 */
5619 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5620 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5621 	    (left->br_state != got->br_state) ||
5622 	    (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5623 		return false;
5624 
5625 	return true;
5626 }
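
/*
 * Worked example (hypothetical numbers): left = offsets [0, 10) at block
 * 100 and got = offsets [15, 20) at block 110, with shift = 5.  Shifted,
 * got starts at offset 10, exactly where left ends in-file, and its
 * startblock 110 is exactly where left ends on-disk, so the two merge
 * into a single 15-block extent at block 100.
 */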
5627 
5628 /*
5629  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5630  * hole in the file. If an extent shift would result in the extent being fully
5631  * adjacent to the extent that currently precedes the hole, we can merge with
5632  * the preceding extent rather than do the shift.
5633  *
5634  * This function assumes the caller has verified a shift-by-merge is possible
5635  * with the provided extents via xfs_bmse_can_merge().
5636  */
5637 STATIC int
5638 xfs_bmse_merge(
5639 	struct xfs_trans		*tp,
5640 	struct xfs_inode		*ip,
5641 	int				whichfork,
5642 	xfs_fileoff_t			shift,		/* shift fsb */
5643 	struct xfs_iext_cursor		*icur,
5644 	struct xfs_bmbt_irec		*got,		/* extent to shift */
5645 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5646 	struct xfs_btree_cur		*cur,
5647 	int				*logflags)	/* output */
5648 {
5649 	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, whichfork);
5650 	struct xfs_bmbt_irec		new;
5651 	xfs_filblks_t			blockcount;
5652 	int				error, i;
5653 	struct xfs_mount		*mp = ip->i_mount;
5654 
5655 	blockcount = left->br_blockcount + got->br_blockcount;
5656 
5657 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5658 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5659 	ASSERT(xfs_bmse_can_merge(left, got, shift));
5660 
5661 	new = *left;
5662 	new.br_blockcount = blockcount;
5663 
5664 	/*
5665 	 * Update the on-disk extent count, the btree if necessary and log the
5666 	 * inode.
5667 	 */
5668 	ifp->if_nextents--;
5669 	*logflags |= XFS_ILOG_CORE;
5670 	if (!cur) {
5671 		*logflags |= XFS_ILOG_DEXT;
5672 		goto done;
5673 	}
5674 
5675 	/* lookup and remove the extent to merge */
5676 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5677 	if (error)
5678 		return error;
5679 	if (XFS_IS_CORRUPT(mp, i != 1))
5680 		return -EFSCORRUPTED;
5681 
5682 	error = xfs_btree_delete(cur, &i);
5683 	if (error)
5684 		return error;
5685 	if (XFS_IS_CORRUPT(mp, i != 1))
5686 		return -EFSCORRUPTED;
5687 
5688 	/* lookup and update size of the previous extent */
5689 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5690 	if (error)
5691 		return error;
5692 	if (XFS_IS_CORRUPT(mp, i != 1))
5693 		return -EFSCORRUPTED;
5694 
5695 	error = xfs_bmbt_update(cur, &new);
5696 	if (error)
5697 		return error;
5698 
5699 	/* change to extent format if required after extent removal */
5700 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5701 	if (error)
5702 		return error;
5703 
5704 done:
5705 	xfs_iext_remove(ip, icur, 0);
5706 	xfs_iext_prev(ifp, icur);
5707 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5708 			&new);
5709 
5710 	/* update reverse mapping. rmap functions merge the rmaps for us */
5711 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5712 	memcpy(&new, got, sizeof(new));
5713 	new.br_startoff = left->br_startoff + left->br_blockcount;
5714 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5715 	return 0;
5716 }
5717 
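/*
 * Shift a single extent to the new file offset startoff: update the bmap
 * btree record (if there is one), the incore extent and the reverse
 * mapping.  The caller logs the inode with the returned logflags.
 */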
5718 static int
5719 xfs_bmap_shift_update_extent(
5720 	struct xfs_trans	*tp,
5721 	struct xfs_inode	*ip,
5722 	int			whichfork,
5723 	struct xfs_iext_cursor	*icur,
5724 	struct xfs_bmbt_irec	*got,
5725 	struct xfs_btree_cur	*cur,
5726 	int			*logflags,
5727 	xfs_fileoff_t		startoff)
5728 {
5729 	struct xfs_mount	*mp = ip->i_mount;
5730 	struct xfs_bmbt_irec	prev = *got;
5731 	int			error, i;
5732 
5733 	*logflags |= XFS_ILOG_CORE;
5734 
5735 	got->br_startoff = startoff;
5736 
5737 	if (cur) {
5738 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5739 		if (error)
5740 			return error;
5741 		if (XFS_IS_CORRUPT(mp, i != 1))
5742 			return -EFSCORRUPTED;
5743 
5744 		error = xfs_bmbt_update(cur, got);
5745 		if (error)
5746 			return error;
5747 	} else {
5748 		*logflags |= XFS_ILOG_DEXT;
5749 	}
5750 
5751 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5752 			got);
5753 
5754 	/* update reverse mapping */
5755 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5756 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5757 	return 0;
5758 }
5759 
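/*
 * Shift the extent at *next_fsb left by offset_shift_fsb, merging it into
 * its left neighbour when the two become contiguous.  One extent is
 * processed per call; *next_fsb is advanced to the next candidate and
 * *done is set when the end of the fork is reached.
 */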
5760 int
5761 xfs_bmap_collapse_extents(
5762 	struct xfs_trans	*tp,
5763 	struct xfs_inode	*ip,
5764 	xfs_fileoff_t		*next_fsb,
5765 	xfs_fileoff_t		offset_shift_fsb,
5766 	bool			*done)
5767 {
5768 	int			whichfork = XFS_DATA_FORK;
5769 	struct xfs_mount	*mp = ip->i_mount;
5770 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
5771 	struct xfs_btree_cur	*cur = NULL;
5772 	struct xfs_bmbt_irec	got, prev;
5773 	struct xfs_iext_cursor	icur;
5774 	xfs_fileoff_t		new_startoff;
5775 	int			error = 0;
5776 	int			logflags = 0;
5777 
5778 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5779 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5780 		return -EFSCORRUPTED;
5781 	}
5782 
5783 	if (XFS_FORCED_SHUTDOWN(mp))
5784 		return -EIO;
5785 
5786 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5787 
5788 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5789 		error = xfs_iread_extents(tp, ip, whichfork);
5790 		if (error)
5791 			return error;
5792 	}
5793 
5794 	if (ifp->if_flags & XFS_IFBROOT) {
5795 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5796 		cur->bc_ino.flags = 0;
5797 	}
5798 
5799 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5800 		*done = true;
5801 		goto del_cursor;
5802 	}
5803 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5804 		error = -EFSCORRUPTED;
5805 		goto del_cursor;
5806 	}
5807 
5808 	new_startoff = got.br_startoff - offset_shift_fsb;
5809 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5810 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5811 			error = -EINVAL;
5812 			goto del_cursor;
5813 		}
5814 
5815 		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5816 			error = xfs_bmse_merge(tp, ip, whichfork,
5817 					offset_shift_fsb, &icur, &got, &prev,
5818 					cur, &logflags);
5819 			if (error)
5820 				goto del_cursor;
5821 			goto done;
5822 		}
5823 	} else {
5824 		if (got.br_startoff < offset_shift_fsb) {
5825 			error = -EINVAL;
5826 			goto del_cursor;
5827 		}
5828 	}
5829 
5830 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5831 			cur, &logflags, new_startoff);
5832 	if (error)
5833 		goto del_cursor;
5834 
5835 done:
5836 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5837 		*done = true;
5838 		goto del_cursor;
5839 	}
5840 
5841 	*next_fsb = got.br_startoff;
5842 del_cursor:
5843 	if (cur)
5844 		xfs_btree_del_cursor(cur, error);
5845 	if (logflags)
5846 		xfs_trans_log_inode(tp, ip, logflags);
5847 	return error;
5848 }
5849 
5850 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5851 int
5852 xfs_bmap_can_insert_extents(
5853 	struct xfs_inode	*ip,
5854 	xfs_fileoff_t		off,
5855 	xfs_fileoff_t		shift)
5856 {
5857 	struct xfs_bmbt_irec	got;
5858 	int			is_empty;
5859 	int			error = 0;
5860 
5861 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5862 
5863 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5864 		return -EIO;
5865 
5866 	xfs_ilock(ip, XFS_ILOCK_EXCL);
5867 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5868 	if (!error && !is_empty && got.br_startoff >= off &&
5869 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5870 		error = -EINVAL;
5871 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
5872 
5873 	return error;
5874 }
5875 
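/*
 * Shift the extent at *next_fsb right by offset_shift_fsb, working from
 * the end of the fork back towards stop_fsb.  As with a collapse, one
 * extent is processed per call, and *done is set once the shift reaches
 * stop_fsb.
 */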
5876 int
5877 xfs_bmap_insert_extents(
5878 	struct xfs_trans	*tp,
5879 	struct xfs_inode	*ip,
5880 	xfs_fileoff_t		*next_fsb,
5881 	xfs_fileoff_t		offset_shift_fsb,
5882 	bool			*done,
5883 	xfs_fileoff_t		stop_fsb)
5884 {
5885 	int			whichfork = XFS_DATA_FORK;
5886 	struct xfs_mount	*mp = ip->i_mount;
5887 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
5888 	struct xfs_btree_cur	*cur = NULL;
5889 	struct xfs_bmbt_irec	got, next;
5890 	struct xfs_iext_cursor	icur;
5891 	xfs_fileoff_t		new_startoff;
5892 	int			error = 0;
5893 	int			logflags = 0;
5894 
5895 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5896 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5897 		return -EFSCORRUPTED;
5898 	}
5899 
5900 	if (XFS_FORCED_SHUTDOWN(mp))
5901 		return -EIO;
5902 
5903 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5904 
5905 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5906 		error = xfs_iread_extents(tp, ip, whichfork);
5907 		if (error)
5908 			return error;
5909 	}
5910 
5911 	if (ifp->if_flags & XFS_IFBROOT) {
5912 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5913 		cur->bc_ino.flags = 0;
5914 	}
5915 
5916 	if (*next_fsb == NULLFSBLOCK) {
5917 		xfs_iext_last(ifp, &icur);
5918 		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5919 		    stop_fsb > got.br_startoff) {
5920 			*done = true;
5921 			goto del_cursor;
5922 		}
5923 	} else {
5924 		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5925 			*done = true;
5926 			goto del_cursor;
5927 		}
5928 	}
5929 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5930 		error = -EFSCORRUPTED;
5931 		goto del_cursor;
5932 	}
5933 
5934 	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
5935 		error = -EFSCORRUPTED;
5936 		goto del_cursor;
5937 	}
5938 
5939 	new_startoff = got.br_startoff + offset_shift_fsb;
5940 	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5941 		if (new_startoff + got.br_blockcount > next.br_startoff) {
5942 			error = -EINVAL;
5943 			goto del_cursor;
5944 		}
5945 
5946 		/*
5947 		 * Unlike a left shift (which involves a hole punch), a right
5948 		 * shift does not modify extent neighbors in any way.  We should
5949 		 * never find mergeable extents in this scenario.  Check anyway
5950 		 * and warn if we encounter two extents that could be one.
5951 		 */
5952 		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5953 			WARN_ON_ONCE(1);
5954 	}
5955 
5956 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5957 			cur, &logflags, new_startoff);
5958 	if (error)
5959 		goto del_cursor;
5960 
5961 	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5962 	    stop_fsb >= got.br_startoff + got.br_blockcount) {
5963 		*done = true;
5964 		goto del_cursor;
5965 	}
5966 
5967 	*next_fsb = got.br_startoff;
5968 del_cursor:
5969 	if (cur)
5970 		xfs_btree_del_cursor(cur, error);
5971 	if (logflags)
5972 		xfs_trans_log_inode(tp, ip, logflags);
5973 	return error;
5974 }
5975 
5976 /*
5977  * Split an extent into two extents at split_fsb, such that split_fsb
5978  * becomes the first block of the new (right-hand) extent.  If split_fsb
5979  * lies in a hole or at the first block of an existing extent, there is
5980  * nothing to split, so just return 0.
5981  */
5982 int
5983 xfs_bmap_split_extent(
5984 	struct xfs_trans	*tp,
5985 	struct xfs_inode	*ip,
5986 	xfs_fileoff_t		split_fsb)
5987 {
5988 	int				whichfork = XFS_DATA_FORK;
5989 	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, whichfork);
5990 	struct xfs_btree_cur		*cur = NULL;
5991 	struct xfs_bmbt_irec		got;
5992 	struct xfs_bmbt_irec		new; /* split extent */
5993 	struct xfs_mount		*mp = ip->i_mount;
5994 	xfs_fsblock_t			gotblkcnt; /* new block count for got */
5995 	struct xfs_iext_cursor		icur;
5996 	int				error = 0;
5997 	int				logflags = 0;
5998 	int				i = 0;
5999 
6000 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6001 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6002 		return -EFSCORRUPTED;
6003 	}
6004 
6005 	if (XFS_FORCED_SHUTDOWN(mp))
6006 		return -EIO;
6007 
6008 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6009 		/* Read in all the extents */
6010 		error = xfs_iread_extents(tp, ip, whichfork);
6011 		if (error)
6012 			return error;
6013 	}
6014 
6015 	/*
6016 	 * If there are no extents, or split_fsb lies in a hole, we are done.
6017 	 */
6018 	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6019 	    got.br_startoff >= split_fsb)
6020 		return 0;
6021 
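	/* got keeps the first gotblkcnt blocks; new carries the rest. */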
6022 	gotblkcnt = split_fsb - got.br_startoff;
6023 	new.br_startoff = split_fsb;
6024 	new.br_startblock = got.br_startblock + gotblkcnt;
6025 	new.br_blockcount = got.br_blockcount - gotblkcnt;
6026 	new.br_state = got.br_state;
6027 
6028 	if (ifp->if_flags & XFS_IFBROOT) {
6029 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6030 		cur->bc_ino.flags = 0;
6031 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
6032 		if (error)
6033 			goto del_cursor;
6034 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6035 			error = -EFSCORRUPTED;
6036 			goto del_cursor;
6037 		}
6038 	}
6039 
6040 	got.br_blockcount = gotblkcnt;
6041 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6042 			&got);
6043 
6044 	logflags = XFS_ILOG_CORE;
6045 	if (cur) {
6046 		error = xfs_bmbt_update(cur, &got);
6047 		if (error)
6048 			goto del_cursor;
6049 	} else
6050 		logflags |= XFS_ILOG_DEXT;
6051 
6052 	/* Add new extent */
6053 	xfs_iext_next(ifp, &icur);
6054 	xfs_iext_insert(ip, &icur, &new, 0);
6055 	ifp->if_nextents++;
6056 
6057 	if (cur) {
6058 		error = xfs_bmbt_lookup_eq(cur, &new, &i);
6059 		if (error)
6060 			goto del_cursor;
6061 		if (XFS_IS_CORRUPT(mp, i != 0)) {
6062 			error = -EFSCORRUPTED;
6063 			goto del_cursor;
6064 		}
6065 		error = xfs_btree_insert(cur, &i);
6066 		if (error)
6067 			goto del_cursor;
6068 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6069 			error = -EFSCORRUPTED;
6070 			goto del_cursor;
6071 		}
6072 	}
6073 
6074 	/*
6075 	 * Convert to a btree if necessary.
6076 	 */
6077 	if (xfs_bmap_needs_btree(ip, whichfork)) {
6078 		int tmp_logflags; /* partial log flag return val */
6079 
6080 		ASSERT(cur == NULL);
6081 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6082 				&tmp_logflags, whichfork);
6083 		logflags |= tmp_logflags;
6084 	}
6085 
6086 del_cursor:
6087 	if (cur) {
6088 		cur->bc_ino.allocated = 0;
6089 		xfs_btree_del_cursor(cur, error);
6090 	}
6091 
6092 	if (logflags)
6093 		xfs_trans_log_inode(tp, ip, logflags);
6094 	return error;
6095 }
6096 
6097 /* Deferred mapping is only for real extents in the data fork. */
6098 static bool
6099 xfs_bmap_is_update_needed(
6100 	struct xfs_bmbt_irec	*bmap)
6101 {
6102 	return  bmap->br_startblock != HOLESTARTBLOCK &&
6103 		bmap->br_startblock != DELAYSTARTBLOCK;
6104 }
6105 
6106 /* Record a bmap intent. */
6107 static int
6108 __xfs_bmap_add(
6109 	struct xfs_trans		*tp,
6110 	enum xfs_bmap_intent_type	type,
6111 	struct xfs_inode		*ip,
6112 	int				whichfork,
6113 	struct xfs_bmbt_irec		*bmap)
6114 {
6115 	struct xfs_bmap_intent		*bi;
6116 
6117 	trace_xfs_bmap_defer(tp->t_mountp,
6118 			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6119 			type,
6120 			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6121 			ip->i_ino, whichfork,
6122 			bmap->br_startoff,
6123 			bmap->br_blockcount,
6124 			bmap->br_state);
6125 
6126 	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS);
6127 	INIT_LIST_HEAD(&bi->bi_list);
6128 	bi->bi_type = type;
6129 	bi->bi_owner = ip;
6130 	bi->bi_whichfork = whichfork;
6131 	bi->bi_bmap = *bmap;
6132 
6133 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6134 	return 0;
6135 }
6136 
6137 /* Map an extent into a file. */
6138 void
6139 xfs_bmap_map_extent(
6140 	struct xfs_trans	*tp,
6141 	struct xfs_inode	*ip,
6142 	struct xfs_bmbt_irec	*PREV)
6143 {
6144 	if (!xfs_bmap_is_update_needed(PREV))
6145 		return;
6146 
6147 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6148 }
6149 
6150 /* Unmap an extent out of a file. */
6151 void
6152 xfs_bmap_unmap_extent(
6153 	struct xfs_trans	*tp,
6154 	struct xfs_inode	*ip,
6155 	struct xfs_bmbt_irec	*PREV)
6156 {
6157 	if (!xfs_bmap_is_update_needed(PREV))
6158 		return;
6159 
6160 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
6161 }
6162 
6163 /*
6164  * Process one of the deferred bmap operations.  The intent type tells
6165  * us whether to map an extent into the file or unmap one from it.
6166  */
6167 int
6168 xfs_bmap_finish_one(
6169 	struct xfs_trans		*tp,
6170 	struct xfs_inode		*ip,
6171 	enum xfs_bmap_intent_type	type,
6172 	int				whichfork,
6173 	xfs_fileoff_t			startoff,
6174 	xfs_fsblock_t			startblock,
6175 	xfs_filblks_t			*blockcount,
6176 	xfs_exntst_t			state)
6177 {
6178 	int				error = 0;
6179 
6180 	ASSERT(tp->t_firstblock == NULLFSBLOCK);
6181 
6182 	trace_xfs_bmap_deferred(tp->t_mountp,
6183 			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6184 			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6185 			ip->i_ino, whichfork, startoff, *blockcount, state);
6186 
6187 	if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6188 		return -EFSCORRUPTED;
6189 
6190 	if (XFS_TEST_ERROR(false, tp->t_mountp,
6191 			XFS_ERRTAG_BMAP_FINISH_ONE))
6192 		return -EIO;
6193 
6194 	switch (type) {
6195 	case XFS_BMAP_MAP:
6196 		error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6197 				startblock, 0);
6198 		*blockcount = 0;
6199 		break;
6200 	case XFS_BMAP_UNMAP:
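		/*
		 * A partial unmap leaves the remaining length in *blockcount
		 * so the deferred-op machinery can requeue the rest.
		 */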
6201 		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6202 				XFS_BMAPI_REMAP, 1);
6203 		break;
6204 	default:
6205 		ASSERT(0);
6206 		error = -EFSCORRUPTED;
6207 	}
6208 
6209 	return error;
6210 }
6211 
6212 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6213 xfs_failaddr_t
6214 xfs_bmap_validate_extent(
6215 	struct xfs_inode	*ip,
6216 	int			whichfork,
6217 	struct xfs_bmbt_irec	*irec)
6218 {
6219 	struct xfs_mount	*mp = ip->i_mount;
6220 	xfs_fsblock_t		endfsb;
6221 	bool			isrt;
6222 
6223 	isrt = XFS_IS_REALTIME_INODE(ip);
6224 	endfsb = irec->br_startblock + irec->br_blockcount - 1;
6225 	if (isrt) {
6226 		if (!xfs_verify_rtbno(mp, irec->br_startblock))
6227 			return __this_address;
6228 		if (!xfs_verify_rtbno(mp, endfsb))
6229 			return __this_address;
6230 	} else {
6231 		if (!xfs_verify_fsbno(mp, irec->br_startblock))
6232 			return __this_address;
6233 		if (!xfs_verify_fsbno(mp, endfsb))
6234 			return __this_address;
6235 		if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6236 		    XFS_FSB_TO_AGNO(mp, endfsb))
6237 			return __this_address;
6238 	}
6239 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6240 		return __this_address;
6241 	return NULL;
6242 }
6243