xref: /openbmc/linux/fs/xfs/xfs_bmap_util.c (revision e2f1cf25)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * Copyright (c) 2012 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_inode.h"
29 #include "xfs_btree.h"
30 #include "xfs_trans.h"
31 #include "xfs_extfree_item.h"
32 #include "xfs_alloc.h"
33 #include "xfs_bmap.h"
34 #include "xfs_bmap_util.h"
35 #include "xfs_bmap_btree.h"
36 #include "xfs_rtalloc.h"
37 #include "xfs_error.h"
38 #include "xfs_quota.h"
39 #include "xfs_trans_space.h"
40 #include "xfs_trace.h"
41 #include "xfs_icache.h"
42 #include "xfs_log.h"
43 
44 /* Kernel only BMAP related definitions and functions */
45 
46 /*
47  * Convert the given file system block to a disk block.  We have to treat it
48  * differently based on whether the file is a real time file or not, because the
49  * bmap code does.
50  */
51 xfs_daddr_t
52 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
53 {
54 	return (XFS_IS_REALTIME_INODE(ip) ?
55 		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
56 		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
57 }
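
/*
 * Illustrative sketch (not part of the original source): on the data device
 * an fsblock number encodes (AG number, AG block), so for a 4k block size
 * filesystem (sb_blocklog = 12, BBSHIFT = 9) the conversion expands to
 * roughly
 *
 *	daddr = ((agno * mp->m_sb.sb_agblocks) + agbno) << (12 - 9);
 *
 * i.e. each filesystem block covers 8 512-byte sectors.  A realtime fsblock
 * is already a linear block number on the realtime device, so only the
 * block-to-sector scaling (XFS_FSB_TO_BB) is applied, which is why the two
 * cases are treated differently above.
 */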
58 
59 /*
60  * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
61  * caller.  Frees all the extents that need freeing, which must be done
62  * last due to locking considerations.  We never free any extents in
63  * the first transaction.
64  *
65  * Returns, in the committed parameter, 1 if the given transaction was
66  * committed and a new one started, and 0 otherwise.
67  */
68 int						/* error */
69 xfs_bmap_finish(
70 	xfs_trans_t		**tp,		/* transaction pointer addr */
71 	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
72 	int			*committed)	/* xact committed or not */
73 {
74 	xfs_efd_log_item_t	*efd;		/* extent free data */
75 	xfs_efi_log_item_t	*efi;		/* extent free intention */
76 	int			error;		/* error return value */
77 	xfs_bmap_free_item_t	*free;		/* free extent item */
78 	xfs_mount_t		*mp;		/* filesystem mount structure */
79 	xfs_bmap_free_item_t	*next;		/* next item on free list */
80 
81 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
82 	if (flist->xbf_count == 0) {
83 		*committed = 0;
84 		return 0;
85 	}
86 	efi = xfs_trans_get_efi(*tp, flist->xbf_count);
87 	for (free = flist->xbf_first; free; free = free->xbfi_next)
88 		xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
89 			free->xbfi_blockcount);
90 
91 	error = xfs_trans_roll(tp, NULL);
92 	*committed = 1;
93 	/*
94 	 * We have a new transaction, so we should return committed=1,
95 	 * even though we're returning an error.
96 	 */
97 	if (error)
98 		return error;
99 
100 	efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
101 	for (free = flist->xbf_first; free != NULL; free = next) {
102 		next = free->xbfi_next;
103 		if ((error = xfs_free_extent(*tp, free->xbfi_startblock,
104 				free->xbfi_blockcount))) {
105 			/*
106 			 * The bmap free list will be cleaned up at a
107 			 * higher level.  The EFI will be canceled when
108 			 * this transaction is aborted.
109 			 * Need to force shutdown here to make sure it
110 			 * happens, since this transaction may not be
111 			 * dirty yet.
112 			 */
113 			mp = (*tp)->t_mountp;
114 			if (!XFS_FORCED_SHUTDOWN(mp))
115 				xfs_force_shutdown(mp,
116 						   (error == -EFSCORRUPTED) ?
117 						   SHUTDOWN_CORRUPT_INCORE :
118 						   SHUTDOWN_META_IO_ERROR);
119 			return error;
120 		}
121 		xfs_trans_log_efd_extent(*tp, efd, free->xbfi_startblock,
122 			free->xbfi_blockcount);
123 		xfs_bmap_del_free(flist, NULL, free);
124 	}
125 	return 0;
126 }
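
/*
 * Sketch of the canonical caller pattern (as used by xfs_alloc_file_space()
 * and xfs_free_file_space() later in this file); illustrative only:
 *
 *	xfs_bmap_init(&free_list, &firstfsb);
 *	error = xfs_bunmapi(tp, ip, ..., &firstfsb, &free_list, &done);
 *	if (error)
 *		goto out_cancel;	// xfs_bmap_cancel(&free_list)
 *	error = xfs_bmap_finish(&tp, &free_list, &committed);
 *	if (error)
 *		goto out_cancel;
 *	error = xfs_trans_commit(tp);
 *
 * xfs_bmap_finish() logs an EFI covering every extent on the list, rolls the
 * transaction, then frees the extents and logs the matching EFD, so log
 * recovery can redo the frees from the intent item if we crash between the
 * two halves.
 */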
127 
128 int
129 xfs_bmap_rtalloc(
130 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
131 {
132 	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
133 	int		error;		/* error return value */
134 	xfs_mount_t	*mp;		/* mount point structure */
135 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
136 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
137 	xfs_extlen_t	align;		/* minimum allocation alignment */
138 	xfs_rtblock_t	rtb;
139 
140 	mp = ap->ip->i_mount;
141 	align = xfs_get_extsz_hint(ap->ip);
142 	prod = align / mp->m_sb.sb_rextsize;
143 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
144 					align, 1, ap->eof, 0,
145 					ap->conv, &ap->offset, &ap->length);
146 	if (error)
147 		return error;
148 	ASSERT(ap->length);
149 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
150 
151 	/*
152 	 * If the offset & length are not perfectly aligned
153 	 * then kill prod; it will just get us in trouble.
154 	 */
155 	if (do_mod(ap->offset, align) || ap->length % align)
156 		prod = 1;
157 	/*
158 	 * Set ralen to be the actual requested length in rtextents.
159 	 */
160 	ralen = ap->length / mp->m_sb.sb_rextsize;
161 	/*
162 	 * If the old value was close enough to MAXEXTLEN that
163 	 * we rounded up to it, cut it back so it's valid again.
164 	 * Note that if it's a really large request (bigger than
165 	 * MAXEXTLEN), we don't hear about that number, and can't
166 	 * adjust the starting point to match it.
167 	 */
168 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
169 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
170 
171 	/*
172 	 * Lock out other modifications to the RT bitmap inode.
173 	 */
174 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
175 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
176 
177 	/*
178 	 * If it's an allocation to an empty file at offset 0,
179 	 * pick an extent that will space things out in the rt area.
180 	 */
181 	if (ap->eof && ap->offset == 0) {
182 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
183 
184 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
185 		if (error)
186 			return error;
187 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
188 	} else {
189 		ap->blkno = 0;
190 	}
191 
192 	xfs_bmap_adjacent(ap);
193 
194 	/*
195 	 * Realtime allocation, done through xfs_rtallocate_extent.
196 	 */
197 	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
198 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
199 	rtb = ap->blkno;
200 	ap->length = ralen;
201 	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
202 				&ralen, atype, ap->wasdel, prod, &rtb)))
203 		return error;
204 	if (rtb == NULLFSBLOCK && prod > 1 &&
205 	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
206 					   ap->length, &ralen, atype,
207 					   ap->wasdel, 1, &rtb)))
208 		return error;
209 	ap->blkno = rtb;
210 	if (ap->blkno != NULLFSBLOCK) {
211 		ap->blkno *= mp->m_sb.sb_rextsize;
212 		ralen *= mp->m_sb.sb_rextsize;
213 		ap->length = ralen;
214 		ap->ip->i_d.di_nblocks += ralen;
215 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
216 		if (ap->wasdel)
217 			ap->ip->i_delayed_blks -= ralen;
218 		/*
219 		 * Adjust the disk quota also. This was reserved
220 		 * earlier.
221 		 */
222 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
223 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
224 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
225 	} else {
226 		ap->length = 0;
227 	}
228 	return 0;
229 }
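
/*
 * Worked example (illustrative, not from the original source): with
 * sb_rextsize = 16 filesystem blocks and an extent size hint of 64 blocks,
 * align = 64 and prod = 64 / 16 = 4, so the realtime allocator is asked for
 * lengths that are a multiple of 4 realtime extents.  A 256-block request
 * gives ralen = 256 / 16 = 16 rtextents.  If the offset or length is not a
 * multiple of the 64-block hint, prod is dropped back to 1 and only whole
 * realtime extents are guaranteed.
 */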
230 
231 /*
232  * Check if the endoff is outside the last extent. If so, the caller will grow
233  * the allocation to a stripe unit boundary.  All offsets are considered outside
234  * the end of file for an empty fork, so 1 is returned in *eof in that case.
235  */
236 int
237 xfs_bmap_eof(
238 	struct xfs_inode	*ip,
239 	xfs_fileoff_t		endoff,
240 	int			whichfork,
241 	int			*eof)
242 {
243 	struct xfs_bmbt_irec	rec;
244 	int			error;
245 
246 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
247 	if (error || *eof)
248 		return error;
249 
250 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
251 	return 0;
252 }
253 
254 /*
255  * Extent tree block counting routines.
256  */
257 
258 /*
259  * Count leaf blocks given a range of extent records.
260  */
261 STATIC void
262 xfs_bmap_count_leaves(
263 	xfs_ifork_t		*ifp,
264 	xfs_extnum_t		idx,
265 	int			numrecs,
266 	int			*count)
267 {
268 	int		b;
269 
270 	for (b = 0; b < numrecs; b++) {
271 		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
272 		*count += xfs_bmbt_get_blockcount(frp);
273 	}
274 }
275 
276 /*
277  * Count leaf blocks given a range of extent records originally
278  * in btree format.
279  */
280 STATIC void
281 xfs_bmap_disk_count_leaves(
282 	struct xfs_mount	*mp,
283 	struct xfs_btree_block	*block,
284 	int			numrecs,
285 	int			*count)
286 {
287 	int		b;
288 	xfs_bmbt_rec_t	*frp;
289 
290 	for (b = 1; b <= numrecs; b++) {
291 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
292 		*count += xfs_bmbt_disk_get_blockcount(frp);
293 	}
294 }
295 
296 /*
297  * Recursively walks each level of a btree
298  * to count total fsblocks in use.
299  */
300 STATIC int                                     /* error */
301 xfs_bmap_count_tree(
302 	xfs_mount_t     *mp,            /* file system mount point */
303 	xfs_trans_t     *tp,            /* transaction pointer */
304 	xfs_ifork_t	*ifp,		/* inode fork pointer */
305 	xfs_fsblock_t   blockno,	/* file system block number */
306 	int             levelin,	/* level in btree */
307 	int		*count)		/* Count of blocks */
308 {
309 	int			error;
310 	xfs_buf_t		*bp, *nbp;
311 	int			level = levelin;
312 	__be64			*pp;
313 	xfs_fsblock_t           bno = blockno;
314 	xfs_fsblock_t		nextbno;
315 	struct xfs_btree_block	*block, *nextblock;
316 	int			numrecs;
317 
318 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
319 						&xfs_bmbt_buf_ops);
320 	if (error)
321 		return error;
322 	*count += 1;
323 	block = XFS_BUF_TO_BLOCK(bp);
324 
325 	if (--level) {
326 		/* Not at node above leaves, count this level of nodes */
327 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
328 		while (nextbno != NULLFSBLOCK) {
329 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
330 						XFS_BMAP_BTREE_REF,
331 						&xfs_bmbt_buf_ops);
332 			if (error)
333 				return error;
334 			*count += 1;
335 			nextblock = XFS_BUF_TO_BLOCK(nbp);
336 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
337 			xfs_trans_brelse(tp, nbp);
338 		}
339 
340 		/* Dive to the next level */
341 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
342 		bno = be64_to_cpu(*pp);
343 		if (unlikely((error =
344 		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
345 			xfs_trans_brelse(tp, bp);
346 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
347 					 XFS_ERRLEVEL_LOW, mp);
348 			return -EFSCORRUPTED;
349 		}
350 		xfs_trans_brelse(tp, bp);
351 	} else {
352 		/* count all level 1 nodes and their leaves */
353 		for (;;) {
354 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
355 			numrecs = be16_to_cpu(block->bb_numrecs);
356 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
357 			xfs_trans_brelse(tp, bp);
358 			if (nextbno == NULLFSBLOCK)
359 				break;
360 			bno = nextbno;
361 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
362 						XFS_BMAP_BTREE_REF,
363 						&xfs_bmbt_buf_ops);
364 			if (error)
365 				return error;
366 			*count += 1;
367 			block = XFS_BUF_TO_BLOCK(bp);
368 		}
369 	}
370 	return 0;
371 }
372 
373 /*
374  * Count fsblocks of the given fork.
375  */
376 int						/* error */
377 xfs_bmap_count_blocks(
378 	xfs_trans_t		*tp,		/* transaction pointer */
379 	xfs_inode_t		*ip,		/* incore inode */
380 	int			whichfork,	/* data or attr fork */
381 	int			*count)		/* out: count of blocks */
382 {
383 	struct xfs_btree_block	*block;	/* current btree block */
384 	xfs_fsblock_t		bno;	/* block # of "block" */
385 	xfs_ifork_t		*ifp;	/* fork structure */
386 	int			level;	/* btree level, for checking */
387 	xfs_mount_t		*mp;	/* file system mount structure */
388 	__be64			*pp;	/* pointer to block address */
389 
390 	bno = NULLFSBLOCK;
391 	mp = ip->i_mount;
392 	ifp = XFS_IFORK_PTR(ip, whichfork);
393 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
394 		xfs_bmap_count_leaves(ifp, 0,
395 			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
396 			count);
397 		return 0;
398 	}
399 
400 	/*
401 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
402 	 */
403 	block = ifp->if_broot;
404 	level = be16_to_cpu(block->bb_level);
405 	ASSERT(level > 0);
406 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
407 	bno = be64_to_cpu(*pp);
408 	ASSERT(bno != NULLFSBLOCK);
409 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
410 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
411 
412 	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
413 		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
414 				 mp);
415 		return -EFSCORRUPTED;
416 	}
417 
418 	return 0;
419 }
420 
421 /*
422  * returns 1 for success, 0 if we failed to map the extent.
423  */
424 STATIC int
425 xfs_getbmapx_fix_eof_hole(
426 	xfs_inode_t		*ip,		/* xfs incore inode pointer */
427 	struct getbmapx		*out,		/* output structure */
428 	int			prealloced,	/* this is a file with
429 						 * preallocated data space */
430 	__int64_t		end,		/* last block requested */
431 	xfs_fsblock_t		startblock)
432 {
433 	__int64_t		fixlen;
434 	xfs_mount_t		*mp;		/* file system mount point */
435 	xfs_ifork_t		*ifp;		/* inode fork pointer */
436 	xfs_extnum_t		lastx;		/* last extent pointer */
437 	xfs_fileoff_t		fileblock;
438 
439 	if (startblock == HOLESTARTBLOCK) {
440 		mp = ip->i_mount;
441 		out->bmv_block = -1;
442 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
443 		fixlen -= out->bmv_offset;
444 		if (prealloced && out->bmv_offset + out->bmv_length == end) {
445 			/* Came to hole at EOF. Trim it. */
446 			if (fixlen <= 0)
447 				return 0;
448 			out->bmv_length = fixlen;
449 		}
450 	} else {
451 		if (startblock == DELAYSTARTBLOCK)
452 			out->bmv_block = -2;
453 		else
454 			out->bmv_block = xfs_fsb_to_db(ip, startblock);
455 		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
456 		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
457 		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
458 		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
459 			out->bmv_oflags |= BMV_OF_LAST;
460 	}
461 
462 	return 1;
463 }
464 
465 /*
466  * Get inode's extents as described in bmv, and format for output.
467  * Calls formatter to fill the user's buffer until all extents
468  * are mapped, until the passed-in bmv->bmv_count slots have
469  * been filled, or until the formatter short-circuits the loop,
470  * if it is tracking filled-in extents on its own.
471  */
472 int						/* error code */
473 xfs_getbmap(
474 	xfs_inode_t		*ip,
475 	struct getbmapx		*bmv,		/* user bmap structure */
476 	xfs_bmap_format_t	formatter,	/* format to user */
477 	void			*arg)		/* formatter arg */
478 {
479 	__int64_t		bmvend;		/* last block requested */
480 	int			error = 0;	/* return value */
481 	__int64_t		fixlen;		/* length for -1 case */
482 	int			i;		/* extent number */
483 	int			lock;		/* lock state */
484 	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
485 	xfs_mount_t		*mp;		/* file system mount point */
486 	int			nex;		/* # of user extents can do */
487 	int			nexleft;	/* # of user extents left */
488 	int			subnex;		/* # of bmapi's can do */
489 	int			nmap;		/* number of map entries */
490 	struct getbmapx		*out;		/* output structure */
491 	int			whichfork;	/* data or attr fork */
492 	int			prealloced;	/* this is a file with
493 						 * preallocated data space */
494 	int			iflags;		/* interface flags */
495 	int			bmapi_flags;	/* flags for xfs_bmapi */
496 	int			cur_ext = 0;
497 
498 	mp = ip->i_mount;
499 	iflags = bmv->bmv_iflags;
500 	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
501 
502 	if (whichfork == XFS_ATTR_FORK) {
503 		if (XFS_IFORK_Q(ip)) {
504 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
505 			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
506 			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
507 				return -EINVAL;
508 		} else if (unlikely(
509 			   ip->i_d.di_aformat != 0 &&
510 			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
511 			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
512 					 ip->i_mount);
513 			return -EFSCORRUPTED;
514 		}
515 
516 		prealloced = 0;
517 		fixlen = 1LL << 32;
518 	} else {
519 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
520 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
521 		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
522 			return -EINVAL;
523 
524 		if (xfs_get_extsz_hint(ip) ||
525 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
526 			prealloced = 1;
527 			fixlen = mp->m_super->s_maxbytes;
528 		} else {
529 			prealloced = 0;
530 			fixlen = XFS_ISIZE(ip);
531 		}
532 	}
533 
534 	if (bmv->bmv_length == -1) {
535 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
536 		bmv->bmv_length =
537 			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
538 	} else if (bmv->bmv_length == 0) {
539 		bmv->bmv_entries = 0;
540 		return 0;
541 	} else if (bmv->bmv_length < 0) {
542 		return -EINVAL;
543 	}
544 
545 	nex = bmv->bmv_count - 1;
546 	if (nex <= 0)
547 		return -EINVAL;
548 	bmvend = bmv->bmv_offset + bmv->bmv_length;
549 
550 
551 	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
552 		return -ENOMEM;
553 	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
554 	if (!out)
555 		return -ENOMEM;
556 
557 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
558 	if (whichfork == XFS_DATA_FORK) {
559 		if (!(iflags & BMV_IF_DELALLOC) &&
560 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
561 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
562 			if (error)
563 				goto out_unlock_iolock;
564 
565 			/*
566 			 * Even after flushing the inode, there can still be
567 			 * delalloc blocks on the inode beyond EOF due to
568 			 * speculative preallocation.  These are not removed
569 			 * until the release function is called or the inode
570 			 * is inactivated.  Hence we cannot assert here that
571 			 * ip->i_delayed_blks == 0.
572 			 */
573 		}
574 
575 		lock = xfs_ilock_data_map_shared(ip);
576 	} else {
577 		lock = xfs_ilock_attr_map_shared(ip);
578 	}
579 
580 	/*
581 	 * Don't let nex be bigger than the number of extents
582 	 * we can have assuming alternating holes and real extents.
583 	 */
584 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
585 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
586 
587 	bmapi_flags = xfs_bmapi_aflag(whichfork);
588 	if (!(iflags & BMV_IF_PREALLOC))
589 		bmapi_flags |= XFS_BMAPI_IGSTATE;
590 
591 	/*
592 	 * Allocate enough space to handle "subnex" maps at a time.
593 	 */
594 	error = -ENOMEM;
595 	subnex = 16;
596 	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
597 	if (!map)
598 		goto out_unlock_ilock;
599 
600 	bmv->bmv_entries = 0;
601 
602 	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
603 	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
604 		error = 0;
605 		goto out_free_map;
606 	}
607 
608 	nexleft = nex;
609 
610 	do {
611 		nmap = (nexleft > subnex) ? subnex : nexleft;
612 		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
613 				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
614 				       map, &nmap, bmapi_flags);
615 		if (error)
616 			goto out_free_map;
617 		ASSERT(nmap <= subnex);
618 
619 		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
620 			out[cur_ext].bmv_oflags = 0;
621 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
622 				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
623 			else if (map[i].br_startblock == DELAYSTARTBLOCK)
624 				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
625 			out[cur_ext].bmv_offset =
626 				XFS_FSB_TO_BB(mp, map[i].br_startoff);
627 			out[cur_ext].bmv_length =
628 				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
629 			out[cur_ext].bmv_unused1 = 0;
630 			out[cur_ext].bmv_unused2 = 0;
631 
632 			/*
633 			 * delayed allocation extents that start beyond EOF can
634 			 * occur due to speculative EOF allocation when the
635 			 * delalloc extent is larger than the largest freespace
636 			 * extent at conversion time. These extents cannot be
637 			 * converted by data writeback, so can exist here even
638 			 * if we are not supposed to be finding delalloc
639 			 * extents.
640 			 */
641 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
642 			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
643 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
644 
645 			if (map[i].br_startblock == HOLESTARTBLOCK &&
646 			    whichfork == XFS_ATTR_FORK) {
647 				/* came to the end of attribute fork */
648 				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
649 				goto out_free_map;
650 			}
651 
652 			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
653 					prealloced, bmvend,
654 					map[i].br_startblock))
655 				goto out_free_map;
656 
657 			bmv->bmv_offset =
658 				out[cur_ext].bmv_offset +
659 				out[cur_ext].bmv_length;
660 			bmv->bmv_length =
661 				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
662 
663 			/*
664 			 * In case we don't want to return the hole,
665 			 * don't increase cur_ext so that we can reuse
666 			 * it in the next loop.
667 			 */
668 			if ((iflags & BMV_IF_NO_HOLES) &&
669 			    map[i].br_startblock == HOLESTARTBLOCK) {
670 				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
671 				continue;
672 			}
673 
674 			nexleft--;
675 			bmv->bmv_entries++;
676 			cur_ext++;
677 		}
678 	} while (nmap && nexleft && bmv->bmv_length);
679 
680  out_free_map:
681 	kmem_free(map);
682  out_unlock_ilock:
683 	xfs_iunlock(ip, lock);
684  out_unlock_iolock:
685 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
686 
687 	for (i = 0; i < cur_ext; i++) {
688 		int full = 0;	/* user array is full */
689 
690 		/* format results & advance arg */
691 		error = formatter(&arg, &out[i], &full);
692 		if (error || full)
693 			break;
694 	}
695 
696 	kmem_free(out);
697 	return error;
698 }
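
/*
 * Illustrative userspace sketch of the interface this function services
 * (XFS_IOC_GETBMAPX); the header name below is an assumption based on the
 * xfsprogs-installed headers and is not taken from this file.  Offsets,
 * lengths and block numbers are all in 512-byte units; slot 0 of the array
 * is the request header and the mapped extents follow it:
 *
 *	#include <xfs/xfs.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	static int dump_extents(int fd)
 *	{
 *		struct getbmapx	map[33];	// header + up to 32 extents
 *		int		i;
 *
 *		memset(map, 0, sizeof(map));
 *		map[0].bmv_length = -1;			// map to end of file
 *		map[0].bmv_count = 33;
 *		map[0].bmv_iflags = BMV_IF_PREALLOC;	// report unwritten too
 *
 *		if (ioctl(fd, XFS_IOC_GETBMAPX, map) < 0)
 *			return -1;
 *		for (i = 1; i <= map[0].bmv_entries; i++)
 *			printf("off %lld len %lld block %lld flags 0x%x\n",
 *			       (long long)map[i].bmv_offset,
 *			       (long long)map[i].bmv_length,
 *			       (long long)map[i].bmv_block,
 *			       map[i].bmv_oflags);
 *		return 0;
 *	}
 *
 * bmv_block is -1 for a hole and -2 for a delalloc extent, matching
 * xfs_getbmapx_fix_eof_hole() above.
 */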
699 
700 /*
701  * dead simple method of punching delayed allocation blocks from a range in
702  * the inode. Walks a block at a time so it will be slow, but is only executed in
703  * rare error cases so the overhead is not critical. This will always punch out
704  * both the start and end blocks, even if the ranges only partially overlap
705  * them, so it is up to the caller to ensure that partial blocks are not
706  * passed in.
707  */
708 int
709 xfs_bmap_punch_delalloc_range(
710 	struct xfs_inode	*ip,
711 	xfs_fileoff_t		start_fsb,
712 	xfs_fileoff_t		length)
713 {
714 	xfs_fileoff_t		remaining = length;
715 	int			error = 0;
716 
717 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
718 
719 	do {
720 		int		done;
721 		xfs_bmbt_irec_t	imap;
722 		int		nimaps = 1;
723 		xfs_fsblock_t	firstblock;
724 		xfs_bmap_free_t flist;
725 
726 		/*
727 		 * Map the range first and check that it is a delalloc extent
728 		 * before trying to unmap the range. Otherwise we will be
729 		 * trying to remove a real extent (which requires a
730 		 * transaction) or a hole, which is probably a bad idea...
731 		 */
732 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
733 				       XFS_BMAPI_ENTIRE);
734 
735 		if (error) {
736 			/* something screwed, just bail */
737 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
738 				xfs_alert(ip->i_mount,
739 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
740 						ip->i_ino, start_fsb);
741 			}
742 			break;
743 		}
744 		if (!nimaps) {
745 			/* nothing there */
746 			goto next_block;
747 		}
748 		if (imap.br_startblock != DELAYSTARTBLOCK) {
749 			/* been converted, ignore */
750 			goto next_block;
751 		}
752 		WARN_ON(imap.br_blockcount == 0);
753 
754 		/*
755 		 * Note: while we initialise the firstblock/flist pair, they
756 		 * should never be used because blocks should never be
757 		 * allocated or freed for a delalloc extent and hence we don't
758 		 * need to cancel or finish them after the xfs_bunmapi() call.
759 		 */
760 		xfs_bmap_init(&flist, &firstblock);
761 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
762 					&flist, &done);
763 		if (error)
764 			break;
765 
766 		ASSERT(!flist.xbf_count && !flist.xbf_first);
767 next_block:
768 		start_fsb++;
769 		remaining--;
770 	} while (remaining > 0);
771 
772 	return error;
773 }
774 
775 /*
776  * Test whether it is appropriate to check an inode for and free post EOF
777  * blocks. The 'force' parameter determines whether we should also consider
778  * regular files that are marked preallocated or append-only.
779  */
780 bool
781 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
782 {
783 	/* prealloc/delalloc exists only on regular files */
784 	if (!S_ISREG(ip->i_d.di_mode))
785 		return false;
786 
787 	/*
788 	 * Zero sized files with no cached pages and delalloc blocks will not
789 	 * have speculative prealloc/delalloc blocks to remove.
790 	 */
791 	if (VFS_I(ip)->i_size == 0 &&
792 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
793 	    ip->i_delayed_blks == 0)
794 		return false;
795 
796 	/* If we haven't read in the extent list, then don't do it now. */
797 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
798 		return false;
799 
800 	/*
801 	 * Do not free real preallocated or append-only files unless the file
802 	 * has delalloc blocks and we are forced to remove them.
803 	 */
804 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
805 		if (!force || ip->i_delayed_blks == 0)
806 			return false;
807 
808 	return true;
809 }
810 
811 /*
812  * This is called by xfs_inactive to free any blocks beyond eof
813  * when the link count isn't zero and by xfs_dm_punch_hole() when
814  * punching a hole to EOF.
815  */
816 int
817 xfs_free_eofblocks(
818 	xfs_mount_t	*mp,
819 	xfs_inode_t	*ip,
820 	bool		need_iolock)
821 {
822 	xfs_trans_t	*tp;
823 	int		error;
824 	xfs_fileoff_t	end_fsb;
825 	xfs_fileoff_t	last_fsb;
826 	xfs_filblks_t	map_len;
827 	int		nimaps;
828 	xfs_bmbt_irec_t	imap;
829 
830 	/*
831 	 * Figure out if there are any blocks beyond the end
832 	 * of the file.  If not, then there is nothing to do.
833 	 */
834 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
835 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
836 	if (last_fsb <= end_fsb)
837 		return 0;
838 	map_len = last_fsb - end_fsb;
839 
840 	nimaps = 1;
841 	xfs_ilock(ip, XFS_ILOCK_SHARED);
842 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
843 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
844 
845 	if (!error && (nimaps != 0) &&
846 	    (imap.br_startblock != HOLESTARTBLOCK ||
847 	     ip->i_delayed_blks)) {
848 		/*
849 		 * Attach the dquots to the inode up front.
850 		 */
851 		error = xfs_qm_dqattach(ip, 0);
852 		if (error)
853 			return error;
854 
855 		/*
856 		 * There are blocks after the end of file.
857 		 * Free them up now by truncating the file to
858 		 * its current size.
859 		 */
860 		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
861 
862 		if (need_iolock) {
863 			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
864 				xfs_trans_cancel(tp);
865 				return -EAGAIN;
866 			}
867 		}
868 
869 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
870 		if (error) {
871 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
872 			xfs_trans_cancel(tp);
873 			if (need_iolock)
874 				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
875 			return error;
876 		}
877 
878 		xfs_ilock(ip, XFS_ILOCK_EXCL);
879 		xfs_trans_ijoin(tp, ip, 0);
880 
881 		/*
882 		 * Do not update the on-disk file size.  If we update the
883 		 * on-disk file size and then the system crashes before the
884 		 * contents of the file are flushed to disk then the files
885 		 * may be full of holes (ie NULL files bug).
886 		 */
887 		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
888 					      XFS_ISIZE(ip));
889 		if (error) {
890 			/*
891 			 * If we get an error at this point we simply don't
892 			 * bother truncating the file.
893 			 */
894 			xfs_trans_cancel(tp);
895 		} else {
896 			error = xfs_trans_commit(tp);
897 			if (!error)
898 				xfs_inode_clear_eofblocks_tag(ip);
899 		}
900 
901 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
902 		if (need_iolock)
903 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
904 	}
905 	return error;
906 }
907 
908 int
909 xfs_alloc_file_space(
910 	struct xfs_inode	*ip,
911 	xfs_off_t		offset,
912 	xfs_off_t		len,
913 	int			alloc_type)
914 {
915 	xfs_mount_t		*mp = ip->i_mount;
916 	xfs_off_t		count;
917 	xfs_filblks_t		allocated_fsb;
918 	xfs_filblks_t		allocatesize_fsb;
919 	xfs_extlen_t		extsz, temp;
920 	xfs_fileoff_t		startoffset_fsb;
921 	xfs_fsblock_t		firstfsb;
922 	int			nimaps;
923 	int			quota_flag;
924 	int			rt;
925 	xfs_trans_t		*tp;
926 	xfs_bmbt_irec_t		imaps[1], *imapp;
927 	xfs_bmap_free_t		free_list;
928 	uint			qblocks, resblks, resrtextents;
929 	int			committed;
930 	int			error;
931 
932 	trace_xfs_alloc_file_space(ip);
933 
934 	if (XFS_FORCED_SHUTDOWN(mp))
935 		return -EIO;
936 
937 	error = xfs_qm_dqattach(ip, 0);
938 	if (error)
939 		return error;
940 
941 	if (len <= 0)
942 		return -EINVAL;
943 
944 	rt = XFS_IS_REALTIME_INODE(ip);
945 	extsz = xfs_get_extsz_hint(ip);
946 
947 	count = len;
948 	imapp = &imaps[0];
949 	nimaps = 1;
950 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
951 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
952 
953 	/*
954 	 * Allocate file space until done or until there is an error
955 	 */
956 	while (allocatesize_fsb && !error) {
957 		xfs_fileoff_t	s, e;
958 
959 		/*
960 		 * Determine space reservations for data/realtime.
961 		 */
962 		if (unlikely(extsz)) {
963 			s = startoffset_fsb;
964 			do_div(s, extsz);
965 			s *= extsz;
966 			e = startoffset_fsb + allocatesize_fsb;
967 			if ((temp = do_mod(startoffset_fsb, extsz)))
968 				e += temp;
969 			if ((temp = do_mod(e, extsz)))
970 				e += extsz - temp;
971 		} else {
972 			s = 0;
973 			e = allocatesize_fsb;
974 		}
975 
976 		/*
977 		 * The transaction reservation is limited to a 32-bit block
978 		 * count, hence we need to limit the number of blocks we are
979 		 * trying to reserve to avoid an overflow. We can't allocate
980 		 * more than @nimaps extents, and an extent is limited on disk
981 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
982 		 */
983 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
984 		if (unlikely(rt)) {
985 			resrtextents = qblocks = resblks;
986 			resrtextents /= mp->m_sb.sb_rextsize;
987 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
988 			quota_flag = XFS_QMOPT_RES_RTBLKS;
989 		} else {
990 			resrtextents = 0;
991 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
992 			quota_flag = XFS_QMOPT_RES_REGBLKS;
993 		}
994 
995 		/*
996 		 * Allocate and setup the transaction.
997 		 */
998 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
999 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1000 					  resblks, resrtextents);
1001 		/*
1002 		 * Check for running out of space
1003 		 */
1004 		if (error) {
1005 			/*
1006 			 * Free the transaction structure.
1007 			 */
1008 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1009 			xfs_trans_cancel(tp);
1010 			break;
1011 		}
1012 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1013 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1014 						      0, quota_flag);
1015 		if (error)
1016 			goto error1;
1017 
1018 		xfs_trans_ijoin(tp, ip, 0);
1019 
1020 		xfs_bmap_init(&free_list, &firstfsb);
1021 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1022 					allocatesize_fsb, alloc_type, &firstfsb,
1023 					0, imapp, &nimaps, &free_list);
1024 		if (error) {
1025 			goto error0;
1026 		}
1027 
1028 		/*
1029 		 * Complete the transaction
1030 		 */
1031 		error = xfs_bmap_finish(&tp, &free_list, &committed);
1032 		if (error) {
1033 			goto error0;
1034 		}
1035 
1036 		error = xfs_trans_commit(tp);
1037 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1038 		if (error) {
1039 			break;
1040 		}
1041 
1042 		allocated_fsb = imapp->br_blockcount;
1043 
1044 		if (nimaps == 0) {
1045 			error = -ENOSPC;
1046 			break;
1047 		}
1048 
1049 		startoffset_fsb += allocated_fsb;
1050 		allocatesize_fsb -= allocated_fsb;
1051 	}
1052 
1053 	return error;
1054 
1055 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1056 	xfs_bmap_cancel(&free_list);
1057 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1058 
1059 error1:	/* Just cancel transaction */
1060 	xfs_trans_cancel(tp);
1061 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1062 	return error;
1063 }
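
/*
 * Illustrative sketch (assumption about the usual call path, not taken from
 * this file): preallocation requests such as fallocate() arrive here with
 * alloc_type set to XFS_BMAPI_PREALLOC, e.g. from userspace:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// reserve 1 GiB at offset 0 without changing the file size
 *	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1ULL << 30) < 0)
 *		perror("fallocate");
 *
 * The loop above caps each transaction's reservation at MAXEXTLEN blocks, so
 * a large request is satisfied as a series of smaller allocations.
 */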
1064 
1065 /*
1066  * Zero file bytes between startoff and endoff inclusive.
1067  * The iolock is held exclusive and no blocks are buffered.
1068  *
1069  * This function is used by xfs_free_file_space() to zero
1070  * partial blocks when the range to free is not block aligned.
1071  * When unreserving space with boundaries that are not block
1072  * aligned we round up the start and round down the end
1073  * boundaries and then use this function to zero the parts of
1074  * the blocks that got dropped during the rounding.
1075  */
1076 STATIC int
1077 xfs_zero_remaining_bytes(
1078 	xfs_inode_t		*ip,
1079 	xfs_off_t		startoff,
1080 	xfs_off_t		endoff)
1081 {
1082 	xfs_bmbt_irec_t		imap;
1083 	xfs_fileoff_t		offset_fsb;
1084 	xfs_off_t		lastoffset;
1085 	xfs_off_t		offset;
1086 	xfs_buf_t		*bp;
1087 	xfs_mount_t		*mp = ip->i_mount;
1088 	int			nimap;
1089 	int			error = 0;
1090 
1091 	/*
1092 	 * Avoid doing I/O beyond eof - it's not necessary
1093 	 * since nothing can read beyond eof.  The space will
1094 	 * be zeroed when the file is extended anyway.
1095 	 */
1096 	if (startoff >= XFS_ISIZE(ip))
1097 		return 0;
1098 
1099 	if (endoff > XFS_ISIZE(ip))
1100 		endoff = XFS_ISIZE(ip);
1101 
1102 	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
1103 		uint lock_mode;
1104 
1105 		offset_fsb = XFS_B_TO_FSBT(mp, offset);
1106 		nimap = 1;
1107 
1108 		lock_mode = xfs_ilock_data_map_shared(ip);
1109 		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
1110 		xfs_iunlock(ip, lock_mode);
1111 
1112 		if (error || nimap < 1)
1113 			break;
1114 		ASSERT(imap.br_blockcount >= 1);
1115 		ASSERT(imap.br_startoff == offset_fsb);
1116 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1117 
1118 		if (imap.br_startblock == HOLESTARTBLOCK ||
1119 		    imap.br_state == XFS_EXT_UNWRITTEN) {
1120 			/* skip the entire extent */
1121 			lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
1122 						      imap.br_blockcount) - 1;
1123 			continue;
1124 		}
1125 
1126 		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
1127 		if (lastoffset > endoff)
1128 			lastoffset = endoff;
1129 
1130 		/* DAX can just zero the backing device directly */
1131 		if (IS_DAX(VFS_I(ip))) {
1132 			error = dax_zero_page_range(VFS_I(ip), offset,
1133 						    lastoffset - offset + 1,
1134 						    xfs_get_blocks_direct);
1135 			if (error)
1136 				return error;
1137 			continue;
1138 		}
1139 
1140 		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
1141 				mp->m_rtdev_targp : mp->m_ddev_targp,
1142 				xfs_fsb_to_db(ip, imap.br_startblock),
1143 				BTOBB(mp->m_sb.sb_blocksize),
1144 				0, &bp, NULL);
1145 		if (error)
1146 			return error;
1147 
1148 		memset(bp->b_addr +
1149 				(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
1150 		       0, lastoffset - offset + 1);
1151 
1152 		error = xfs_bwrite(bp);
1153 		xfs_buf_relse(bp);
1154 		if (error)
1155 			return error;
1156 	}
1157 	return error;
1158 }
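
/*
 * Worked example (illustrative): on a 4096-byte block filesystem, freeing
 * bytes [1000, 9000) via xfs_free_file_space() below gives
 * startoffset_fsb = 1 (rounded up) and endoffset_fsb = 2 (rounded down), so
 * block 1 (bytes 4096-8191) is punched out by xfs_bunmapi() while the
 * partial edges [1000, 4095] and [8192, 8999] are zeroed here, one
 * block-sized uncached buffer at a time.
 */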
1159 
1160 int
1161 xfs_free_file_space(
1162 	struct xfs_inode	*ip,
1163 	xfs_off_t		offset,
1164 	xfs_off_t		len)
1165 {
1166 	int			committed;
1167 	int			done;
1168 	xfs_fileoff_t		endoffset_fsb;
1169 	int			error;
1170 	xfs_fsblock_t		firstfsb;
1171 	xfs_bmap_free_t		free_list;
1172 	xfs_bmbt_irec_t		imap;
1173 	xfs_off_t		ioffset;
1174 	xfs_off_t		iendoffset;
1175 	xfs_extlen_t		mod = 0;
1176 	xfs_mount_t		*mp;
1177 	int			nimap;
1178 	uint			resblks;
1179 	xfs_off_t		rounding;
1180 	int			rt;
1181 	xfs_fileoff_t		startoffset_fsb;
1182 	xfs_trans_t		*tp;
1183 
1184 	mp = ip->i_mount;
1185 
1186 	trace_xfs_free_file_space(ip);
1187 
1188 	error = xfs_qm_dqattach(ip, 0);
1189 	if (error)
1190 		return error;
1191 
1192 	error = 0;
1193 	if (len <= 0)	/* if nothing being freed */
1194 		return error;
1195 	rt = XFS_IS_REALTIME_INODE(ip);
1196 	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
1197 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1198 
1199 	/* wait for the completion of any pending DIOs */
1200 	inode_dio_wait(VFS_I(ip));
1201 
1202 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1203 	ioffset = round_down(offset, rounding);
1204 	iendoffset = round_up(offset + len, rounding) - 1;
1205 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
1206 					     iendoffset);
1207 	if (error)
1208 		goto out;
1209 	truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);
1210 
1211 	/*
1212 	 * Need to zero the stuff we're not freeing, on disk.
1213 	 * If it's a realtime file & can't use unwritten extents then we
1214 	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
1215 	 * will take care of it for us.
1216 	 */
1217 	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1218 		nimap = 1;
1219 		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
1220 					&imap, &nimap, 0);
1221 		if (error)
1222 			goto out;
1223 		ASSERT(nimap == 0 || nimap == 1);
1224 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1225 			xfs_daddr_t	block;
1226 
1227 			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1228 			block = imap.br_startblock;
1229 			mod = do_div(block, mp->m_sb.sb_rextsize);
1230 			if (mod)
1231 				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1232 		}
1233 		nimap = 1;
1234 		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
1235 					&imap, &nimap, 0);
1236 		if (error)
1237 			goto out;
1238 		ASSERT(nimap == 0 || nimap == 1);
1239 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1240 			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1241 			mod++;
1242 			if (mod && (mod != mp->m_sb.sb_rextsize))
1243 				endoffset_fsb -= mod;
1244 		}
1245 	}
1246 	if ((done = (endoffset_fsb <= startoffset_fsb)))
1247 		/*
1248 		 * One contiguous piece to clear
1249 		 */
1250 		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
1251 	else {
1252 		/*
1253 		 * Some full blocks, possibly two pieces to clear
1254 		 */
1255 		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
1256 			error = xfs_zero_remaining_bytes(ip, offset,
1257 				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
1258 		if (!error &&
1259 		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
1260 			error = xfs_zero_remaining_bytes(ip,
1261 				XFS_FSB_TO_B(mp, endoffset_fsb),
1262 				offset + len - 1);
1263 	}
1264 
1265 	/*
1266 	 * free file space until done or until there is an error
1267 	 */
1268 	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1269 	while (!error && !done) {
1270 
1271 		/*
1272 		 * allocate and setup the transaction. Allow this
1273 		 * transaction to dip into the reserve blocks to ensure
1274 		 * the freeing of the space succeeds at ENOSPC.
1275 		 */
1276 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1277 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
1278 
1279 		/*
1280 		 * check for running out of space
1281 		 */
1282 		if (error) {
1283 			/*
1284 			 * Free the transaction structure.
1285 			 */
1286 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1287 			xfs_trans_cancel(tp);
1288 			break;
1289 		}
1290 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1291 		error = xfs_trans_reserve_quota(tp, mp,
1292 				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
1293 				resblks, 0, XFS_QMOPT_RES_REGBLKS);
1294 		if (error)
1295 			goto error1;
1296 
1297 		xfs_trans_ijoin(tp, ip, 0);
1298 
1299 		/*
1300 		 * issue the bunmapi() call to free the blocks
1301 		 */
1302 		xfs_bmap_init(&free_list, &firstfsb);
1303 		error = xfs_bunmapi(tp, ip, startoffset_fsb,
1304 				  endoffset_fsb - startoffset_fsb,
1305 				  0, 2, &firstfsb, &free_list, &done);
1306 		if (error) {
1307 			goto error0;
1308 		}
1309 
1310 		/*
1311 		 * complete the transaction
1312 		 */
1313 		error = xfs_bmap_finish(&tp, &free_list, &committed);
1314 		if (error) {
1315 			goto error0;
1316 		}
1317 
1318 		error = xfs_trans_commit(tp);
1319 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1320 	}
1321 
1322  out:
1323 	return error;
1324 
1325  error0:
1326 	xfs_bmap_cancel(&free_list);
1327  error1:
1328 	xfs_trans_cancel(tp);
1329 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1330 	goto out;
1331 }
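
/*
 * Illustrative userspace sketch (assumption about the usual call path, not
 * taken from this file): hole punching reaches this function via
 * fallocate(2):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// discard bytes [off, off + len) but keep the file size unchanged
 *	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		      off, len) < 0)
 *		perror("fallocate");
 *
 * Whole blocks inside the range are unmapped; the partial blocks at either
 * end are zeroed by xfs_zero_remaining_bytes() above rather than freed.
 */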
1332 
1333 /*
1334  * Preallocate and zero a range of a file. This mechanism has the allocation
1335  * semantics of fallocate and in addition converts data in the range to zeroes.
1336  */
1337 int
1338 xfs_zero_file_space(
1339 	struct xfs_inode	*ip,
1340 	xfs_off_t		offset,
1341 	xfs_off_t		len)
1342 {
1343 	struct xfs_mount	*mp = ip->i_mount;
1344 	uint			blksize;
1345 	int			error;
1346 
1347 	trace_xfs_zero_file_space(ip);
1348 
1349 	blksize = 1 << mp->m_sb.sb_blocklog;
1350 
1351 	/*
1352 	 * Punch a hole and prealloc the range. We use hole punch rather than
1353 	 * unwritten extent conversion for two reasons:
1354 	 *
1355 	 * 1.) Hole punch handles partial block zeroing for us.
1356 	 *
1357 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1358 	 * by virtue of the hole punch.
1359 	 */
1360 	error = xfs_free_file_space(ip, offset, len);
1361 	if (error)
1362 		goto out;
1363 
1364 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1365 				     round_up(offset + len, blksize) -
1366 				     round_down(offset, blksize),
1367 				     XFS_BMAPI_PREALLOC);
1368 out:
1369 	return error;
1370 
1371 }
1372 
1373 /*
1374  * @next_fsb will keep track of the extent currently undergoing shift.
1375  * @stop_fsb will keep track of the extent at which we have to stop.
1376  * If we are shifting left, we will start with block (offset + len) and
1377  * shift each extent up to the last extent.
1378  * If we are shifting right, we will start with last extent inside file space
1379  * and continue until we reach the block corresponding to offset.
1380  */
1381 static int
1382 xfs_shift_file_space(
1383 	struct xfs_inode        *ip,
1384 	xfs_off_t               offset,
1385 	xfs_off_t               len,
1386 	enum shift_direction	direction)
1387 {
1388 	int			done = 0;
1389 	struct xfs_mount	*mp = ip->i_mount;
1390 	struct xfs_trans	*tp;
1391 	int			error;
1392 	struct xfs_bmap_free	free_list;
1393 	xfs_fsblock_t		first_block;
1394 	int			committed;
1395 	xfs_fileoff_t		stop_fsb;
1396 	xfs_fileoff_t		next_fsb;
1397 	xfs_fileoff_t		shift_fsb;
1398 
1399 	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
1400 
1401 	if (direction == SHIFT_LEFT) {
1402 		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1403 		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1404 	} else {
1405 		/*
1406 		 * If right shift, delegate the work of initialization of
1407 		 * next_fsb to xfs_bmap_shift_extents as it has the ilock held.
1408 		 */
1409 		next_fsb = NULLFSBLOCK;
1410 		stop_fsb = XFS_B_TO_FSB(mp, offset);
1411 	}
1412 
1413 	shift_fsb = XFS_B_TO_FSB(mp, len);
1414 
1415 	/*
1416 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1417 	 * into the accessible region of the file.
1418 	 */
1419 	if (xfs_can_free_eofblocks(ip, true)) {
1420 		error = xfs_free_eofblocks(mp, ip, false);
1421 		if (error)
1422 			return error;
1423 	}
1424 
1425 	/*
1426 	 * Write back and invalidate the page cache for the remainder of the
1427 	 * file as we're about to shift every extent from offset to EOF.
1428 	 */
1429 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1430 					     offset, -1);
1431 	if (error)
1432 		return error;
1433 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1434 					offset >> PAGE_CACHE_SHIFT, -1);
1435 	if (error)
1436 		return error;
1437 
1438 	/*
1439 	 * The extent shifting code works on extent granularity. So, if
1440 	 * stop_fsb is not the starting block of an extent, we need to split
1441 	 * the extent at stop_fsb.
1442 	 */
1443 	if (direction == SHIFT_RIGHT) {
1444 		error = xfs_bmap_split_extent(ip, stop_fsb);
1445 		if (error)
1446 			return error;
1447 	}
1448 
1449 	while (!error && !done) {
1450 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1451 		/*
1452 		 * We need to reserve a permanent block for the transaction.
1453 		 * This comes into play when, after shifting an extent into a
1454 		 * hole, we find that adjacent extents can be merged, which
1455 		 * may lead to freeing of a block during the record update.
1456 		 */
1457 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1458 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
1459 		if (error) {
1460 			xfs_trans_cancel(tp);
1461 			break;
1462 		}
1463 
1464 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1465 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1466 				ip->i_gdquot, ip->i_pdquot,
1467 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
1468 				XFS_QMOPT_RES_REGBLKS);
1469 		if (error)
1470 			goto out;
1471 
1472 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1473 
1474 		xfs_bmap_init(&free_list, &first_block);
1475 
1476 		/*
1477 		 * We are using the write transaction, in which at most 2 bmbt
1478 		 * updates are allowed
1479 		 */
1480 		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
1481 				&done, stop_fsb, &first_block, &free_list,
1482 				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
1483 		if (error)
1484 			goto out;
1485 
1486 		error = xfs_bmap_finish(&tp, &free_list, &committed);
1487 		if (error)
1488 			goto out;
1489 
1490 		error = xfs_trans_commit(tp);
1491 	}
1492 
1493 	return error;
1494 
1495 out:
1496 	xfs_trans_cancel(tp);
1497 	return error;
1498 }
1499 
1500 /*
1501  * xfs_collapse_file_space()
1502  *	This routine frees disk space and shifts extents for the given file.
1503  *	The first thing we do is free the data blocks in the specified range
1504  *	by calling xfs_free_file_space(), which also syncs dirty data
1505  *	and invalidates the page cache over the region on which the collapse
1506  *	range is working. Extent records are then shifted left to cover the hole.
1507  * RETURNS:
1508  *	0 on success
1509  *	errno on error
1510  *
1511  */
1512 int
1513 xfs_collapse_file_space(
1514 	struct xfs_inode	*ip,
1515 	xfs_off_t		offset,
1516 	xfs_off_t		len)
1517 {
1518 	int error;
1519 
1520 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1521 	trace_xfs_collapse_file_space(ip);
1522 
1523 	error = xfs_free_file_space(ip, offset, len);
1524 	if (error)
1525 		return error;
1526 
1527 	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1528 }
1529 
1530 /*
1531  * xfs_insert_file_space()
1532  *	This routine creates a hole by shifting extents for the given file.
1533  *	The first thing we do is sync dirty data and invalidate the page cache
1534  *	over the region on which the insert range is working. Then we split the
1535  *	extent at the given offset into two by calling xfs_bmap_split_extent,
1536  *	and shift all extent records lying between [offset,
1537  *	last allocated extent] to the right to make room for the hole.
1538  * RETURNS:
1539  *	0 on success
1540  *	errno on error
1541  */
1542 int
1543 xfs_insert_file_space(
1544 	struct xfs_inode	*ip,
1545 	loff_t			offset,
1546 	loff_t			len)
1547 {
1548 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1549 	trace_xfs_insert_file_space(ip);
1550 
1551 	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1552 }
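
/*
 * Illustrative sketch (assumption about the usual call path, not taken from
 * this file): both helpers are driven by fallocate(2), and both require the
 * offset and length to be multiples of the filesystem block size:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// remove blocks [off, off + len) and shift the tail left
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);
 *
 *	// shift everything from off rightwards by len to open a hole
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);
 */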
1553 
1554 /*
1555  * We need to check that the format of the data fork in the temporary inode is
1556  * valid for the target inode before doing the swap. This is not a problem with
1557  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1558  * data fork depending on the space the attribute fork is taking so we can get
1559  * invalid formats on the target inode.
1560  *
1561  * E.g. target has space for 7 extents in extent format, temp inode only has
1562  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1563  * btree, but when swapped it needs to be in extent format. Hence we can't just
1564  * blindly swap data forks on attr2 filesystems.
1565  *
1566  * Note that we check the swap in both directions so that we don't end up with
1567  * a corrupt temporary inode, either.
1568  *
1569  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1570  * inode will prevent this situation from occurring, so all we do here is
1571  * reject and log the attempt. Basically we are putting the responsibility on
1572  * userspace to get this right.
1573  */
1574 static int
1575 xfs_swap_extents_check_format(
1576 	xfs_inode_t	*ip,	/* target inode */
1577 	xfs_inode_t	*tip)	/* tmp inode */
1578 {
1579 
1580 	/* Should never get a local format */
1581 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1582 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1583 		return -EINVAL;
1584 
1585 	/*
1586 	 * if the target inode has fewer extents than the temporary inode then
1587 	 * why did userspace call us?
1588 	 */
1589 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1590 		return -EINVAL;
1591 
1592 	/*
1593 	 * if the target inode is in extent form and the temp inode is in btree
1594 	 * form then we will end up with the target inode in the wrong format
1595 	 * as we already know there are fewer extents in the temp inode.
1596 	 */
1597 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1598 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1599 		return -EINVAL;
1600 
1601 	/* Check temp in extent form to max in target */
1602 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1603 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1604 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1605 		return -EINVAL;
1606 
1607 	/* Check target in extent form to max in temp */
1608 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1609 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1610 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1611 		return -EINVAL;
1612 
1613 	/*
1614 	 * If we are in a btree format, check that the temp root block will fit
1615 	 * in the target and that it has enough extents to be in btree format
1616 	 * in the target.
1617 	 *
1618 	 * Note that we have to be careful to allow btree->extent conversions
1619 	 * (a common defrag case) which will occur when the temp inode is in
1620 	 * extent format...
1621 	 */
1622 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1623 		if (XFS_IFORK_BOFF(ip) &&
1624 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1625 			return -EINVAL;
1626 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1627 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1628 			return -EINVAL;
1629 	}
1630 
1631 	/* Reciprocal target->temp btree format checks */
1632 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1633 		if (XFS_IFORK_BOFF(tip) &&
1634 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1635 			return -EINVAL;
1636 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1637 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1638 			return -EINVAL;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
1644 static int
1645 xfs_swap_extent_flush(
1646 	struct xfs_inode	*ip)
1647 {
1648 	int	error;
1649 
1650 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1651 	if (error)
1652 		return error;
1653 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1654 
1655 	/* Verify O_DIRECT for ftmp */
1656 	if (VFS_I(ip)->i_mapping->nrpages)
1657 		return -EINVAL;
1658 	return 0;
1659 }
1660 
1661 int
1662 xfs_swap_extents(
1663 	xfs_inode_t	*ip,	/* target inode */
1664 	xfs_inode_t	*tip,	/* tmp inode */
1665 	xfs_swapext_t	*sxp)
1666 {
1667 	xfs_mount_t	*mp = ip->i_mount;
1668 	xfs_trans_t	*tp;
1669 	xfs_bstat_t	*sbp = &sxp->sx_stat;
1670 	xfs_ifork_t	*tempifp, *ifp, *tifp;
1671 	int		src_log_flags, target_log_flags;
1672 	int		error = 0;
1673 	int		aforkblks = 0;
1674 	int		taforkblks = 0;
1675 	__uint64_t	tmp;
1676 	int		lock_flags;
1677 
1678 	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1679 	if (!tempifp) {
1680 		error = -ENOMEM;
1681 		goto out;
1682 	}
1683 
1684 	/*
1685 	 * Lock the inodes against other IO, page faults and truncate to
1686 	 * begin with.  Then we can safely ensure the inodes are flushed and
1687 	 * have no page cache. Once we have done this we can take the ilocks and
1688 	 * do the rest of the checks.
1689 	 */
1690 	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
1691 	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
1692 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
1693 
1694 	/* Verify that both files have the same format */
1695 	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
1696 		error = -EINVAL;
1697 		goto out_unlock;
1698 	}
1699 
1700 	/* Verify both files are either real-time or non-realtime */
1701 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1702 		error = -EINVAL;
1703 		goto out_unlock;
1704 	}
1705 
1706 	error = xfs_swap_extent_flush(ip);
1707 	if (error)
1708 		goto out_unlock;
1709 	error = xfs_swap_extent_flush(tip);
1710 	if (error)
1711 		goto out_unlock;
1712 
1713 	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
1714 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1715 	if (error) {
1716 		xfs_trans_cancel(tp);
1717 		goto out_unlock;
1718 	}
1719 
1720 	/*
1721 	 * Lock and join the inodes to the transaction so that transaction commit
1722 	 * or cancel will unlock the inodes from this point onwards.
1723 	 */
1724 	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1725 	lock_flags |= XFS_ILOCK_EXCL;
1726 	xfs_trans_ijoin(tp, ip, lock_flags);
1727 	xfs_trans_ijoin(tp, tip, lock_flags);
1728 
1729 
1730 	/* Verify all data are being swapped */
1731 	if (sxp->sx_offset != 0 ||
1732 	    sxp->sx_length != ip->i_d.di_size ||
1733 	    sxp->sx_length != tip->i_d.di_size) {
1734 		error = -EFAULT;
1735 		goto out_trans_cancel;
1736 	}
1737 
1738 	trace_xfs_swap_extent_before(ip, 0);
1739 	trace_xfs_swap_extent_before(tip, 1);
1740 
1741 	/* check inode formats now that data is flushed */
1742 	error = xfs_swap_extents_check_format(ip, tip);
1743 	if (error) {
1744 		xfs_notice(mp,
1745 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1746 				__func__, ip->i_ino);
1747 		goto out_trans_cancel;
1748 	}
1749 
1750 	/*
1751 	 * Compare the current change & modify times with that
1752 	 * passed in.  If they differ, we abort this swap.
1753 	 * This is the mechanism used to assure the calling
1754 	 * process that the file was not changed out from
1755 	 * under it.
1756 	 */
1757 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1758 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1759 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1760 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1761 		error = -EBUSY;
1762 		goto out_trans_cancel;
1763 	}
1764 	/*
1765 	 * Count the number of extended attribute blocks
1766 	 */
1767 	if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1768 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1769 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1770 		if (error)
1771 			goto out_trans_cancel;
1772 	}
1773 	if (((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1774 	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1775 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1776 			&taforkblks);
1777 		if (error)
1778 			goto out_trans_cancel;
1779 	}
1780 
1781 	/*
1782 	 * Before we've swapped the forks, let's set the owners of the forks
1783 	 * appropriately. We have to do this as we are demand paging the btree
1784 	 * buffers, and so the validation done on read will expect the owner
1785 	 * field to be correctly set. Once we change the owners, we can swap the
1786 	 * inode forks.
1787 	 *
1788 	 * Note the trickiness in setting the log flags - we set the owner log
1789 	 * flag on the opposite inode (i.e. the inode we are setting the new
1790 	 * owner to be) because once we swap the forks and log that, log
1791 	 * recovery is going to see the fork as owned by the swapped inode,
1792 	 * not the pre-swapped inodes.
1793 	 */
1794 	src_log_flags = XFS_ILOG_CORE;
1795 	target_log_flags = XFS_ILOG_CORE;
1796 	if (ip->i_d.di_version == 3 &&
1797 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1798 		target_log_flags |= XFS_ILOG_DOWNER;
1799 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1800 					      tip->i_ino, NULL);
1801 		if (error)
1802 			goto out_trans_cancel;
1803 	}
1804 
1805 	if (tip->i_d.di_version == 3 &&
1806 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1807 		src_log_flags |= XFS_ILOG_DOWNER;
1808 		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1809 					      ip->i_ino, NULL);
1810 		if (error)
1811 			goto out_trans_cancel;
1812 	}
1813 
1814 	/*
1815 	 * Swap the data forks of the inodes
1816 	 */
1817 	ifp = &ip->i_df;
1818 	tifp = &tip->i_df;
1819 	*tempifp = *ifp;	/* struct copy */
1820 	*ifp = *tifp;		/* struct copy */
1821 	*tifp = *tempifp;	/* struct copy */
1822 
1823 	/*
1824 	 * Fix the on-disk inode values
1825 	 */
1826 	tmp = (__uint64_t)ip->i_d.di_nblocks;
1827 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1828 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1829 
1830 	tmp = (__uint64_t) ip->i_d.di_nextents;
1831 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1832 	tip->i_d.di_nextents = tmp;
1833 
1834 	tmp = (__uint64_t) ip->i_d.di_format;
1835 	ip->i_d.di_format = tip->i_d.di_format;
1836 	tip->i_d.di_format = tmp;
1837 
1838 	/*
1839 	 * The extents in the source inode could still contain speculative
1840 	 * preallocation beyond EOF (e.g. the file is open but not modified
1841 	 * while defrag is in progress). In that case, we need to copy over the
1842 	 * number of delalloc blocks the data fork in the source inode is
1843 	 * tracking beyond EOF so that when the fork is truncated away when the
1844 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1845 	 * counter on that inode.
1846 	 */
1847 	ASSERT(tip->i_delayed_blks == 0);
1848 	tip->i_delayed_blks = ip->i_delayed_blks;
1849 	ip->i_delayed_blks = 0;
1850 
1851 	switch (ip->i_d.di_format) {
1852 	case XFS_DINODE_FMT_EXTENTS:
1853 		/* If the extents fit in the inode, fix the
1854 		 * pointer.  Otherwise it's already NULL or
1855 		 * pointing to the extent.
1856 		 */
1857 		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1858 			ifp->if_u1.if_extents =
1859 				ifp->if_u2.if_inline_ext;
1860 		}
1861 		src_log_flags |= XFS_ILOG_DEXT;
1862 		break;
1863 	case XFS_DINODE_FMT_BTREE:
1864 		ASSERT(ip->i_d.di_version < 3 ||
1865 		       (src_log_flags & XFS_ILOG_DOWNER));
1866 		src_log_flags |= XFS_ILOG_DBROOT;
1867 		break;
1868 	}
1869 
1870 	switch (tip->i_d.di_format) {
1871 	case XFS_DINODE_FMT_EXTENTS:
1872 		/* If the extents fit in the inode, fix the
1873 		 * pointer.  Otherwise it's already NULL or
1874 		 * pointing to the extent.
1875 		 */
1876 		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1877 			tifp->if_u1.if_extents =
1878 				tifp->if_u2.if_inline_ext;
1879 		}
1880 		target_log_flags |= XFS_ILOG_DEXT;
1881 		break;
1882 	case XFS_DINODE_FMT_BTREE:
1883 		target_log_flags |= XFS_ILOG_DBROOT;
1884 		ASSERT(tip->i_d.di_version < 3 ||
1885 		       (target_log_flags & XFS_ILOG_DOWNER));
1886 		break;
1887 	}
1888 
1889 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1890 	xfs_trans_log_inode(tp, tip, target_log_flags);
1891 
1892 	/*
1893 	 * If this is a synchronous mount, make sure that the
1894 	 * transaction goes to disk before returning to the user.
1895 	 */
1896 	if (mp->m_flags & XFS_MOUNT_WSYNC)
1897 		xfs_trans_set_sync(tp);
1898 
1899 	error = xfs_trans_commit(tp);
1900 
1901 	trace_xfs_swap_extent_after(ip, 0);
1902 	trace_xfs_swap_extent_after(tip, 1);
1903 out:
1904 	kmem_free(tempifp);
1905 	return error;
1906 
1907 out_unlock:
1908 	xfs_iunlock(ip, lock_flags);
1909 	xfs_iunlock(tip, lock_flags);
1910 	goto out;
1911 
1912 out_trans_cancel:
1913 	xfs_trans_cancel(tp);
1914 	goto out;
1915 }
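
/*
 * Illustrative sketch of how this is driven from userspace (xfs_fsr style);
 * the field and macro names are assumptions based on the published swapext
 * ABI and are not defined in this file:
 *
 *	struct xfs_swapext	sx;
 *
 *	memset(&sx, 0, sizeof(sx));
 *	sx.sx_version = XFS_SX_VERSION;
 *	sx.sx_fdtarget = target_fd;	// file being defragmented
 *	sx.sx_fdtmp = tmp_fd;		// freshly written temporary copy
 *	sx.sx_offset = 0;
 *	sx.sx_length = target_size;	// must equal both inodes' di_size
 *	sx.sx_stat = target_bstat;	// from a prior bulkstat of the target
 *
 *	if (ioctl(target_fd, XFS_IOC_SWAPEXT, &sx) < 0)
 *		perror("XFS_IOC_SWAPEXT");
 *
 * The ctime/mtime comparison against sx_stat above is what guarantees the
 * target was not modified between the defrag copy and the swap.
 */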
1916