xref: /openbmc/linux/fs/xfs/xfs_bmap_util.c (revision d236d361)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * Copyright (c) 2012 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_defer.h"
29 #include "xfs_inode.h"
30 #include "xfs_btree.h"
31 #include "xfs_trans.h"
32 #include "xfs_extfree_item.h"
33 #include "xfs_alloc.h"
34 #include "xfs_bmap.h"
35 #include "xfs_bmap_util.h"
36 #include "xfs_bmap_btree.h"
37 #include "xfs_rtalloc.h"
38 #include "xfs_error.h"
39 #include "xfs_quota.h"
40 #include "xfs_trans_space.h"
41 #include "xfs_trace.h"
42 #include "xfs_icache.h"
43 #include "xfs_log.h"
44 #include "xfs_rmap_btree.h"
45 #include "xfs_iomap.h"
46 #include "xfs_reflink.h"
47 #include "xfs_refcount.h"
48 
49 /* Kernel only BMAP related definitions and functions */
50 
51 /*
52  * Convert the given file system block to a disk block.  We have to treat it
53  * differently based on whether the file is a real time file or not, because the
54  * bmap code does.
55  */
56 xfs_daddr_t
57 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
58 {
59 	return (XFS_IS_REALTIME_INODE(ip) ?
60 		 (xfs_daddr_t)XFS_FSB_TO_BB(ip->i_mount, fsb) :
61 		 XFS_FSB_TO_DADDR(ip->i_mount, fsb));
62 }
63 
64 /*
65  * Routine to zero an extent on disk allocated to the specific inode.
66  *
67  * The VFS functions take a linearised filesystem block offset, so we have to
68  * convert the sparse xfs fsb to the right format first.
69  * VFS types are real funky, too.
70  */
71 int
72 xfs_zero_extent(
73 	struct xfs_inode *ip,
74 	xfs_fsblock_t	start_fsb,
75 	xfs_off_t	count_fsb)
76 {
77 	struct xfs_mount *mp = ip->i_mount;
78 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
79 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
80 
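	/*
	 * blkdev_issue_zeroout() works in 512-byte sector units, so convert
	 * the block offset and count by shifting by (blocksize bits - 9).
	 */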
81 	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
82 		block << (mp->m_super->s_blocksize_bits - 9),
83 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
84 		GFP_NOFS, 0);
85 }
86 
87 int
88 xfs_bmap_rtalloc(
89 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
90 {
91 	int		error;		/* error return value */
92 	xfs_mount_t	*mp;		/* mount point structure */
93 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
94 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
95 	xfs_extlen_t	align;		/* minimum allocation alignment */
96 	xfs_rtblock_t	rtb;
97 
98 	mp = ap->ip->i_mount;
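	/*
	 * align is the extent size hint in filesystem blocks; prod expresses
	 * that hint in realtime extents so the rt allocator can round
	 * allocations to hint boundaries.
	 */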
99 	align = xfs_get_extsz_hint(ap->ip);
100 	prod = align / mp->m_sb.sb_rextsize;
101 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
102 					align, 1, ap->eof, 0,
103 					ap->conv, &ap->offset, &ap->length);
104 	if (error)
105 		return error;
106 	ASSERT(ap->length);
107 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
108 
109 	/*
110 	 * If the offset & length are not perfectly aligned
111 	 * then kill prod, it will just get us in trouble.
112 	 */
113 	if (do_mod(ap->offset, align) || ap->length % align)
114 		prod = 1;
115 	/*
116 	 * Set ralen to be the actual requested length in rtextents.
117 	 */
118 	ralen = ap->length / mp->m_sb.sb_rextsize;
119 	/*
120 	 * If the old value was close enough to MAXEXTLEN that
121 	 * we rounded up to it, cut it back so it's valid again.
122 	 * Note that if it's a really large request (bigger than
123 	 * MAXEXTLEN), we don't hear about that number, and can't
124 	 * adjust the starting point to match it.
125 	 */
126 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
127 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
128 
129 	/*
130 	 * Lock out modifications to both the RT bitmap and summary inodes
131 	 */
132 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
133 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
134 	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
135 	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
136 
137 	/*
138 	 * If it's an allocation to an empty file at offset 0,
139 	 * pick an extent that will space things out in the rt area.
140 	 */
141 	if (ap->eof && ap->offset == 0) {
142 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
143 
144 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
145 		if (error)
146 			return error;
147 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
148 	} else {
149 		ap->blkno = 0;
150 	}
151 
152 	xfs_bmap_adjacent(ap);
153 
154 	/*
155 	 * Realtime allocation, done through xfs_rtallocate_extent.
156 	 */
157 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
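	/*
	 * ap->blkno (and hence rtb below) is now in units of realtime
	 * extents, which is what xfs_rtallocate_extent() expects.
	 */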
158 	rtb = ap->blkno;
159 	ap->length = ralen;
160 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
161 				&ralen, ap->wasdel, prod, &rtb);
162 	if (error)
163 		return error;
164 
165 	ap->blkno = rtb;
166 	if (ap->blkno != NULLFSBLOCK) {
167 		ap->blkno *= mp->m_sb.sb_rextsize;
168 		ralen *= mp->m_sb.sb_rextsize;
169 		ap->length = ralen;
170 		ap->ip->i_d.di_nblocks += ralen;
171 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
172 		if (ap->wasdel)
173 			ap->ip->i_delayed_blks -= ralen;
174 		/*
175 		 * Adjust the disk quota also. This was reserved
176 		 * earlier.
177 		 */
178 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
179 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
180 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
181 
182 		/* Zero the extent if we were asked to do so */
183 		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
184 			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
185 			if (error)
186 				return error;
187 		}
188 	} else {
189 		ap->length = 0;
190 	}
191 	return 0;
192 }
193 
194 /*
195  * Check if the endoff is outside the last extent. If so the caller will grow
196  * the allocation to a stripe unit boundary.  All offsets are considered outside
197  * the end of file for an empty fork, so 1 is returned in *eof in that case.
198  */
199 int
200 xfs_bmap_eof(
201 	struct xfs_inode	*ip,
202 	xfs_fileoff_t		endoff,
203 	int			whichfork,
204 	int			*eof)
205 {
206 	struct xfs_bmbt_irec	rec;
207 	int			error;
208 
209 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
210 	if (error || *eof)
211 		return error;
212 
213 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
214 	return 0;
215 }
216 
217 /*
218  * Extent tree block counting routines.
219  */
220 
221 /*
222  * Count leaf blocks given a range of extent records.
223  */
224 STATIC void
225 xfs_bmap_count_leaves(
226 	xfs_ifork_t		*ifp,
227 	xfs_extnum_t		idx,
228 	int			numrecs,
229 	int			*count)
230 {
231 	int		b;
232 
233 	for (b = 0; b < numrecs; b++) {
234 		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
235 		*count += xfs_bmbt_get_blockcount(frp);
236 	}
237 }
238 
239 /*
240  * Count leaf blocks given a range of extent records originally
241  * in btree format.
242  */
243 STATIC void
244 xfs_bmap_disk_count_leaves(
245 	struct xfs_mount	*mp,
246 	struct xfs_btree_block	*block,
247 	int			numrecs,
248 	int			*count)
249 {
250 	int		b;
251 	xfs_bmbt_rec_t	*frp;
252 
253 	for (b = 1; b <= numrecs; b++) {
254 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
255 		*count += xfs_bmbt_disk_get_blockcount(frp);
256 	}
257 }
258 
259 /*
260  * Recursively walks each level of a btree
261  * to count total fsblocks in use.
262  */
263 STATIC int                                     /* error */
264 xfs_bmap_count_tree(
265 	xfs_mount_t     *mp,            /* file system mount point */
266 	xfs_trans_t     *tp,            /* transaction pointer */
267 	xfs_ifork_t	*ifp,		/* inode fork pointer */
268 	xfs_fsblock_t   blockno,	/* file system block number */
269 	int             levelin,	/* level in btree */
270 	int		*count)		/* Count of blocks */
271 {
272 	int			error;
273 	xfs_buf_t		*bp, *nbp;
274 	int			level = levelin;
275 	__be64			*pp;
276 	xfs_fsblock_t           bno = blockno;
277 	xfs_fsblock_t		nextbno;
278 	struct xfs_btree_block	*block, *nextblock;
279 	int			numrecs;
280 
281 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
282 						&xfs_bmbt_buf_ops);
283 	if (error)
284 		return error;
285 	*count += 1;
286 	block = XFS_BUF_TO_BLOCK(bp);
287 
288 	if (--level) {
289 		/* Not at node above leaves, count this level of nodes */
290 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
291 		while (nextbno != NULLFSBLOCK) {
292 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
293 						XFS_BMAP_BTREE_REF,
294 						&xfs_bmbt_buf_ops);
295 			if (error)
296 				return error;
297 			*count += 1;
298 			nextblock = XFS_BUF_TO_BLOCK(nbp);
299 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
300 			xfs_trans_brelse(tp, nbp);
301 		}
302 
303 		/* Dive to the next level */
304 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
305 		bno = be64_to_cpu(*pp);
306 		if (unlikely((error =
307 		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
308 			xfs_trans_brelse(tp, bp);
309 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
310 					 XFS_ERRLEVEL_LOW, mp);
311 			return -EFSCORRUPTED;
312 		}
313 		xfs_trans_brelse(tp, bp);
314 	} else {
315 		/* count all level 1 nodes and their leaves */
316 		for (;;) {
317 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
318 			numrecs = be16_to_cpu(block->bb_numrecs);
319 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
320 			xfs_trans_brelse(tp, bp);
321 			if (nextbno == NULLFSBLOCK)
322 				break;
323 			bno = nextbno;
324 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
325 						XFS_BMAP_BTREE_REF,
326 						&xfs_bmbt_buf_ops);
327 			if (error)
328 				return error;
329 			*count += 1;
330 			block = XFS_BUF_TO_BLOCK(bp);
331 		}
332 	}
333 	return 0;
334 }
335 
336 /*
337  * Count fsblocks of the given fork.
338  */
339 static int					/* error */
340 xfs_bmap_count_blocks(
341 	xfs_trans_t		*tp,		/* transaction pointer */
342 	xfs_inode_t		*ip,		/* incore inode */
343 	int			whichfork,	/* data or attr fork */
344 	int			*count)		/* out: count of blocks */
345 {
346 	struct xfs_btree_block	*block;	/* current btree block */
347 	xfs_fsblock_t		bno;	/* block # of "block" */
348 	xfs_ifork_t		*ifp;	/* fork structure */
349 	int			level;	/* btree level, for checking */
350 	xfs_mount_t		*mp;	/* file system mount structure */
351 	__be64			*pp;	/* pointer to block address */
352 
353 	bno = NULLFSBLOCK;
354 	mp = ip->i_mount;
355 	ifp = XFS_IFORK_PTR(ip, whichfork);
356 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
357 		xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
358 		return 0;
359 	}
360 
361 	/*
362 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
363 	 */
364 	block = ifp->if_broot;
365 	level = be16_to_cpu(block->bb_level);
366 	ASSERT(level > 0);
367 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
368 	bno = be64_to_cpu(*pp);
369 	ASSERT(bno != NULLFSBLOCK);
370 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
371 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
372 
373 	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
374 		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
375 				 mp);
376 		return -EFSCORRUPTED;
377 	}
378 
379 	return 0;
380 }
381 
382 /*
383  * returns 1 for success, 0 if we failed to map the extent.
384  */
385 STATIC int
386 xfs_getbmapx_fix_eof_hole(
387 	xfs_inode_t		*ip,		/* xfs incore inode pointer */
388 	int			whichfork,
389 	struct getbmapx		*out,		/* output structure */
390 	int			prealloced,	/* this is a file with
391 						 * preallocated data space */
392 	__int64_t		end,		/* last block requested */
393 	xfs_fsblock_t		startblock,
394 	bool			moretocome)
395 {
396 	__int64_t		fixlen;
397 	xfs_mount_t		*mp;		/* file system mount point */
398 	xfs_ifork_t		*ifp;		/* inode fork pointer */
399 	xfs_extnum_t		lastx;		/* last extent pointer */
400 	xfs_fileoff_t		fileblock;
401 
402 	if (startblock == HOLESTARTBLOCK) {
403 		mp = ip->i_mount;
404 		out->bmv_block = -1;
405 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
406 		fixlen -= out->bmv_offset;
407 		if (prealloced && out->bmv_offset + out->bmv_length == end) {
408 			/* Came to hole at EOF. Trim it. */
409 			if (fixlen <= 0)
410 				return 0;
411 			out->bmv_length = fixlen;
412 		}
413 	} else {
414 		if (startblock == DELAYSTARTBLOCK)
415 			out->bmv_block = -2;
416 		else
417 			out->bmv_block = xfs_fsb_to_db(ip, startblock);
418 		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
419 		ifp = XFS_IFORK_PTR(ip, whichfork);
420 		if (!moretocome &&
421 		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
422 		   (lastx == xfs_iext_count(ifp) - 1))
423 			out->bmv_oflags |= BMV_OF_LAST;
424 	}
425 
426 	return 1;
427 }
428 
429 /* Adjust the reported bmap around shared/unshared extent transitions. */
430 STATIC int
431 xfs_getbmap_adjust_shared(
432 	struct xfs_inode		*ip,
433 	int				whichfork,
434 	struct xfs_bmbt_irec		*map,
435 	struct getbmapx			*out,
436 	struct xfs_bmbt_irec		*next_map)
437 {
438 	struct xfs_mount		*mp = ip->i_mount;
439 	xfs_agnumber_t			agno;
440 	xfs_agblock_t			agbno;
441 	xfs_agblock_t			ebno;
442 	xfs_extlen_t			elen;
443 	xfs_extlen_t			nlen;
444 	int				error;
445 
446 	next_map->br_startblock = NULLFSBLOCK;
447 	next_map->br_startoff = NULLFILEOFF;
448 	next_map->br_blockcount = 0;
449 
450 	/* Only written data blocks can be shared. */
451 	if (!xfs_is_reflink_inode(ip) ||
452 	    whichfork != XFS_DATA_FORK ||
453 	    !xfs_bmap_is_real_extent(map))
454 		return 0;
455 
456 	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
457 	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
458 	error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
459 			&ebno, &elen, true);
460 	if (error)
461 		return error;
462 
463 	if (ebno == NULLAGBLOCK) {
464 		/* No shared blocks at all. */
465 		return 0;
466 	} else if (agbno == ebno) {
467 		/*
468 		 * Shared extent at (agbno, elen).  Shrink the reported
469 		 * extent length and prepare to move the start of map[i]
470 		 * to agbno+elen, with the aim of (re)formatting the new
471 		 * map[i] the next time through the inner loop.
472 		 */
473 		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
474 		out->bmv_oflags |= BMV_OF_SHARED;
475 		if (elen != map->br_blockcount) {
476 			*next_map = *map;
477 			next_map->br_startblock += elen;
478 			next_map->br_startoff += elen;
479 			next_map->br_blockcount -= elen;
480 		}
481 		map->br_blockcount -= elen;
482 	} else {
483 		/*
484 		 * There's an unshared extent (agbno, ebno - agbno)
485 		 * followed by shared extent at (ebno, elen).  Shrink
486 		 * the reported extent length to cover only the unshared
487 		 * extent and prepare to move up the start of map[i] to
488 		 * ebno, with the aim of (re)formatting the new map[i]
489 		 * the next time through the inner loop.
490 		 */
491 		*next_map = *map;
492 		nlen = ebno - agbno;
493 		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
494 		next_map->br_startblock += nlen;
495 		next_map->br_startoff += nlen;
496 		next_map->br_blockcount -= nlen;
497 		map->br_blockcount -= nlen;
498 	}
499 
500 	return 0;
501 }
502 
503 /*
504  * Get inode's extents as described in bmv, and format for output.
505  * Calls formatter to fill the user's buffer until all extents
506  * are mapped, until the passed-in bmv->bmv_count slots have
507  * been filled, or until the formatter short-circuits the loop,
508  * if it is tracking filled-in extents on its own.
509  */
510 int						/* error code */
511 xfs_getbmap(
512 	xfs_inode_t		*ip,
513 	struct getbmapx		*bmv,		/* user bmap structure */
514 	xfs_bmap_format_t	formatter,	/* format to user */
515 	void			*arg)		/* formatter arg */
516 {
517 	__int64_t		bmvend;		/* last block requested */
518 	int			error = 0;	/* return value */
519 	__int64_t		fixlen;		/* length for -1 case */
520 	int			i;		/* extent number */
521 	int			lock;		/* lock state */
522 	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
523 	xfs_mount_t		*mp;		/* file system mount point */
524 	int			nex;		/* # of user extents can do */
525 	int			subnex;		/* # of bmapi's can do */
526 	int			nmap;		/* number of map entries */
527 	struct getbmapx		*out;		/* output structure */
528 	int			whichfork;	/* data or attr fork */
529 	int			prealloced;	/* this is a file with
530 						 * preallocated data space */
531 	int			iflags;		/* interface flags */
532 	int			bmapi_flags;	/* flags for xfs_bmapi */
533 	int			cur_ext = 0;
534 	struct xfs_bmbt_irec	inject_map;
535 
536 	mp = ip->i_mount;
537 	iflags = bmv->bmv_iflags;
538 
539 #ifndef DEBUG
540 	/* Only allow CoW fork queries if we're debugging. */
541 	if (iflags & BMV_IF_COWFORK)
542 		return -EINVAL;
543 #endif
544 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
545 		return -EINVAL;
546 
547 	if (iflags & BMV_IF_ATTRFORK)
548 		whichfork = XFS_ATTR_FORK;
549 	else if (iflags & BMV_IF_COWFORK)
550 		whichfork = XFS_COW_FORK;
551 	else
552 		whichfork = XFS_DATA_FORK;
553 
554 	switch (whichfork) {
555 	case XFS_ATTR_FORK:
556 		if (XFS_IFORK_Q(ip)) {
557 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
558 			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
559 			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
560 				return -EINVAL;
561 		} else if (unlikely(
562 			   ip->i_d.di_aformat != 0 &&
563 			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
564 			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
565 					 ip->i_mount);
566 			return -EFSCORRUPTED;
567 		}
568 
569 		prealloced = 0;
570 		fixlen = 1LL << 32;
571 		break;
572 	case XFS_COW_FORK:
573 		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
574 			return -EINVAL;
575 
576 		if (xfs_get_cowextsz_hint(ip)) {
577 			prealloced = 1;
578 			fixlen = mp->m_super->s_maxbytes;
579 		} else {
580 			prealloced = 0;
581 			fixlen = XFS_ISIZE(ip);
582 		}
583 		break;
584 	default:
585 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
586 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
587 		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
588 			return -EINVAL;
589 
590 		if (xfs_get_extsz_hint(ip) ||
591 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
592 			prealloced = 1;
593 			fixlen = mp->m_super->s_maxbytes;
594 		} else {
595 			prealloced = 0;
596 			fixlen = XFS_ISIZE(ip);
597 		}
598 		break;
599 	}
600 
601 	if (bmv->bmv_length == -1) {
602 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
603 		bmv->bmv_length =
604 			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
605 	} else if (bmv->bmv_length == 0) {
606 		bmv->bmv_entries = 0;
607 		return 0;
608 	} else if (bmv->bmv_length < 0) {
609 		return -EINVAL;
610 	}
611 
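	/*
	 * bmv_count includes the header getbmapx structure in the user's
	 * buffer, so at most bmv_count - 1 extent records can be returned.
	 */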
612 	nex = bmv->bmv_count - 1;
613 	if (nex <= 0)
614 		return -EINVAL;
615 	bmvend = bmv->bmv_offset + bmv->bmv_length;
616 
617 
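	/* Guard against overflow when sizing the output buffer below. */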
618 	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
619 		return -ENOMEM;
620 	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
621 	if (!out)
622 		return -ENOMEM;
623 
624 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
625 	switch (whichfork) {
626 	case XFS_DATA_FORK:
627 		if (!(iflags & BMV_IF_DELALLOC) &&
628 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
629 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
630 			if (error)
631 				goto out_unlock_iolock;
632 
633 			/*
634 			 * Even after flushing the inode, there can still be
635 			 * delalloc blocks on the inode beyond EOF due to
636 			 * speculative preallocation.  These are not removed
637 			 * until the release function is called or the inode
638 			 * is inactivated.  Hence we cannot assert here that
639 			 * ip->i_delayed_blks == 0.
640 			 */
641 		}
642 
643 		lock = xfs_ilock_data_map_shared(ip);
644 		break;
645 	case XFS_COW_FORK:
646 		lock = XFS_ILOCK_SHARED;
647 		xfs_ilock(ip, lock);
648 		break;
649 	case XFS_ATTR_FORK:
650 		lock = xfs_ilock_attr_map_shared(ip);
651 		break;
652 	}
653 
654 	/*
655 	 * Don't let nex be bigger than the number of extents
656 	 * we can have assuming alternating holes and real extents.
657 	 */
658 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
659 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
660 
661 	bmapi_flags = xfs_bmapi_aflag(whichfork);
662 	if (!(iflags & BMV_IF_PREALLOC))
663 		bmapi_flags |= XFS_BMAPI_IGSTATE;
664 
665 	/*
666 	 * Allocate enough space to handle "subnex" maps at a time.
667 	 */
668 	error = -ENOMEM;
669 	subnex = 16;
670 	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
671 	if (!map)
672 		goto out_unlock_ilock;
673 
674 	bmv->bmv_entries = 0;
675 
676 	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
677 	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
678 		error = 0;
679 		goto out_free_map;
680 	}
681 
682 	do {
683 		nmap = (nex > subnex) ? subnex : nex;
684 		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
685 				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
686 				       map, &nmap, bmapi_flags);
687 		if (error)
688 			goto out_free_map;
689 		ASSERT(nmap <= subnex);
690 
691 		for (i = 0; i < nmap && bmv->bmv_length &&
692 				cur_ext < bmv->bmv_count - 1; i++) {
693 			out[cur_ext].bmv_oflags = 0;
694 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
695 				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
696 			else if (map[i].br_startblock == DELAYSTARTBLOCK)
697 				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
698 			out[cur_ext].bmv_offset =
699 				XFS_FSB_TO_BB(mp, map[i].br_startoff);
700 			out[cur_ext].bmv_length =
701 				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
702 			out[cur_ext].bmv_unused1 = 0;
703 			out[cur_ext].bmv_unused2 = 0;
704 
705 			/*
706 			 * delayed allocation extents that start beyond EOF can
707 			 * occur due to speculative EOF allocation when the
708 			 * delalloc extent is larger than the largest freespace
709 			 * extent at conversion time. These extents cannot be
710 			 * converted by data writeback, so can exist here even
711 			 * if we are not supposed to be finding delalloc
712 			 * extents.
713 			 */
714 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
715 			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
716 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
717 
718 			if (map[i].br_startblock == HOLESTARTBLOCK &&
719 			    whichfork == XFS_ATTR_FORK) {
720 				/* came to the end of attribute fork */
721 				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
722 				goto out_free_map;
723 			}
724 
725 			/* Is this a shared block? */
726 			error = xfs_getbmap_adjust_shared(ip, whichfork,
727 					&map[i], &out[cur_ext], &inject_map);
728 			if (error)
729 				goto out_free_map;
730 
731 			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
732 					&out[cur_ext], prealloced, bmvend,
733 					map[i].br_startblock,
734 					inject_map.br_startblock != NULLFSBLOCK))
735 				goto out_free_map;
736 
737 			bmv->bmv_offset =
738 				out[cur_ext].bmv_offset +
739 				out[cur_ext].bmv_length;
740 			bmv->bmv_length =
741 				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
742 
743 			/*
744 			 * In case we don't want to return the hole,
745 			 * don't increase cur_ext so that we can reuse
746 			 * it in the next loop.
747 			 */
748 			if ((iflags & BMV_IF_NO_HOLES) &&
749 			    map[i].br_startblock == HOLESTARTBLOCK) {
750 				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
751 				continue;
752 			}
753 
754 			/*
755 			 * In order to report shared extents accurately,
756 			 * we report each distinct shared/unshared part
757 			 * of a single bmbt record using multiple bmap
758 			 * extents.  To make that happen, we iterate the
759 			 * same map array item multiple times, each
760 			 * time trimming out the subextent that we just
761 			 * reported.
762 			 *
763 			 * Because of this, we must check the out array
764 			 * index (cur_ext) directly against bmv_count-1
765 			 * to avoid overflows.
766 			 */
767 			if (inject_map.br_startblock != NULLFSBLOCK) {
768 				map[i] = inject_map;
769 				i--;
770 			}
771 			bmv->bmv_entries++;
772 			cur_ext++;
773 		}
774 	} while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
775 
776  out_free_map:
777 	kmem_free(map);
778  out_unlock_ilock:
779 	xfs_iunlock(ip, lock);
780  out_unlock_iolock:
781 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
782 
783 	for (i = 0; i < cur_ext; i++) {
784 		/* format results & advance arg */
785 		error = formatter(&arg, &out[i]);
786 		if (error)
787 			break;
788 	}
789 
790 	kmem_free(out);
791 	return error;
792 }
793 
794 /*
795  * dead simple method of punching delayed allocation blocks from a range in
796  * the inode. Walks a block at a time so will be slow, but is only executed in
797  * rare error cases so the overhead is not critical. This will always punch out
798  * both the start and end blocks, even if the ranges only partially overlap
799  * them, so it is up to the caller to ensure that partial blocks are not
800  * passed in.
801  */
802 int
803 xfs_bmap_punch_delalloc_range(
804 	struct xfs_inode	*ip,
805 	xfs_fileoff_t		start_fsb,
806 	xfs_fileoff_t		length)
807 {
808 	xfs_fileoff_t		remaining = length;
809 	int			error = 0;
810 
811 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
812 
813 	do {
814 		int		done;
815 		xfs_bmbt_irec_t	imap;
816 		int		nimaps = 1;
817 		xfs_fsblock_t	firstblock;
818 		struct xfs_defer_ops dfops;
819 
820 		/*
821 		 * Map the range first and check that it is a delalloc extent
822 		 * before trying to unmap the range. Otherwise we will be
823 		 * trying to remove a real extent (which requires a
824 		 * transaction) or a hole, which is probably a bad idea...
825 		 */
826 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
827 				       XFS_BMAPI_ENTIRE);
828 
829 		if (error) {
830 			/* something screwed, just bail */
831 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
832 				xfs_alert(ip->i_mount,
833 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
834 						ip->i_ino, start_fsb);
835 			}
836 			break;
837 		}
838 		if (!nimaps) {
839 			/* nothing there */
840 			goto next_block;
841 		}
842 		if (imap.br_startblock != DELAYSTARTBLOCK) {
843 			/* been converted, ignore */
844 			goto next_block;
845 		}
846 		WARN_ON(imap.br_blockcount == 0);
847 
848 		/*
849 		 * Note: while we initialise the firstblock/dfops pair, they
850 		 * should never be used because blocks should never be
851 		 * allocated or freed for a delalloc extent and hence we don't
852 		 * need to cancel or finish them after the xfs_bunmapi() call.
853 		 */
854 		xfs_defer_init(&dfops, &firstblock);
855 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
856 					&dfops, &done);
857 		if (error)
858 			break;
859 
860 		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
861 next_block:
862 		start_fsb++;
863 		remaining--;
864 	} while (remaining > 0);
865 
866 	return error;
867 }
868 
869 /*
870  * Test whether it is appropriate to check an inode for and free post EOF
871  * blocks. The 'force' parameter determines whether we should also consider
872  * regular files that are marked preallocated or append-only.
873  */
874 bool
875 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
876 {
877 	/* prealloc/delalloc exists only on regular files */
878 	if (!S_ISREG(VFS_I(ip)->i_mode))
879 		return false;
880 
881 	/*
882 	 * Zero sized files with no cached pages and no delalloc blocks will
883 	 * not have speculative prealloc/delalloc blocks to remove.
884 	 */
885 	if (VFS_I(ip)->i_size == 0 &&
886 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
887 	    ip->i_delayed_blks == 0)
888 		return false;
889 
890 	/* If we haven't read in the extent list, then don't do it now. */
891 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
892 		return false;
893 
894 	/*
895 	 * Do not free real preallocated or append-only files unless the file
896 	 * has delalloc blocks and we are forced to remove them.
897 	 */
898 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
899 		if (!force || ip->i_delayed_blks == 0)
900 			return false;
901 
902 	return true;
903 }
904 
905 /*
906  * This is called to free any blocks beyond eof. The caller must hold
907  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
908  * reference to the inode.
909  */
910 int
911 xfs_free_eofblocks(
912 	struct xfs_inode	*ip)
913 {
914 	struct xfs_trans	*tp;
915 	int			error;
916 	xfs_fileoff_t		end_fsb;
917 	xfs_fileoff_t		last_fsb;
918 	xfs_filblks_t		map_len;
919 	int			nimaps;
920 	struct xfs_bmbt_irec	imap;
921 	struct xfs_mount	*mp = ip->i_mount;
922 
923 	/*
924 	 * Figure out if there are any blocks beyond the end
925 	 * of the file.  If not, then there is nothing to do.
926 	 */
927 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
928 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
929 	if (last_fsb <= end_fsb)
930 		return 0;
931 	map_len = last_fsb - end_fsb;
932 
933 	nimaps = 1;
934 	xfs_ilock(ip, XFS_ILOCK_SHARED);
935 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
936 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
937 
938 	/*
939 	 * If there are blocks after the end of file, truncate the file to its
940 	 * current size to free them up.
941 	 */
942 	if (!error && (nimaps != 0) &&
943 	    (imap.br_startblock != HOLESTARTBLOCK ||
944 	     ip->i_delayed_blks)) {
945 		/*
946 		 * Attach the dquots to the inode up front.
947 		 */
948 		error = xfs_qm_dqattach(ip, 0);
949 		if (error)
950 			return error;
951 
952 		/* wait on dio to ensure i_size has settled */
953 		inode_dio_wait(VFS_I(ip));
954 
955 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
956 				&tp);
957 		if (error) {
958 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
959 			return error;
960 		}
961 
962 		xfs_ilock(ip, XFS_ILOCK_EXCL);
963 		xfs_trans_ijoin(tp, ip, 0);
964 
965 		/*
966 		 * Do not update the on-disk file size.  If we update the
967 		 * on-disk file size and then the system crashes before the
968 		 * contents of the file are flushed to disk then the files
969 		 * may be full of holes (i.e. the NULL files bug).
970 		 */
971 		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
972 					      XFS_ISIZE(ip));
973 		if (error) {
974 			/*
975 			 * If we get an error at this point we simply don't
976 			 * bother truncating the file.
977 			 */
978 			xfs_trans_cancel(tp);
979 		} else {
980 			error = xfs_trans_commit(tp);
981 			if (!error)
982 				xfs_inode_clear_eofblocks_tag(ip);
983 		}
984 
985 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
986 	}
987 	return error;
988 }
989 
990 int
991 xfs_alloc_file_space(
992 	struct xfs_inode	*ip,
993 	xfs_off_t		offset,
994 	xfs_off_t		len,
995 	int			alloc_type)
996 {
997 	xfs_mount_t		*mp = ip->i_mount;
998 	xfs_off_t		count;
999 	xfs_filblks_t		allocated_fsb;
1000 	xfs_filblks_t		allocatesize_fsb;
1001 	xfs_extlen_t		extsz, temp;
1002 	xfs_fileoff_t		startoffset_fsb;
1003 	xfs_fsblock_t		firstfsb;
1004 	int			nimaps;
1005 	int			quota_flag;
1006 	int			rt;
1007 	xfs_trans_t		*tp;
1008 	xfs_bmbt_irec_t		imaps[1], *imapp;
1009 	struct xfs_defer_ops	dfops;
1010 	uint			qblocks, resblks, resrtextents;
1011 	int			error;
1012 
1013 	trace_xfs_alloc_file_space(ip);
1014 
1015 	if (XFS_FORCED_SHUTDOWN(mp))
1016 		return -EIO;
1017 
1018 	error = xfs_qm_dqattach(ip, 0);
1019 	if (error)
1020 		return error;
1021 
1022 	if (len <= 0)
1023 		return -EINVAL;
1024 
1025 	rt = XFS_IS_REALTIME_INODE(ip);
1026 	extsz = xfs_get_extsz_hint(ip);
1027 
1028 	count = len;
1029 	imapp = &imaps[0];
1030 	nimaps = 1;
1031 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
1032 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1033 
1034 	/*
1035 	 * Allocate file space until done or until there is an error
1036 	 */
1037 	while (allocatesize_fsb && !error) {
1038 		xfs_fileoff_t	s, e;
1039 
1040 		/*
1041 		 * Determine space reservations for data/realtime.
1042 		 */
1043 		if (unlikely(extsz)) {
1044 			s = startoffset_fsb;
1045 			do_div(s, extsz);
1046 			s *= extsz;
1047 			e = startoffset_fsb + allocatesize_fsb;
1048 			if ((temp = do_mod(startoffset_fsb, extsz)))
1049 				e += temp;
1050 			if ((temp = do_mod(e, extsz)))
1051 				e += extsz - temp;
1052 		} else {
1053 			s = 0;
1054 			e = allocatesize_fsb;
1055 		}
1056 
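		/*
		 * s..e now covers the allocation rounded out to extent size
		 * hint boundaries; it is only used to size the reservations
		 * below.
		 */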
1057 		/*
1058 		 * The transaction reservation is limited to a 32-bit block
1059 		 * count, hence we need to limit the number of blocks we are
1060 		 * trying to reserve to avoid an overflow. We can't allocate
1061 		 * more than @nimaps extents, and an extent is limited on disk
1062 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1063 		 */
1064 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
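		/*
		 * XFS_DIOSTRAT_SPACE_RES() adds worst-case bmap btree
		 * expansion on top of the block count passed in; for the
		 * realtime case the data blocks come from the rt device, so
		 * only the btree overhead is reserved from the data device.
		 */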
1065 		if (unlikely(rt)) {
1066 			resrtextents = qblocks = resblks;
1067 			resrtextents /= mp->m_sb.sb_rextsize;
1068 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1069 			quota_flag = XFS_QMOPT_RES_RTBLKS;
1070 		} else {
1071 			resrtextents = 0;
1072 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1073 			quota_flag = XFS_QMOPT_RES_REGBLKS;
1074 		}
1075 
1076 		/*
1077 		 * Allocate and setup the transaction.
1078 		 */
1079 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
1080 				resrtextents, 0, &tp);
1081 
1082 		/*
1083 		 * Check for running out of space
1084 		 */
1085 		if (error) {
1086 			/*
1087 			 * Free the transaction structure.
1088 			 */
1089 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1090 			break;
1091 		}
1092 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1093 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1094 						      0, quota_flag);
1095 		if (error)
1096 			goto error1;
1097 
1098 		xfs_trans_ijoin(tp, ip, 0);
1099 
1100 		xfs_defer_init(&dfops, &firstfsb);
1101 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1102 					allocatesize_fsb, alloc_type, &firstfsb,
1103 					resblks, imapp, &nimaps, &dfops);
1104 		if (error)
1105 			goto error0;
1106 
1107 		/*
1108 		 * Complete the transaction
1109 		 */
1110 		error = xfs_defer_finish(&tp, &dfops, NULL);
1111 		if (error)
1112 			goto error0;
1113 
1114 		error = xfs_trans_commit(tp);
1115 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1116 		if (error)
1117 			break;
1118 
1119 		allocated_fsb = imapp->br_blockcount;
1120 
1121 		if (nimaps == 0) {
1122 			error = -ENOSPC;
1123 			break;
1124 		}
1125 
1126 		startoffset_fsb += allocated_fsb;
1127 		allocatesize_fsb -= allocated_fsb;
1128 	}
1129 
1130 	return error;
1131 
1132 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1133 	xfs_defer_cancel(&dfops);
1134 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1135 
1136 error1:	/* Just cancel transaction */
1137 	xfs_trans_cancel(tp);
1138 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1139 	return error;
1140 }
1141 
1142 static int
1143 xfs_unmap_extent(
1144 	struct xfs_inode	*ip,
1145 	xfs_fileoff_t		startoffset_fsb,
1146 	xfs_filblks_t		len_fsb,
1147 	int			*done)
1148 {
1149 	struct xfs_mount	*mp = ip->i_mount;
1150 	struct xfs_trans	*tp;
1151 	struct xfs_defer_ops	dfops;
1152 	xfs_fsblock_t		firstfsb;
1153 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1154 	int			error;
1155 
1156 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1157 	if (error) {
1158 		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1159 		return error;
1160 	}
1161 
1162 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1163 	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1164 			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1165 	if (error)
1166 		goto out_trans_cancel;
1167 
1168 	xfs_trans_ijoin(tp, ip, 0);
1169 
1170 	xfs_defer_init(&dfops, &firstfsb);
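	/*
	 * Unmap at most two extents per transaction so each iteration stays
	 * within the log reservation taken above; the caller loops until
	 * *done is set.
	 */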
1171 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
1172 			&dfops, done);
1173 	if (error)
1174 		goto out_bmap_cancel;
1175 
1176 	error = xfs_defer_finish(&tp, &dfops, ip);
1177 	if (error)
1178 		goto out_bmap_cancel;
1179 
1180 	error = xfs_trans_commit(tp);
1181 out_unlock:
1182 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1183 	return error;
1184 
1185 out_bmap_cancel:
1186 	xfs_defer_cancel(&dfops);
1187 out_trans_cancel:
1188 	xfs_trans_cancel(tp);
1189 	goto out_unlock;
1190 }
1191 
1192 static int
1193 xfs_adjust_extent_unmap_boundaries(
1194 	struct xfs_inode	*ip,
1195 	xfs_fileoff_t		*startoffset_fsb,
1196 	xfs_fileoff_t		*endoffset_fsb)
1197 {
1198 	struct xfs_mount	*mp = ip->i_mount;
1199 	struct xfs_bmbt_irec	imap;
1200 	int			nimap, error;
1201 	xfs_extlen_t		mod = 0;
1202 
1203 	nimap = 1;
1204 	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1205 	if (error)
1206 		return error;
1207 
1208 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1209 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1210 		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
1211 		if (mod)
1212 			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1213 	}
1214 
1215 	nimap = 1;
1216 	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1217 	if (error)
1218 		return error;
1219 
1220 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1221 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1222 		mod++;
1223 		if (mod && mod != mp->m_sb.sb_rextsize)
1224 			*endoffset_fsb -= mod;
1225 	}
1226 
1227 	return 0;
1228 }
1229 
1230 static int
1231 xfs_flush_unmap_range(
1232 	struct xfs_inode	*ip,
1233 	xfs_off_t		offset,
1234 	xfs_off_t		len)
1235 {
1236 	struct xfs_mount	*mp = ip->i_mount;
1237 	struct inode		*inode = VFS_I(ip);
1238 	xfs_off_t		rounding, start, end;
1239 	int			error;
1240 
1241 	/* wait for the completion of any pending DIOs */
1242 	inode_dio_wait(inode);
1243 
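	/*
	 * Round the range out to the larger of the filesystem block size and
	 * the page size so that whole pages and blocks are written back and
	 * invalidated.
	 */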
1244 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1245 	start = round_down(offset, rounding);
1246 	end = round_up(offset + len, rounding) - 1;
1247 
1248 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1249 	if (error)
1250 		return error;
1251 	truncate_pagecache_range(inode, start, end);
1252 	return 0;
1253 }
1254 
1255 int
1256 xfs_free_file_space(
1257 	struct xfs_inode	*ip,
1258 	xfs_off_t		offset,
1259 	xfs_off_t		len)
1260 {
1261 	struct xfs_mount	*mp = ip->i_mount;
1262 	xfs_fileoff_t		startoffset_fsb;
1263 	xfs_fileoff_t		endoffset_fsb;
1264 	int			done = 0, error;
1265 
1266 	trace_xfs_free_file_space(ip);
1267 
1268 	error = xfs_qm_dqattach(ip, 0);
1269 	if (error)
1270 		return error;
1271 
1272 	if (len <= 0)	/* if nothing being freed */
1273 		return 0;
1274 
1275 	error = xfs_flush_unmap_range(ip, offset, len);
1276 	if (error)
1277 		return error;
1278 
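	/*
	 * Round the start offset up and the end offset down so that only
	 * whole blocks are unmapped; any partial blocks at either edge are
	 * zeroed (not punched) by xfs_zero_range() below.
	 */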
1279 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1280 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1281 
1282 	/*
1283 	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
1284 	 * and we can't use unwritten extents then we actually need to zero
1285 	 * the whole extent, otherwise we just need to take care of the block
1286 	 * boundaries, and xfs_bunmapi will handle the rest.
1287 	 */
1288 	if (XFS_IS_REALTIME_INODE(ip) &&
1289 	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1290 		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1291 				&endoffset_fsb);
1292 		if (error)
1293 			return error;
1294 	}
1295 
1296 	if (endoffset_fsb > startoffset_fsb) {
1297 		while (!done) {
1298 			error = xfs_unmap_extent(ip, startoffset_fsb,
1299 					endoffset_fsb - startoffset_fsb, &done);
1300 			if (error)
1301 				return error;
1302 		}
1303 	}
1304 
1305 	/*
1306 	 * Now that we've unmapped all full blocks we'll have to zero out any
1307 	 * partial block at the beginning and/or end.  xfs_zero_range is
1308 	 * smart enough to skip any holes, including those we just created,
1309 	 * but we must take care not to zero beyond EOF and enlarge i_size.
1310 	 */
1311 
1312 	if (offset >= XFS_ISIZE(ip))
1313 		return 0;
1314 
1315 	if (offset + len > XFS_ISIZE(ip))
1316 		len = XFS_ISIZE(ip) - offset;
1317 
1318 	return xfs_zero_range(ip, offset, len, NULL);
1319 }
1320 
1321 /*
1322  * Preallocate and zero a range of a file. This mechanism has the allocation
1323  * semantics of fallocate and in addition converts data in the range to zeroes.
1324  */
1325 int
1326 xfs_zero_file_space(
1327 	struct xfs_inode	*ip,
1328 	xfs_off_t		offset,
1329 	xfs_off_t		len)
1330 {
1331 	struct xfs_mount	*mp = ip->i_mount;
1332 	uint			blksize;
1333 	int			error;
1334 
1335 	trace_xfs_zero_file_space(ip);
1336 
1337 	blksize = 1 << mp->m_sb.sb_blocklog;
1338 
1339 	/*
1340 	 * Punch a hole and prealloc the range. We use hole punch rather than
1341 	 * unwritten extent conversion for two reasons:
1342 	 *
1343 	 * 1.) Hole punch handles partial block zeroing for us.
1344 	 *
1345 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1346 	 * by virtue of the hole punch.
1347 	 */
1348 	error = xfs_free_file_space(ip, offset, len);
1349 	if (error)
1350 		goto out;
1351 
1352 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1353 				     round_up(offset + len, blksize) -
1354 				     round_down(offset, blksize),
1355 				     XFS_BMAPI_PREALLOC);
1356 out:
1357 	return error;
1358 
1359 }
1360 
1361 /*
1362  * @next_fsb will keep track of the extent currently undergoing shift.
1363  * @stop_fsb will keep track of the extent at which we have to stop.
1364  * If we are shifting left, we will start with block (offset + len) and
1365  * shift each extent till last extent.
1366  * If we are shifting right, we will start with last extent inside file space
1367  * and continue until we reach the block corresponding to offset.
1368  */
1369 static int
1370 xfs_shift_file_space(
1371 	struct xfs_inode        *ip,
1372 	xfs_off_t               offset,
1373 	xfs_off_t               len,
1374 	enum shift_direction	direction)
1375 {
1376 	int			done = 0;
1377 	struct xfs_mount	*mp = ip->i_mount;
1378 	struct xfs_trans	*tp;
1379 	int			error;
1380 	struct xfs_defer_ops	dfops;
1381 	xfs_fsblock_t		first_block;
1382 	xfs_fileoff_t		stop_fsb;
1383 	xfs_fileoff_t		next_fsb;
1384 	xfs_fileoff_t		shift_fsb;
1385 	uint			resblks;
1386 
1387 	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
1388 
1389 	if (direction == SHIFT_LEFT) {
1390 		/*
1391 		 * Reserve blocks to cover potential extent merges after left
1392 		 * shift operations.
1393 		 */
1394 		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1395 		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1396 		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1397 	} else {
1398 		/*
1399 		 * If right shift, delegate the work of initialization of
1400 		 * next_fsb to xfs_bmap_shift_extents as it has the ilock held.
1401 		 */
1402 		resblks = 0;
1403 		next_fsb = NULLFSBLOCK;
1404 		stop_fsb = XFS_B_TO_FSB(mp, offset);
1405 	}
1406 
1407 	shift_fsb = XFS_B_TO_FSB(mp, len);
1408 
1409 	/*
1410 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1411 	 * into the accessible region of the file.
1412 	 */
1413 	if (xfs_can_free_eofblocks(ip, true)) {
1414 		error = xfs_free_eofblocks(ip);
1415 		if (error)
1416 			return error;
1417 	}
1418 
1419 	/*
1420 	 * Writeback and invalidate cache for the remainder of the file as we're
1421 	 * about to shift down every extent from offset to EOF.
1422 	 */
1423 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1424 					     offset, -1);
1425 	if (error)
1426 		return error;
1427 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1428 					offset >> PAGE_SHIFT, -1);
1429 	if (error)
1430 		return error;
1431 
1432 	/*
1433 	 * The extent shifting code works on extent granularity. So, if
1434 	 * stop_fsb is not the starting block of an extent, we need to split
1435 	 * the extent at stop_fsb.
1436 	 */
1437 	if (direction == SHIFT_RIGHT) {
1438 		error = xfs_bmap_split_extent(ip, stop_fsb);
1439 		if (error)
1440 			return error;
1441 	}
1442 
1443 	while (!error && !done) {
1444 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1445 					&tp);
1446 		if (error)
1447 			break;
1448 
1449 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1450 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1451 				ip->i_gdquot, ip->i_pdquot, resblks, 0,
1452 				XFS_QMOPT_RES_REGBLKS);
1453 		if (error)
1454 			goto out_trans_cancel;
1455 
1456 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1457 
1458 		xfs_defer_init(&dfops, &first_block);
1459 
1460 		/*
1461 		 * We are using the write transaction, in which a maximum of
1462 		 * two bmbt updates are allowed.
1463 		 */
1464 		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
1465 				&done, stop_fsb, &first_block, &dfops,
1466 				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
1467 		if (error)
1468 			goto out_bmap_cancel;
1469 
1470 		error = xfs_defer_finish(&tp, &dfops, NULL);
1471 		if (error)
1472 			goto out_bmap_cancel;
1473 
1474 		error = xfs_trans_commit(tp);
1475 	}
1476 
1477 	return error;
1478 
1479 out_bmap_cancel:
1480 	xfs_defer_cancel(&dfops);
1481 out_trans_cancel:
1482 	xfs_trans_cancel(tp);
1483 	return error;
1484 }
1485 
1486 /*
1487  * xfs_collapse_file_space()
1488  *	This routine frees disk space and shifts extents for the given file.
1489  *	The first thing we do is free the data blocks in the specified range
1490  *	by calling xfs_free_file_space(), which also syncs dirty data and
1491  *	invalidates the page cache over the region the collapse range is
1492  *	working on. Then the extent records are shifted left to cover the hole.
1493  * RETURNS:
1494  *	0 on success
1495  *	errno on error
1496  *
1497  */
1498 int
1499 xfs_collapse_file_space(
1500 	struct xfs_inode	*ip,
1501 	xfs_off_t		offset,
1502 	xfs_off_t		len)
1503 {
1504 	int error;
1505 
1506 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1507 	trace_xfs_collapse_file_space(ip);
1508 
1509 	error = xfs_free_file_space(ip, offset, len);
1510 	if (error)
1511 		return error;
1512 
1513 	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1514 }
1515 
1516 /*
1517  * xfs_insert_file_space()
1518  *	This routine creates hole space by shifting extents for the given file.
1519  *	The first thing we do is sync dirty data and invalidate the page cache
1520  *	over the region the insert range is working on. We then split the
1521  *	extent at the given offset into two by calling xfs_bmap_split_extent,
1522  *	and shift all extent records lying between [offset,
1523  *	last allocated extent] to the right to make room for the hole.
1524  * RETURNS:
1525  *	0 on success
1526  *	errno on error
1527  */
1528 int
1529 xfs_insert_file_space(
1530 	struct xfs_inode	*ip,
1531 	loff_t			offset,
1532 	loff_t			len)
1533 {
1534 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1535 	trace_xfs_insert_file_space(ip);
1536 
1537 	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1538 }
1539 
1540 /*
1541  * We need to check that the format of the data fork in the temporary inode is
1542  * valid for the target inode before doing the swap. This is not a problem with
1543  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1544  * data fork depending on the space the attribute fork is taking so we can get
1545  * invalid formats on the target inode.
1546  *
1547  * E.g. target has space for 7 extents in extent format, temp inode only has
1548  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1549  * btree, but when swapped it needs to be in extent format. Hence we can't just
1550  * blindly swap data forks on attr2 filesystems.
1551  *
1552  * Note that we check the swap in both directions so that we don't end up with
1553  * a corrupt temporary inode, either.
1554  *
1555  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1556  * inode will prevent this situation from occurring, so all we do here is
1557  * reject and log the attempt. Basically we are putting the responsibility on
1558  * userspace to get this right.
1559  */
1560 static int
1561 xfs_swap_extents_check_format(
1562 	struct xfs_inode	*ip,	/* target inode */
1563 	struct xfs_inode	*tip)	/* tmp inode */
1564 {
1565 
1566 	/* Should never get a local format */
1567 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1568 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1569 		return -EINVAL;
1570 
1571 	/*
1572 	 * if the target inode has fewer extents than the temporary inode then
1573 	 * why did userspace call us?
1574 	 */
1575 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1576 		return -EINVAL;
1577 
1578 	/*
1579 	 * If we have to use the (expensive) rmap swap method, we can
1580 	 * handle any number of extents and any format.
1581 	 */
1582 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1583 		return 0;
1584 
1585 	/*
1586 	 * if the target inode is in extent form and the temp inode is in btree
1587 	 * form then we will end up with the target inode in the wrong format
1588 	 * as we already know there are fewer extents in the temp inode.
1589 	 */
1590 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1591 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1592 		return -EINVAL;
1593 
1594 	/* Check temp in extent form to max in target */
1595 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1596 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1597 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1598 		return -EINVAL;
1599 
1600 	/* Check target in extent form to max in temp */
1601 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1602 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1603 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1604 		return -EINVAL;
1605 
1606 	/*
1607 	 * If we are in a btree format, check that the temp root block will fit
1608 	 * in the target and that it has enough extents to be in btree format
1609 	 * in the target.
1610 	 *
1611 	 * Note that we have to be careful to allow btree->extent conversions
1612 	 * (a common defrag case) which will occur when the temp inode is in
1613 	 * extent format...
1614 	 */
1615 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1616 		if (XFS_IFORK_BOFF(ip) &&
1617 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1618 			return -EINVAL;
1619 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1620 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1621 			return -EINVAL;
1622 	}
1623 
1624 	/* Reciprocal target->temp btree format checks */
1625 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1626 		if (XFS_IFORK_BOFF(tip) &&
1627 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1628 			return -EINVAL;
1629 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1630 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1631 			return -EINVAL;
1632 	}
1633 
1634 	return 0;
1635 }
1636 
1637 static int
1638 xfs_swap_extent_flush(
1639 	struct xfs_inode	*ip)
1640 {
1641 	int	error;
1642 
1643 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1644 	if (error)
1645 		return error;
1646 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1647 
1648 	/* Verify O_DIRECT for ftmp */
1649 	if (VFS_I(ip)->i_mapping->nrpages)
1650 		return -EINVAL;
1651 	return 0;
1652 }
1653 
1654 /*
1655  * Move extents from one file to another, when rmap is enabled.
1656  */
1657 STATIC int
1658 xfs_swap_extent_rmap(
1659 	struct xfs_trans		**tpp,
1660 	struct xfs_inode		*ip,
1661 	struct xfs_inode		*tip)
1662 {
1663 	struct xfs_bmbt_irec		irec;
1664 	struct xfs_bmbt_irec		uirec;
1665 	struct xfs_bmbt_irec		tirec;
1666 	xfs_fileoff_t			offset_fsb;
1667 	xfs_fileoff_t			end_fsb;
1668 	xfs_filblks_t			count_fsb;
1669 	xfs_fsblock_t			firstfsb;
1670 	struct xfs_defer_ops		dfops;
1671 	int				error;
1672 	xfs_filblks_t			ilen;
1673 	xfs_filblks_t			rlen;
1674 	int				nimaps;
1675 	__uint64_t			tip_flags2;
1676 
1677 	/*
1678 	 * If the source file has shared blocks, we must flag the donor
1679 	 * file as having shared blocks so that we get the shared-block
1680 	 * rmap functions when we go to fix up the rmaps.  The flags
1681 	 * will be switched for real later.
1682 	 */
1683 	tip_flags2 = tip->i_d.di_flags2;
1684 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1685 		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1686 
1687 	offset_fsb = 0;
1688 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1689 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1690 
1691 	while (count_fsb) {
1692 		/* Read extent from the donor file */
1693 		nimaps = 1;
1694 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1695 				&nimaps, 0);
1696 		if (error)
1697 			goto out;
1698 		ASSERT(nimaps == 1);
1699 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1700 
1701 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
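		/*
		 * Remember the full length of the donor mapping; the inner
		 * loop below trims tirec as it goes, and the outer loop
		 * advances by ilen once the whole mapping has been remapped.
		 */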
1702 		ilen = tirec.br_blockcount;
1703 
1704 		/* Unmap the old blocks in the source file. */
1705 		while (tirec.br_blockcount) {
1706 			xfs_defer_init(&dfops, &firstfsb);
1707 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1708 
1709 			/* Read extent from the source file */
1710 			nimaps = 1;
1711 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1712 					tirec.br_blockcount, &irec,
1713 					&nimaps, 0);
1714 			if (error)
1715 				goto out_defer;
1716 			ASSERT(nimaps == 1);
1717 			ASSERT(tirec.br_startoff == irec.br_startoff);
1718 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1719 
1720 			/* Trim the extent. */
1721 			uirec = tirec;
1722 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1723 					tirec.br_blockcount,
1724 					irec.br_blockcount);
1725 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1726 
1727 			/* Remove the mapping from the donor file. */
1728 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1729 					tip, &uirec);
1730 			if (error)
1731 				goto out_defer;
1732 
1733 			/* Remove the mapping from the source file. */
1734 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1735 					ip, &irec);
1736 			if (error)
1737 				goto out_defer;
1738 
1739 			/* Map the donor file's blocks into the source file. */
1740 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1741 					ip, &uirec);
1742 			if (error)
1743 				goto out_defer;
1744 
1745 			/* Map the source file's blocks into the donor file. */
1746 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1747 					tip, &irec);
1748 			if (error)
1749 				goto out_defer;
1750 
1751 			error = xfs_defer_finish(tpp, &dfops, ip);
1752 			if (error)
1753 				goto out_defer;
1754 
1755 			tirec.br_startoff += rlen;
1756 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1757 			    tirec.br_startblock != DELAYSTARTBLOCK)
1758 				tirec.br_startblock += rlen;
1759 			tirec.br_blockcount -= rlen;
1760 		}
1761 
1762 		/* Roll on... */
1763 		count_fsb -= ilen;
1764 		offset_fsb += ilen;
1765 	}
1766 
1767 	tip->i_d.di_flags2 = tip_flags2;
1768 	return 0;
1769 
1770 out_defer:
1771 	xfs_defer_cancel(&dfops);
1772 out:
1773 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1774 	tip->i_d.di_flags2 = tip_flags2;
1775 	return error;
1776 }
1777 
1778 /* Swap the extents of two files by swapping data forks. */
1779 STATIC int
1780 xfs_swap_extent_forks(
1781 	struct xfs_trans	*tp,
1782 	struct xfs_inode	*ip,
1783 	struct xfs_inode	*tip,
1784 	int			*src_log_flags,
1785 	int			*target_log_flags)
1786 {
1787 	struct xfs_ifork	tempifp, *ifp, *tifp;
1788 	int			aforkblks = 0;
1789 	int			taforkblks = 0;
1790 	xfs_extnum_t		nextents;
1791 	__uint64_t		tmp;
1792 	int			error;
1793 
1794 	/*
1795 	 * Count the number of extended attribute blocks
1796 	 */
1797 	if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1798 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1799 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK,
1800 				&aforkblks);
1801 		if (error)
1802 			return error;
1803 	}
1804 	if (((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1805 	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1806 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1807 				&taforkblks);
1808 		if (error)
1809 			return error;
1810 	}
1811 
1812 	/*
1813 	 * Before we swap the forks, let's set the owners of the forks
1814 	 * appropriately. We have to do this as we are demand paging the btree
1815 	 * buffers, and so the validation done on read will expect the owner
1816 	 * field to be correctly set. Once we change the owners, we can swap the
1817 	 * inode forks.
1818 	 */
1819 	if (ip->i_d.di_version == 3 &&
1820 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1821 		(*target_log_flags) |= XFS_ILOG_DOWNER;
1822 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1823 					      tip->i_ino, NULL);
1824 		if (error)
1825 			return error;
1826 	}
1827 
1828 	if (tip->i_d.di_version == 3 &&
1829 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1830 		(*src_log_flags) |= XFS_ILOG_DOWNER;
1831 		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1832 					      ip->i_ino, NULL);
1833 		if (error)
1834 			return error;
1835 	}
1836 
1837 	/*
1838 	 * Swap the data forks of the inodes
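	 * with plain struct copies.  Inline extent pointers that end up
	 * pointing into the other inode are fixed up below, once the
	 * on-disk fields have been swapped.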
1839 	 */
1840 	ifp = &ip->i_df;
1841 	tifp = &tip->i_df;
1842 	tempifp = *ifp;		/* struct copy */
1843 	*ifp = *tifp;		/* struct copy */
1844 	*tifp = tempifp;	/* struct copy */
1845 
1846 	/*
1847 	 * Fix the on-disk inode values
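	 * to match the forks that were just exchanged.  di_nblocks includes
	 * attribute fork blocks, which stay put, hence the aforkblks and
	 * taforkblks adjustments.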
1848 	 */
1849 	tmp = (__uint64_t)ip->i_d.di_nblocks;
1850 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1851 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1852 
1853 	tmp = (__uint64_t) ip->i_d.di_nextents;
1854 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1855 	tip->i_d.di_nextents = tmp;
1856 
1857 	tmp = (__uint64_t) ip->i_d.di_format;
1858 	ip->i_d.di_format = tip->i_d.di_format;
1859 	tip->i_d.di_format = tmp;
1860 
1861 	/*
1862 	 * The extents in the source inode could still contain speculative
1863 	 * preallocation beyond EOF (e.g. the file is open but not modified
1864 	 * while defrag is in progress). In that case, we need to copy over the
1865 	 * number of delalloc blocks the data fork in the source inode is
1866 	 * tracking beyond EOF so that when the fork is truncated away when the
1867 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1868 	 * counter on that inode.
1869 	 */
1870 	ASSERT(tip->i_delayed_blks == 0);
1871 	tip->i_delayed_blks = ip->i_delayed_blks;
1872 	ip->i_delayed_blks = 0;
1873 
1874 	switch (ip->i_d.di_format) {
1875 	case XFS_DINODE_FMT_EXTENTS:
1876 		/*
1877 		 * If the extents fit in the inode, fix the pointer.  Otherwise
1878 		 * it's already NULL or pointing to the extent.
1879 		 */
1880 		nextents = xfs_iext_count(&ip->i_df);
1881 		if (nextents <= XFS_INLINE_EXTS)
1882 			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
1883 		(*src_log_flags) |= XFS_ILOG_DEXT;
1884 		break;
1885 	case XFS_DINODE_FMT_BTREE:
1886 		ASSERT(ip->i_d.di_version < 3 ||
1887 		       (*src_log_flags & XFS_ILOG_DOWNER));
1888 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1889 		break;
1890 	}
1891 
1892 	switch (tip->i_d.di_format) {
1893 	case XFS_DINODE_FMT_EXTENTS:
1894 		/*
1895 		 * If the extents fit in the inode, fix the pointer.  Otherwise
1896 		 * it's already NULL or pointing to the extent.
1897 		 */
1898 		nextents = xfs_iext_count(&tip->i_df);
1899 		if (nextents <= XFS_INLINE_EXTS)
1900 			tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
1901 		(*target_log_flags) |= XFS_ILOG_DEXT;
1902 		break;
1903 	case XFS_DINODE_FMT_BTREE:
1904 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1905 		ASSERT(tip->i_d.di_version < 3 ||
1906 		       (*target_log_flags & XFS_ILOG_DOWNER));
1907 		break;
1908 	}
1909 
1910 	return 0;
1911 }
1912 
1913 int
1914 xfs_swap_extents(
1915 	struct xfs_inode	*ip,	/* target inode */
1916 	struct xfs_inode	*tip,	/* tmp inode */
1917 	struct xfs_swapext	*sxp)
1918 {
1919 	struct xfs_mount	*mp = ip->i_mount;
1920 	struct xfs_trans	*tp;
1921 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1922 	int			src_log_flags, target_log_flags;
1923 	int			error = 0;
1924 	int			lock_flags;
1925 	struct xfs_ifork	*cowfp;
1926 	__uint64_t		f;
1927 	int			resblks;
1928 
1929 	/*
1930 	 * Lock the inodes against other IO, page faults and truncate to
1931 	 * begin with.  Then we can safely ensure the inodes are flushed and
1932 	 * have no page cache. Once we have done this we can take the ilocks and
1933 	 * do the rest of the checks.
1934 	 */
1935 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1936 	lock_flags = XFS_MMAPLOCK_EXCL;
1937 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
1938 
1939 	/* Verify that both files have the same format */
1940 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1941 		error = -EINVAL;
1942 		goto out_unlock;
1943 	}
1944 
1945 	/* Verify both files are either real-time or non-realtime */
1946 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1947 		error = -EINVAL;
1948 		goto out_unlock;
1949 	}
1950 
1951 	error = xfs_swap_extent_flush(ip);
1952 	if (error)
1953 		goto out_unlock;
1954 	error = xfs_swap_extent_flush(tip);
1955 	if (error)
1956 		goto out_unlock;
1957 
1958 	/*
1959 	 * Extent "swapping" with rmap requires a permanent reservation and
1960 	 * a block reservation because it's really just a remap operation
1961 	 * performed with log redo items!
1962 	 */
1963 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1964 		/*
1965 		 * Conceptually this shouldn't affect the shape of either
1966 		 * bmbt, but since we atomically move extents one by one,
1967 		 * we reserve enough space to rebuild both trees.
1968 		 */
1969 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
1970 				XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
1971 				XFS_DATA_FORK) +
1972 			  XFS_SWAP_RMAP_SPACE_RES(mp,
1973 				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
1974 				XFS_DATA_FORK);
1975 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
1976 				0, 0, &tp);
1977 	} else
1978 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0,
1979 				0, 0, &tp);
1980 	if (error)
1981 		goto out_unlock;
1982 
1983 	/*
1984 	 * Lock and join the inodes to the transaction so that transaction commit
1985 	 * or cancel will unlock the inodes from this point onwards.
1986 	 */
1987 	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1988 	lock_flags |= XFS_ILOCK_EXCL;
1989 	xfs_trans_ijoin(tp, ip, 0);
1990 	xfs_trans_ijoin(tp, tip, 0);
1991 
1992 
1993 	/* Verify all data are being swapped */
1994 	if (sxp->sx_offset != 0 ||
1995 	    sxp->sx_length != ip->i_d.di_size ||
1996 	    sxp->sx_length != tip->i_d.di_size) {
1997 		error = -EFAULT;
1998 		goto out_trans_cancel;
1999 	}
2000 
2001 	trace_xfs_swap_extent_before(ip, 0);
2002 	trace_xfs_swap_extent_before(tip, 1);
2003 
2004 	/* check inode formats now that data is flushed */
2005 	error = xfs_swap_extents_check_format(ip, tip);
2006 	if (error) {
2007 		xfs_notice(mp,
2008 		    "%s: inode 0x%llx format is incompatible for exchanging.",
2009 				__func__, ip->i_ino);
2010 		goto out_trans_cancel;
2011 	}
2012 
2013 	/*
2014 	 * Compare the current change & modify times with those
2015 	 * passed in.  If they differ, we abort this swap.
2016 	 * This is the mechanism used to assure the calling
2017 	 * process that the file was not changed out from
2018 	 * under it.
2019 	 */
2020 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
2021 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
2022 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
2023 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
2024 		error = -EBUSY;
2025 		goto out_trans_cancel;
2026 	}
2027 
2028 	/*
2029 	 * Note the trickiness in setting the log flags - we set the owner log
2030 	 * flag on the opposite inode (i.e. the inode we are setting the new
2031 	 * owner to be) because once we swap the forks and log that, log
2032 	 * recovery is going to see the fork as owned by the swapped inode,
2033 	 * not the pre-swapped inodes.
2034 	 */
2035 	src_log_flags = XFS_ILOG_CORE;
2036 	target_log_flags = XFS_ILOG_CORE;
2037 
2038 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2039 		error = xfs_swap_extent_rmap(&tp, ip, tip);
2040 	else
2041 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
2042 				&target_log_flags);
2043 	if (error)
2044 		goto out_trans_cancel;
2045 
2046 	/*
	 * Do we have to swap reflink flags?  The CoW fork and the reflink
	 * flag belong with the data fork we just exchanged, so move them
	 * between the inodes as well.
	 */
2047 	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
2048 	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
2049 		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2050 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2051 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2052 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2053 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
2054 		cowfp = ip->i_cowfp;
2055 		ip->i_cowfp = tip->i_cowfp;
2056 		tip->i_cowfp = cowfp;
2057 		xfs_inode_set_cowblocks_tag(ip);
2058 		xfs_inode_set_cowblocks_tag(tip);
2059 	}
2060 
2061 	xfs_trans_log_inode(tp, ip,  src_log_flags);
2062 	xfs_trans_log_inode(tp, tip, target_log_flags);
2063 
2064 	/*
2065 	 * If this is a synchronous mount, make sure that the
2066 	 * transaction goes to disk before returning to the user.
2067 	 */
2068 	if (mp->m_flags & XFS_MOUNT_WSYNC)
2069 		xfs_trans_set_sync(tp);
2070 
2071 	error = xfs_trans_commit(tp);
2072 
2073 	trace_xfs_swap_extent_after(ip, 0);
2074 	trace_xfs_swap_extent_after(tip, 1);
2075 
2076 out_unlock:
2077 	xfs_iunlock(ip, lock_flags);
2078 	xfs_iunlock(tip, lock_flags);
2079 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2080 	return error;
2081 
2082 out_trans_cancel:
2083 	xfs_trans_cancel(tp);
2084 	goto out_unlock;
2085 }
2086
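/*
 * For context: the swap above is normally driven from userspace by a
 * defragmenter such as xfs_fsr through the XFS_IOC_SWAPEXT ioctl.  The
 * sketch below is illustrative only and is not part of this file's build;
 * it assumes the xfsprogs userspace headers (<xfs/xfs.h>) and leaves out
 * the bulkstat call that fills in sx_stat.  Treat it as a best-effort
 * outline rather than a reference implementation.
 *
 *	#include <sys/ioctl.h>
 *	#include <xfs/xfs.h>		// struct xfs_swapext, XFS_IOC_SWAPEXT
 *
 *	// target_fd: file being defragmented; tmp_fd: donor file laid out
 *	// contiguously; target_bstat: bulkstat of the target taken before
 *	// the copy (its ctime/mtime feed the -EBUSY guard above).
 *	static int swap_file_extents(int target_fd, int tmp_fd, off_t length,
 *				     const struct xfs_bstat *target_bstat)
 *	{
 *		struct xfs_swapext	sx = { 0 };
 *
 *		sx.sx_version = XFS_SX_VERSION;
 *		sx.sx_fdtarget = target_fd;
 *		sx.sx_fdtmp = tmp_fd;
 *		sx.sx_offset = 0;		// whole-file swaps only
 *		sx.sx_length = length;		// must equal both files' di_size
 *		sx.sx_stat = *target_bstat;
 *
 *		// Ends up in xfs_swap_extents() via the ioctl handler.
 *		return ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
 *	}
 */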