xref: /openbmc/linux/fs/xfs/xfs_bmap_util.c (revision d3964221)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * Copyright (c) 2012 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_defer.h"
29 #include "xfs_inode.h"
30 #include "xfs_btree.h"
31 #include "xfs_trans.h"
32 #include "xfs_extfree_item.h"
33 #include "xfs_alloc.h"
34 #include "xfs_bmap.h"
35 #include "xfs_bmap_util.h"
36 #include "xfs_bmap_btree.h"
37 #include "xfs_rtalloc.h"
38 #include "xfs_error.h"
39 #include "xfs_quota.h"
40 #include "xfs_trans_space.h"
41 #include "xfs_trace.h"
42 #include "xfs_icache.h"
43 #include "xfs_log.h"
44 #include "xfs_rmap_btree.h"
45 #include "xfs_iomap.h"
46 #include "xfs_reflink.h"
47 #include "xfs_refcount.h"
48 
49 /* Kernel only BMAP related definitions and functions */
50 
51 /*
52  * Convert the given file system block to a disk block.  We have to treat it
53  * differently based on whether the file is a real time file or not, because the
54  * bmap code does.
55  */
56 xfs_daddr_t
57 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
58 {
59 	return (XFS_IS_REALTIME_INODE(ip) ?
60 		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
61 		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
62 }
63 
64 /*
65  * Routine to zero an extent on disk allocated to the specific inode.
66  *
67  * The VFS functions take a linearised filesystem block offset, so we have to
68  * convert the sparse xfs fsb to the right format first.
69  * VFS types are real funky, too.
70  */
71 int
72 xfs_zero_extent(
73 	struct xfs_inode *ip,
74 	xfs_fsblock_t	start_fsb,
75 	xfs_off_t	count_fsb)
76 {
77 	struct xfs_mount *mp = ip->i_mount;
78 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
79 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
80 
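	/*
	 * blkdev_issue_zeroout() takes its offset and length in 512-byte
	 * sectors, so shift the filesystem block values by
	 * (s_blocksize_bits - 9) to convert them; e.g. for a 4k block size
	 * this multiplies by 8 sectors per block.
	 */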
81 	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
82 		block << (mp->m_super->s_blocksize_bits - 9),
83 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
84 		GFP_NOFS, 0);
85 }
86 
87 #ifdef CONFIG_XFS_RT
88 int
89 xfs_bmap_rtalloc(
90 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
91 {
92 	int		error;		/* error return value */
93 	xfs_mount_t	*mp;		/* mount point structure */
94 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
95 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
96 	xfs_extlen_t	align;		/* minimum allocation alignment */
97 	xfs_rtblock_t	rtb;
98 
99 	mp = ap->ip->i_mount;
100 	align = xfs_get_extsz_hint(ap->ip);
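	/*
	 * prod expresses the extent size hint in whole realtime extents; it
	 * is passed to xfs_rtallocate_extent() below as the allocation
	 * product factor, unless the request turns out to be unaligned.
	 */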
101 	prod = align / mp->m_sb.sb_rextsize;
102 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
103 					align, 1, ap->eof, 0,
104 					ap->conv, &ap->offset, &ap->length);
105 	if (error)
106 		return error;
107 	ASSERT(ap->length);
108 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
109 
110 	/*
111 	 * If the offset & length are not perfectly aligned
112 	 * then kill prod, it will just get us in trouble.
113 	 */
114 	if (do_mod(ap->offset, align) || ap->length % align)
115 		prod = 1;
116 	/*
117 	 * Set ralen to be the actual requested length in rtextents.
118 	 */
119 	ralen = ap->length / mp->m_sb.sb_rextsize;
120 	/*
121 	 * If the old value was close enough to MAXEXTLEN that
122 	 * we rounded up to it, cut it back so it's valid again.
123 	 * Note that if it's a really large request (bigger than
124 	 * MAXEXTLEN), we don't hear about that number, and can't
125 	 * adjust the starting point to match it.
126 	 */
127 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
128 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
129 
130 	/*
131 	 * Lock out modifications to both the RT bitmap and summary inodes
132 	 */
133 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
134 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
135 	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
136 	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
137 
138 	/*
139 	 * If it's an allocation to an empty file at offset 0,
140 	 * pick an extent that will space things out in the rt area.
141 	 */
142 	if (ap->eof && ap->offset == 0) {
143 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
144 
145 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
146 		if (error)
147 			return error;
148 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
149 	} else {
150 		ap->blkno = 0;
151 	}
152 
153 	xfs_bmap_adjacent(ap);
154 
155 	/*
156 	 * Realtime allocation, done through xfs_rtallocate_extent.
157 	 */
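	/*
	 * The realtime allocator works in units of realtime extents, so
	 * convert the target block number from filesystem blocks before
	 * calling it.
	 */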
158 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
159 	rtb = ap->blkno;
160 	ap->length = ralen;
161 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
162 				&ralen, ap->wasdel, prod, &rtb);
163 	if (error)
164 		return error;
165 
166 	ap->blkno = rtb;
167 	if (ap->blkno != NULLFSBLOCK) {
168 		ap->blkno *= mp->m_sb.sb_rextsize;
169 		ralen *= mp->m_sb.sb_rextsize;
170 		ap->length = ralen;
171 		ap->ip->i_d.di_nblocks += ralen;
172 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
173 		if (ap->wasdel)
174 			ap->ip->i_delayed_blks -= ralen;
175 		/*
176 		 * Adjust the disk quota also. This was reserved
177 		 * earlier.
178 		 */
179 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
180 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
181 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
182 
183 		/* Zero the extent if we were asked to do so */
184 		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
185 			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
186 			if (error)
187 				return error;
188 		}
189 	} else {
190 		ap->length = 0;
191 	}
192 	return 0;
193 }
194 #endif /* CONFIG_XFS_RT */
195 
196 /*
197  * Check if the endoff is outside the last extent. If so the caller will grow
198  * the allocation to a stripe unit boundary.  All offsets are considered outside
199  * the end of file for an empty fork, so 1 is returned in *eof in that case.
200  */
201 int
202 xfs_bmap_eof(
203 	struct xfs_inode	*ip,
204 	xfs_fileoff_t		endoff,
205 	int			whichfork,
206 	int			*eof)
207 {
208 	struct xfs_bmbt_irec	rec;
209 	int			error;
210 
211 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
212 	if (error || *eof)
213 		return error;
214 
215 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
216 	return 0;
217 }
218 
219 /*
220  * Extent tree block counting routines.
221  */
222 
223 /*
224  * Count leaf blocks given a range of extent records.  Delayed allocation
225  * extents are not counted towards the totals.
226  */
227 xfs_extnum_t
228 xfs_bmap_count_leaves(
229 	struct xfs_ifork	*ifp,
230 	xfs_filblks_t		*count)
231 {
232 	struct xfs_bmbt_irec	got;
233 	xfs_extnum_t		numrecs = 0, i = 0;
234 
235 	while (xfs_iext_get_extent(ifp, i++, &got)) {
236 		if (!isnullstartblock(got.br_startblock)) {
237 			*count += got.br_blockcount;
238 			numrecs++;
239 		}
240 	}
241 	return numrecs;
242 }
243 
244 /*
245  * Count leaf blocks given a range of extent records originally
246  * in btree format.
247  */
248 STATIC void
249 xfs_bmap_disk_count_leaves(
250 	struct xfs_mount	*mp,
251 	struct xfs_btree_block	*block,
252 	int			numrecs,
253 	xfs_filblks_t		*count)
254 {
255 	int		b;
256 	xfs_bmbt_rec_t	*frp;
257 
258 	for (b = 1; b <= numrecs; b++) {
259 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
260 		*count += xfs_bmbt_disk_get_blockcount(frp);
261 	}
262 }
263 
264 /*
265  * Recursively walks each level of a btree
266  * to count total fsblocks in use.
267  */
268 STATIC int
269 xfs_bmap_count_tree(
270 	struct xfs_mount	*mp,
271 	struct xfs_trans	*tp,
272 	struct xfs_ifork	*ifp,
273 	xfs_fsblock_t		blockno,
274 	int			levelin,
275 	xfs_extnum_t		*nextents,
276 	xfs_filblks_t		*count)
277 {
278 	int			error;
279 	struct xfs_buf		*bp, *nbp;
280 	int			level = levelin;
281 	__be64			*pp;
282 	xfs_fsblock_t           bno = blockno;
283 	xfs_fsblock_t		nextbno;
284 	struct xfs_btree_block	*block, *nextblock;
285 	int			numrecs;
286 
287 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
288 						&xfs_bmbt_buf_ops);
289 	if (error)
290 		return error;
291 	*count += 1;
292 	block = XFS_BUF_TO_BLOCK(bp);
293 
294 	if (--level) {
295 		/* Not at node above leaves, count this level of nodes */
296 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
297 		while (nextbno != NULLFSBLOCK) {
298 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
299 						XFS_BMAP_BTREE_REF,
300 						&xfs_bmbt_buf_ops);
301 			if (error)
302 				return error;
303 			*count += 1;
304 			nextblock = XFS_BUF_TO_BLOCK(nbp);
305 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
306 			xfs_trans_brelse(tp, nbp);
307 		}
308 
309 		/* Dive to the next level */
310 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
311 		bno = be64_to_cpu(*pp);
312 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
313 				count);
314 		if (error) {
315 			xfs_trans_brelse(tp, bp);
316 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
317 					 XFS_ERRLEVEL_LOW, mp);
318 			return -EFSCORRUPTED;
319 		}
320 		xfs_trans_brelse(tp, bp);
321 	} else {
322 		/* count all level 1 nodes and their leaves */
323 		for (;;) {
324 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
325 			numrecs = be16_to_cpu(block->bb_numrecs);
326 			(*nextents) += numrecs;
327 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
328 			xfs_trans_brelse(tp, bp);
329 			if (nextbno == NULLFSBLOCK)
330 				break;
331 			bno = nextbno;
332 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
333 						XFS_BMAP_BTREE_REF,
334 						&xfs_bmbt_buf_ops);
335 			if (error)
336 				return error;
337 			*count += 1;
338 			block = XFS_BUF_TO_BLOCK(bp);
339 		}
340 	}
341 	return 0;
342 }
343 
344 /*
345  * Count fsblocks of the given fork.  Delayed allocation extents are
346  * not counted towards the totals.
347  */
348 int
349 xfs_bmap_count_blocks(
350 	struct xfs_trans	*tp,
351 	struct xfs_inode	*ip,
352 	int			whichfork,
353 	xfs_extnum_t		*nextents,
354 	xfs_filblks_t		*count)
355 {
356 	struct xfs_mount	*mp;	/* file system mount structure */
357 	__be64			*pp;	/* pointer to block address */
358 	struct xfs_btree_block	*block;	/* current btree block */
359 	struct xfs_ifork	*ifp;	/* fork structure */
360 	xfs_fsblock_t		bno;	/* block # of "block" */
361 	int			level;	/* btree level, for checking */
362 	int			error;
363 
364 	bno = NULLFSBLOCK;
365 	mp = ip->i_mount;
366 	*nextents = 0;
367 	*count = 0;
368 	ifp = XFS_IFORK_PTR(ip, whichfork);
369 	if (!ifp)
370 		return 0;
371 
372 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
373 	case XFS_DINODE_FMT_EXTENTS:
374 		*nextents = xfs_bmap_count_leaves(ifp, count);
375 		return 0;
376 	case XFS_DINODE_FMT_BTREE:
377 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
378 			error = xfs_iread_extents(tp, ip, whichfork);
379 			if (error)
380 				return error;
381 		}
382 
383 		/*
384 		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
385 		 */
386 		block = ifp->if_broot;
387 		level = be16_to_cpu(block->bb_level);
388 		ASSERT(level > 0);
389 		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
390 		bno = be64_to_cpu(*pp);
391 		ASSERT(bno != NULLFSBLOCK);
392 		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
393 		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
394 
395 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
396 				nextents, count);
397 		if (error) {
398 			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
399 					XFS_ERRLEVEL_LOW, mp);
400 			return -EFSCORRUPTED;
401 		}
402 		return 0;
403 	}
404 
405 	return 0;
406 }
407 
408 /*
409  * returns 1 for success, 0 if we failed to map the extent.
410  */
411 STATIC int
412 xfs_getbmapx_fix_eof_hole(
413 	xfs_inode_t		*ip,		/* xfs incore inode pointer */
414 	int			whichfork,
415 	struct getbmapx		*out,		/* output structure */
416 	int			prealloced,	/* this is a file with
417 						 * preallocated data space */
418 	int64_t			end,		/* last block requested */
419 	xfs_fsblock_t		startblock,
420 	bool			moretocome)
421 {
422 	int64_t			fixlen;
423 	xfs_mount_t		*mp;		/* file system mount point */
424 	xfs_ifork_t		*ifp;		/* inode fork pointer */
425 	xfs_extnum_t		lastx;		/* last extent pointer */
426 	xfs_fileoff_t		fileblock;
427 
428 	if (startblock == HOLESTARTBLOCK) {
429 		mp = ip->i_mount;
430 		out->bmv_block = -1;
431 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
432 		fixlen -= out->bmv_offset;
433 		if (prealloced && out->bmv_offset + out->bmv_length == end) {
434 			/* Came to hole at EOF. Trim it. */
435 			if (fixlen <= 0)
436 				return 0;
437 			out->bmv_length = fixlen;
438 		}
439 	} else {
440 		if (startblock == DELAYSTARTBLOCK)
441 			out->bmv_block = -2;
442 		else
443 			out->bmv_block = xfs_fsb_to_db(ip, startblock);
444 		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
445 		ifp = XFS_IFORK_PTR(ip, whichfork);
446 		if (!moretocome &&
447 		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
448 		   (lastx == xfs_iext_count(ifp) - 1))
449 			out->bmv_oflags |= BMV_OF_LAST;
450 	}
451 
452 	return 1;
453 }
454 
455 /* Adjust the reported bmap around shared/unshared extent transitions. */
456 STATIC int
457 xfs_getbmap_adjust_shared(
458 	struct xfs_inode		*ip,
459 	int				whichfork,
460 	struct xfs_bmbt_irec		*map,
461 	struct getbmapx			*out,
462 	struct xfs_bmbt_irec		*next_map)
463 {
464 	struct xfs_mount		*mp = ip->i_mount;
465 	xfs_agnumber_t			agno;
466 	xfs_agblock_t			agbno;
467 	xfs_agblock_t			ebno;
468 	xfs_extlen_t			elen;
469 	xfs_extlen_t			nlen;
470 	int				error;
471 
472 	next_map->br_startblock = NULLFSBLOCK;
473 	next_map->br_startoff = NULLFILEOFF;
474 	next_map->br_blockcount = 0;
475 
476 	/* Only written data blocks can be shared. */
477 	if (!xfs_is_reflink_inode(ip) ||
478 	    whichfork != XFS_DATA_FORK ||
479 	    !xfs_bmap_is_real_extent(map))
480 		return 0;
481 
482 	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
483 	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
484 	error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
485 			map->br_blockcount, &ebno, &elen, true);
486 	if (error)
487 		return error;
488 
489 	if (ebno == NULLAGBLOCK) {
490 		/* No shared blocks at all. */
491 		return 0;
492 	} else if (agbno == ebno) {
493 		/*
494 		 * Shared extent at (agbno, elen).  Shrink the reported
495 		 * extent length and prepare to move the start of map[i]
496 		 * to agbno+elen, with the aim of (re)formatting the new
497 		 * map[i] the next time through the inner loop.
498 		 */
499 		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
500 		out->bmv_oflags |= BMV_OF_SHARED;
501 		if (elen != map->br_blockcount) {
502 			*next_map = *map;
503 			next_map->br_startblock += elen;
504 			next_map->br_startoff += elen;
505 			next_map->br_blockcount -= elen;
506 		}
507 		map->br_blockcount -= elen;
508 	} else {
509 		/*
510 		 * There's an unshared extent (agbno, ebno - agbno)
511 		 * followed by shared extent at (ebno, elen).  Shrink
512 		 * the reported extent length to cover only the unshared
513 		 * extent and prepare to move up the start of map[i] to
514 		 * ebno, with the aim of (re)formatting the new map[i]
515 		 * the next time through the inner loop.
516 		 */
517 		*next_map = *map;
518 		nlen = ebno - agbno;
519 		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
520 		next_map->br_startblock += nlen;
521 		next_map->br_startoff += nlen;
522 		next_map->br_blockcount -= nlen;
523 		map->br_blockcount -= nlen;
524 	}
525 
526 	return 0;
527 }
528 
529 /*
530  * Get inode's extents as described in bmv, and format for output.
531  * Calls formatter to fill the user's buffer until all extents
532  * are mapped, until the passed-in bmv->bmv_count slots have
533  * been filled, or until the formatter short-circuits the loop,
534  * if it is tracking filled-in extents on its own.
535  */
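/*
 * A formatter has the xfs_bmap_format_t signature used near the bottom of
 * this function, i.e. it is called as formatter(&arg, &out[i]).  A minimal
 * sketch of one (hypothetical, for illustration only) that copies each
 * record to a user buffer and advances the cursor might look like:
 *
 *	STATIC int
 *	example_getbmap_format(void **ap, struct getbmapx *bmv)
 *	{
 *		struct getbmapx __user *base = *ap;
 *
 *		if (copy_to_user(base, bmv, sizeof(*bmv)))
 *			return -EFAULT;
 *		*ap = base + 1;
 *		return 0;
 *	}
 */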
536 int						/* error code */
537 xfs_getbmap(
538 	xfs_inode_t		*ip,
539 	struct getbmapx		*bmv,		/* user bmap structure */
540 	xfs_bmap_format_t	formatter,	/* format to user */
541 	void			*arg)		/* formatter arg */
542 {
543 	int64_t			bmvend;		/* last block requested */
544 	int			error = 0;	/* return value */
545 	int64_t			fixlen;		/* length for -1 case */
546 	int			i;		/* extent number */
547 	int			lock;		/* lock state */
548 	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
549 	xfs_mount_t		*mp;		/* file system mount point */
550 	int			nex;		/* # of user extents can do */
551 	int			subnex;		/* # of bmapi's can do */
552 	int			nmap;		/* number of map entries */
553 	struct getbmapx		*out;		/* output structure */
554 	int			whichfork;	/* data or attr fork */
555 	int			prealloced;	/* this is a file with
556 						 * preallocated data space */
557 	int			iflags;		/* interface flags */
558 	int			bmapi_flags;	/* flags for xfs_bmapi */
559 	int			cur_ext = 0;
560 	struct xfs_bmbt_irec	inject_map;
561 
562 	mp = ip->i_mount;
563 	iflags = bmv->bmv_iflags;
564 
565 #ifndef DEBUG
566 	/* Only allow CoW fork queries if we're debugging. */
567 	if (iflags & BMV_IF_COWFORK)
568 		return -EINVAL;
569 #endif
570 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
571 		return -EINVAL;
572 
573 	if (iflags & BMV_IF_ATTRFORK)
574 		whichfork = XFS_ATTR_FORK;
575 	else if (iflags & BMV_IF_COWFORK)
576 		whichfork = XFS_COW_FORK;
577 	else
578 		whichfork = XFS_DATA_FORK;
579 
580 	switch (whichfork) {
581 	case XFS_ATTR_FORK:
582 		if (XFS_IFORK_Q(ip)) {
583 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
584 			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
585 			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
586 				return -EINVAL;
587 		} else if (unlikely(
588 			   ip->i_d.di_aformat != 0 &&
589 			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
590 			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
591 					 ip->i_mount);
592 			return -EFSCORRUPTED;
593 		}
594 
595 		prealloced = 0;
596 		fixlen = 1LL << 32;
597 		break;
598 	case XFS_COW_FORK:
599 		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
600 			return -EINVAL;
601 
602 		if (xfs_get_cowextsz_hint(ip)) {
603 			prealloced = 1;
604 			fixlen = mp->m_super->s_maxbytes;
605 		} else {
606 			prealloced = 0;
607 			fixlen = XFS_ISIZE(ip);
608 		}
609 		break;
610 	default:
611 		/* Local format data forks report no extents. */
612 		if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
613 			bmv->bmv_entries = 0;
614 			return 0;
615 		}
616 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
617 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
618 			return -EINVAL;
619 
620 		if (xfs_get_extsz_hint(ip) ||
621 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
622 			prealloced = 1;
623 			fixlen = mp->m_super->s_maxbytes;
624 		} else {
625 			prealloced = 0;
626 			fixlen = XFS_ISIZE(ip);
627 		}
628 		break;
629 	}
630 
631 	if (bmv->bmv_length == -1) {
632 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
633 		bmv->bmv_length =
634 			max_t(int64_t, fixlen - bmv->bmv_offset, 0);
635 	} else if (bmv->bmv_length == 0) {
636 		bmv->bmv_entries = 0;
637 		return 0;
638 	} else if (bmv->bmv_length < 0) {
639 		return -EINVAL;
640 	}
641 
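	/*
	 * The first entry of the caller's array is the getbmapx header
	 * itself, so at most bmv_count - 1 entries are available for
	 * extent records.
	 */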
642 	nex = bmv->bmv_count - 1;
643 	if (nex <= 0)
644 		return -EINVAL;
645 	bmvend = bmv->bmv_offset + bmv->bmv_length;
646 
647 
648 	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
649 		return -ENOMEM;
650 	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
651 	if (!out)
652 		return -ENOMEM;
653 
654 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
655 	switch (whichfork) {
656 	case XFS_DATA_FORK:
657 		if (!(iflags & BMV_IF_DELALLOC) &&
658 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
659 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
660 			if (error)
661 				goto out_unlock_iolock;
662 
663 			/*
664 			 * Even after flushing the inode, there can still be
665 			 * delalloc blocks on the inode beyond EOF due to
666 			 * speculative preallocation.  These are not removed
667 			 * until the release function is called or the inode
668 			 * is inactivated.  Hence we cannot assert here that
669 			 * ip->i_delayed_blks == 0.
670 			 */
671 		}
672 
673 		lock = xfs_ilock_data_map_shared(ip);
674 		break;
675 	case XFS_COW_FORK:
676 		lock = XFS_ILOCK_SHARED;
677 		xfs_ilock(ip, lock);
678 		break;
679 	case XFS_ATTR_FORK:
680 		lock = xfs_ilock_attr_map_shared(ip);
681 		break;
682 	}
683 
684 	/*
685 	 * Don't let nex be bigger than the number of extents
686 	 * we can have assuming alternating holes and real extents.
687 	 */
688 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
689 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
690 
691 	bmapi_flags = xfs_bmapi_aflag(whichfork);
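	/*
	 * Unless the caller asked for unwritten extents to be reported
	 * (BMV_IF_PREALLOC), pass XFS_BMAPI_IGSTATE so the mapping ignores
	 * the written/unwritten state of extents.
	 */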
692 	if (!(iflags & BMV_IF_PREALLOC))
693 		bmapi_flags |= XFS_BMAPI_IGSTATE;
694 
695 	/*
696 	 * Allocate enough space to handle "subnex" maps at a time.
697 	 */
698 	error = -ENOMEM;
699 	subnex = 16;
700 	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
701 	if (!map)
702 		goto out_unlock_ilock;
703 
704 	bmv->bmv_entries = 0;
705 
706 	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
707 	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
708 		error = 0;
709 		goto out_free_map;
710 	}
711 
712 	do {
713 		nmap = (nex > subnex) ? subnex : nex;
714 		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
715 				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
716 				       map, &nmap, bmapi_flags);
717 		if (error)
718 			goto out_free_map;
719 		ASSERT(nmap <= subnex);
720 
721 		for (i = 0; i < nmap && bmv->bmv_length &&
722 				cur_ext < bmv->bmv_count - 1; i++) {
723 			out[cur_ext].bmv_oflags = 0;
724 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
725 				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
726 			else if (map[i].br_startblock == DELAYSTARTBLOCK)
727 				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
728 			out[cur_ext].bmv_offset =
729 				XFS_FSB_TO_BB(mp, map[i].br_startoff);
730 			out[cur_ext].bmv_length =
731 				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
732 			out[cur_ext].bmv_unused1 = 0;
733 			out[cur_ext].bmv_unused2 = 0;
734 
735 			/*
736 			 * delayed allocation extents that start beyond EOF can
737 			 * occur due to speculative EOF allocation when the
738 			 * delalloc extent is larger than the largest freespace
739 			 * extent at conversion time. These extents cannot be
740 			 * converted by data writeback, so can exist here even
741 			 * if we are not supposed to be finding delalloc
742 			 * extents.
743 			 */
744 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
745 			    map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
746 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
747 
748 			if (map[i].br_startblock == HOLESTARTBLOCK &&
749 			    whichfork == XFS_ATTR_FORK) {
750 				/* came to the end of attribute fork */
751 				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
752 				goto out_free_map;
753 			}
754 
755 			/* Is this a shared block? */
756 			error = xfs_getbmap_adjust_shared(ip, whichfork,
757 					&map[i], &out[cur_ext], &inject_map);
758 			if (error)
759 				goto out_free_map;
760 
761 			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
762 					&out[cur_ext], prealloced, bmvend,
763 					map[i].br_startblock,
764 					inject_map.br_startblock != NULLFSBLOCK))
765 				goto out_free_map;
766 
767 			bmv->bmv_offset =
768 				out[cur_ext].bmv_offset +
769 				out[cur_ext].bmv_length;
770 			bmv->bmv_length =
771 				max_t(int64_t, 0, bmvend - bmv->bmv_offset);
772 
773 			/*
774 			 * In case we don't want to return the hole,
775 			 * don't increase cur_ext so that we can reuse
776 			 * it in the next loop.
777 			 */
778 			if ((iflags & BMV_IF_NO_HOLES) &&
779 			    map[i].br_startblock == HOLESTARTBLOCK) {
780 				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
781 				continue;
782 			}
783 
784 			/*
785 			 * In order to report shared extents accurately,
786 			 * we report each distinct shared/unshared part
787 			 * of a single bmbt record using multiple bmap
788 			 * extents.  To make that happen, we iterate the
789 			 * same map array item multiple times, each
790 			 * time trimming out the subextent that we just
791 			 * reported.
792 			 *
793 			 * Because of this, we must check the out array
794 			 * index (cur_ext) directly against bmv_count-1
795 			 * to avoid overflows.
796 			 */
797 			if (inject_map.br_startblock != NULLFSBLOCK) {
798 				map[i] = inject_map;
799 				i--;
800 			}
801 			bmv->bmv_entries++;
802 			cur_ext++;
803 		}
804 	} while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
805 
806  out_free_map:
807 	kmem_free(map);
808  out_unlock_ilock:
809 	xfs_iunlock(ip, lock);
810  out_unlock_iolock:
811 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
812 
813 	for (i = 0; i < cur_ext; i++) {
814 		/* format results & advance arg */
815 		error = formatter(&arg, &out[i]);
816 		if (error)
817 			break;
818 	}
819 
820 	kmem_free(out);
821 	return error;
822 }
823 
824 /*
825  * dead simple method of punching delayed allocation blocks from a range in
826  * the inode. Walks a block at a time so will be slow, but is only executed in
827  * rare error cases so the overhead is not critical. This will always punch out
828  * both the start and end blocks, even if the ranges only partially overlap
829  * them, so it is up to the caller to ensure that partial blocks are not
830  * passed in.
831  */
832 int
833 xfs_bmap_punch_delalloc_range(
834 	struct xfs_inode	*ip,
835 	xfs_fileoff_t		start_fsb,
836 	xfs_fileoff_t		length)
837 {
838 	xfs_fileoff_t		remaining = length;
839 	int			error = 0;
840 
841 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
842 
843 	do {
844 		int		done;
845 		xfs_bmbt_irec_t	imap;
846 		int		nimaps = 1;
847 		xfs_fsblock_t	firstblock;
848 		struct xfs_defer_ops dfops;
849 
850 		/*
851 		 * Map the range first and check that it is a delalloc extent
852 		 * before trying to unmap the range. Otherwise we will be
853 		 * trying to remove a real extent (which requires a
854 		 * transaction) or a hole, which is probably a bad idea...
855 		 */
856 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
857 				       XFS_BMAPI_ENTIRE);
858 
859 		if (error) {
860 			/* something screwed, just bail */
861 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
862 				xfs_alert(ip->i_mount,
863 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
864 						ip->i_ino, start_fsb);
865 			}
866 			break;
867 		}
868 		if (!nimaps) {
869 			/* nothing there */
870 			goto next_block;
871 		}
872 		if (imap.br_startblock != DELAYSTARTBLOCK) {
873 			/* been converted, ignore */
874 			goto next_block;
875 		}
876 		WARN_ON(imap.br_blockcount == 0);
877 
878 		/*
879 		 * Note: while we initialise the firstblock/dfops pair, they
880 		 * should never be used because blocks should never be
881 		 * allocated or freed for a delalloc extent and hence we need
882 		 * allocated or freed for a delalloc extent and hence we don't
883 		 * need to cancel or finish them after the xfs_bunmapi() call.
884 		xfs_defer_init(&dfops, &firstblock);
885 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
886 					&dfops, &done);
887 		if (error)
888 			break;
889 
890 		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
891 next_block:
892 		start_fsb++;
893 		remaining--;
894 	} while (remaining > 0);
895 
896 	return error;
897 }
898 
899 /*
900  * Test whether it is appropriate to check an inode for and free post EOF
901  * blocks. The 'force' parameter determines whether we should also consider
902  * regular files that are marked preallocated or append-only.
903  */
904 bool
905 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
906 {
907 	/* prealloc/delalloc exists only on regular files */
908 	if (!S_ISREG(VFS_I(ip)->i_mode))
909 		return false;
910 
911 	/*
912 	 * Zero sized files with no cached pages and no delalloc blocks will
913 	 * not have speculative prealloc/delalloc blocks to remove.
914 	 */
915 	if (VFS_I(ip)->i_size == 0 &&
916 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
917 	    ip->i_delayed_blks == 0)
918 		return false;
919 
920 	/* If we haven't read in the extent list, then don't do it now. */
921 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
922 		return false;
923 
924 	/*
925 	 * Do not free real preallocated or append-only files unless the file
926 	 * has delalloc blocks and we are forced to remove them.
927 	 */
928 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
929 		if (!force || ip->i_delayed_blks == 0)
930 			return false;
931 
932 	return true;
933 }
934 
935 /*
936  * This is called to free any blocks beyond eof. The caller must hold
937  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
938  * reference to the inode.
939  */
940 int
941 xfs_free_eofblocks(
942 	struct xfs_inode	*ip)
943 {
944 	struct xfs_trans	*tp;
945 	int			error;
946 	xfs_fileoff_t		end_fsb;
947 	xfs_fileoff_t		last_fsb;
948 	xfs_filblks_t		map_len;
949 	int			nimaps;
950 	struct xfs_bmbt_irec	imap;
951 	struct xfs_mount	*mp = ip->i_mount;
952 
953 	/*
954 	 * Figure out if there are any blocks beyond the end
955 	 * of the file.  If not, then there is nothing to do.
956 	 */
957 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
958 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
959 	if (last_fsb <= end_fsb)
960 		return 0;
961 	map_len = last_fsb - end_fsb;
962 
963 	nimaps = 1;
964 	xfs_ilock(ip, XFS_ILOCK_SHARED);
965 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
966 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
967 
968 	/*
969 	 * If there are blocks after the end of file, truncate the file to its
970 	 * current size to free them up.
971 	 */
972 	if (!error && (nimaps != 0) &&
973 	    (imap.br_startblock != HOLESTARTBLOCK ||
974 	     ip->i_delayed_blks)) {
975 		/*
976 		 * Attach the dquots to the inode up front.
977 		 */
978 		error = xfs_qm_dqattach(ip, 0);
979 		if (error)
980 			return error;
981 
982 		/* wait on dio to ensure i_size has settled */
983 		inode_dio_wait(VFS_I(ip));
984 
985 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
986 				&tp);
987 		if (error) {
988 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
989 			return error;
990 		}
991 
992 		xfs_ilock(ip, XFS_ILOCK_EXCL);
993 		xfs_trans_ijoin(tp, ip, 0);
994 
995 		/*
996 		 * Do not update the on-disk file size.  If we update the
997 		 * on-disk file size and then the system crashes before the
998 		 * contents of the file are flushed to disk then the files
999 		 * may be full of holes (i.e. the NULL files bug).
1000 		 */
1001 		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
1002 					      XFS_ISIZE(ip));
1003 		if (error) {
1004 			/*
1005 			 * If we get an error at this point we simply don't
1006 			 * bother truncating the file.
1007 			 */
1008 			xfs_trans_cancel(tp);
1009 		} else {
1010 			error = xfs_trans_commit(tp);
1011 			if (!error)
1012 				xfs_inode_clear_eofblocks_tag(ip);
1013 		}
1014 
1015 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1016 	}
1017 	return error;
1018 }
1019 
1020 int
1021 xfs_alloc_file_space(
1022 	struct xfs_inode	*ip,
1023 	xfs_off_t		offset,
1024 	xfs_off_t		len,
1025 	int			alloc_type)
1026 {
1027 	xfs_mount_t		*mp = ip->i_mount;
1028 	xfs_off_t		count;
1029 	xfs_filblks_t		allocated_fsb;
1030 	xfs_filblks_t		allocatesize_fsb;
1031 	xfs_extlen_t		extsz, temp;
1032 	xfs_fileoff_t		startoffset_fsb;
1033 	xfs_fsblock_t		firstfsb;
1034 	int			nimaps;
1035 	int			quota_flag;
1036 	int			rt;
1037 	xfs_trans_t		*tp;
1038 	xfs_bmbt_irec_t		imaps[1], *imapp;
1039 	struct xfs_defer_ops	dfops;
1040 	uint			qblocks, resblks, resrtextents;
1041 	int			error;
1042 
1043 	trace_xfs_alloc_file_space(ip);
1044 
1045 	if (XFS_FORCED_SHUTDOWN(mp))
1046 		return -EIO;
1047 
1048 	error = xfs_qm_dqattach(ip, 0);
1049 	if (error)
1050 		return error;
1051 
1052 	if (len <= 0)
1053 		return -EINVAL;
1054 
1055 	rt = XFS_IS_REALTIME_INODE(ip);
1056 	extsz = xfs_get_extsz_hint(ip);
1057 
1058 	count = len;
1059 	imapp = &imaps[0];
1060 	nimaps = 1;
1061 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
1062 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1063 
1064 	/*
1065 	 * Allocate file space until done or until there is an error
1066 	 */
1067 	while (allocatesize_fsb && !error) {
1068 		xfs_fileoff_t	s, e;
1069 
1070 		/*
1071 		 * Determine space reservations for data/realtime.
1072 		 */
1073 		if (unlikely(extsz)) {
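			/*
			 * Round the request out to extent size hint
			 * boundaries: s is the start rounded down and e is
			 * the end rounded up, so the reservation below
			 * covers whole hint-sized chunks.
			 */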
1074 			s = startoffset_fsb;
1075 			do_div(s, extsz);
1076 			s *= extsz;
1077 			e = startoffset_fsb + allocatesize_fsb;
1078 			if ((temp = do_mod(startoffset_fsb, extsz)))
1079 				e += temp;
1080 			if ((temp = do_mod(e, extsz)))
1081 				e += extsz - temp;
1082 		} else {
1083 			s = 0;
1084 			e = allocatesize_fsb;
1085 		}
1086 
1087 		/*
1088 		 * The transaction reservation is limited to a 32-bit block
1089 		 * count, hence we need to limit the number of blocks we are
1090 		 * trying to reserve to avoid an overflow. We can't allocate
1091 		 * more than @nimaps extents, and an extent is limited on disk
1092 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1093 		 */
1094 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
1095 		if (unlikely(rt)) {
1096 			resrtextents = qblocks = resblks;
1097 			resrtextents /= mp->m_sb.sb_rextsize;
1098 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1099 			quota_flag = XFS_QMOPT_RES_RTBLKS;
1100 		} else {
1101 			resrtextents = 0;
1102 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1103 			quota_flag = XFS_QMOPT_RES_REGBLKS;
1104 		}
1105 
1106 		/*
1107 		 * Allocate and setup the transaction.
1108 		 */
1109 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
1110 				resrtextents, 0, &tp);
1111 
1112 		/*
1113 		 * Check for running out of space
1114 		 */
1115 		if (error) {
1116 			/*
1117 			 * Free the transaction structure.
1118 			 */
1119 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1120 			break;
1121 		}
1122 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1123 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1124 						      0, quota_flag);
1125 		if (error)
1126 			goto error1;
1127 
1128 		xfs_trans_ijoin(tp, ip, 0);
1129 
1130 		xfs_defer_init(&dfops, &firstfsb);
1131 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1132 					allocatesize_fsb, alloc_type, &firstfsb,
1133 					resblks, imapp, &nimaps, &dfops);
1134 		if (error)
1135 			goto error0;
1136 
1137 		/*
1138 		 * Complete the transaction
1139 		 */
1140 		error = xfs_defer_finish(&tp, &dfops);
1141 		if (error)
1142 			goto error0;
1143 
1144 		error = xfs_trans_commit(tp);
1145 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1146 		if (error)
1147 			break;
1148 
1149 		allocated_fsb = imapp->br_blockcount;
1150 
1151 		if (nimaps == 0) {
1152 			error = -ENOSPC;
1153 			break;
1154 		}
1155 
1156 		startoffset_fsb += allocated_fsb;
1157 		allocatesize_fsb -= allocated_fsb;
1158 	}
1159 
1160 	return error;
1161 
1162 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1163 	xfs_defer_cancel(&dfops);
1164 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1165 
1166 error1:	/* Just cancel transaction */
1167 	xfs_trans_cancel(tp);
1168 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1169 	return error;
1170 }
1171 
1172 static int
1173 xfs_unmap_extent(
1174 	struct xfs_inode	*ip,
1175 	xfs_fileoff_t		startoffset_fsb,
1176 	xfs_filblks_t		len_fsb,
1177 	int			*done)
1178 {
1179 	struct xfs_mount	*mp = ip->i_mount;
1180 	struct xfs_trans	*tp;
1181 	struct xfs_defer_ops	dfops;
1182 	xfs_fsblock_t		firstfsb;
1183 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1184 	int			error;
1185 
1186 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1187 	if (error) {
1188 		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1189 		return error;
1190 	}
1191 
1192 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1193 	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1194 			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1195 	if (error)
1196 		goto out_trans_cancel;
1197 
1198 	xfs_trans_ijoin(tp, ip, 0);
1199 
1200 	xfs_defer_init(&dfops, &firstfsb);
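	/*
	 * The "nexts" argument of 2 limits how many extents a single call
	 * may remove, keeping the unmap within the transaction reservation
	 * made above; the caller loops until *done is set.
	 */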
1201 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
1202 			&dfops, done);
1203 	if (error)
1204 		goto out_bmap_cancel;
1205 
1206 	xfs_defer_ijoin(&dfops, ip);
1207 	error = xfs_defer_finish(&tp, &dfops);
1208 	if (error)
1209 		goto out_bmap_cancel;
1210 
1211 	error = xfs_trans_commit(tp);
1212 out_unlock:
1213 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1214 	return error;
1215 
1216 out_bmap_cancel:
1217 	xfs_defer_cancel(&dfops);
1218 out_trans_cancel:
1219 	xfs_trans_cancel(tp);
1220 	goto out_unlock;
1221 }
1222 
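/*
 * For realtime files that cannot use unwritten extents, an unmap must only
 * ever remove whole realtime extents.  Round the start of the range up and
 * trim its end down to realtime extent boundaries so that xfs_bunmapi()
 * never has to split a partial rt extent.
 */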
1223 static int
1224 xfs_adjust_extent_unmap_boundaries(
1225 	struct xfs_inode	*ip,
1226 	xfs_fileoff_t		*startoffset_fsb,
1227 	xfs_fileoff_t		*endoffset_fsb)
1228 {
1229 	struct xfs_mount	*mp = ip->i_mount;
1230 	struct xfs_bmbt_irec	imap;
1231 	int			nimap, error;
1232 	xfs_extlen_t		mod = 0;
1233 
1234 	nimap = 1;
1235 	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1236 	if (error)
1237 		return error;
1238 
1239 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1240 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1241 		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
1242 		if (mod)
1243 			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1244 	}
1245 
1246 	nimap = 1;
1247 	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1248 	if (error)
1249 		return error;
1250 
1251 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1252 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1253 		mod++;
1254 		if (mod && mod != mp->m_sb.sb_rextsize)
1255 			*endoffset_fsb -= mod;
1256 	}
1257 
1258 	return 0;
1259 }
1260 
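/*
 * Wait for pending direct I/O, write back and then drop the page cache over
 * the range that is about to be unmapped, rounded out to the larger of the
 * page size and the filesystem block size, so no stale pages survive the
 * hole punch.
 */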
1261 static int
1262 xfs_flush_unmap_range(
1263 	struct xfs_inode	*ip,
1264 	xfs_off_t		offset,
1265 	xfs_off_t		len)
1266 {
1267 	struct xfs_mount	*mp = ip->i_mount;
1268 	struct inode		*inode = VFS_I(ip);
1269 	xfs_off_t		rounding, start, end;
1270 	int			error;
1271 
1272 	/* wait for the completion of any pending DIOs */
1273 	inode_dio_wait(inode);
1274 
1275 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1276 	start = round_down(offset, rounding);
1277 	end = round_up(offset + len, rounding) - 1;
1278 
1279 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1280 	if (error)
1281 		return error;
1282 	truncate_pagecache_range(inode, start, end);
1283 	return 0;
1284 }
1285 
1286 int
1287 xfs_free_file_space(
1288 	struct xfs_inode	*ip,
1289 	xfs_off_t		offset,
1290 	xfs_off_t		len)
1291 {
1292 	struct xfs_mount	*mp = ip->i_mount;
1293 	xfs_fileoff_t		startoffset_fsb;
1294 	xfs_fileoff_t		endoffset_fsb;
1295 	int			done = 0, error;
1296 
1297 	trace_xfs_free_file_space(ip);
1298 
1299 	error = xfs_qm_dqattach(ip, 0);
1300 	if (error)
1301 		return error;
1302 
1303 	if (len <= 0)	/* if nothing being freed */
1304 		return 0;
1305 
1306 	error = xfs_flush_unmap_range(ip, offset, len);
1307 	if (error)
1308 		return error;
1309 
1310 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1311 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1312 
1313 	/*
1314 	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
1315 	 * and we can't use unwritten extents then we actually need to ensure
1316 	 * we zero the whole extent, otherwise we just need to take care of the
1317 	 * block boundaries, and xfs_bunmapi will handle the rest.
1318 	 */
1319 	if (XFS_IS_REALTIME_INODE(ip) &&
1320 	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1321 		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1322 				&endoffset_fsb);
1323 		if (error)
1324 			return error;
1325 	}
1326 
1327 	if (endoffset_fsb > startoffset_fsb) {
1328 		while (!done) {
1329 			error = xfs_unmap_extent(ip, startoffset_fsb,
1330 					endoffset_fsb - startoffset_fsb, &done);
1331 			if (error)
1332 				return error;
1333 		}
1334 	}
1335 
1336 	/*
1337 	 * Now that we've unmapped all full blocks we'll have to zero out any
1338 	 * partial block at the beginning and/or end.  xfs_zero_range is
1339 	 * smart enough to skip any holes, including those we just created,
1340 	 * but we must take care not to zero beyond EOF and enlarge i_size.
1341 	 */
1342 
1343 	if (offset >= XFS_ISIZE(ip))
1344 		return 0;
1345 
1346 	if (offset + len > XFS_ISIZE(ip))
1347 		len = XFS_ISIZE(ip) - offset;
1348 
1349 	return xfs_zero_range(ip, offset, len, NULL);
1350 }
1351 
1352 /*
1353  * Preallocate and zero a range of a file. This mechanism has the allocation
1354  * semantics of fallocate and in addition converts data in the range to zeroes.
1355  */
1356 int
1357 xfs_zero_file_space(
1358 	struct xfs_inode	*ip,
1359 	xfs_off_t		offset,
1360 	xfs_off_t		len)
1361 {
1362 	struct xfs_mount	*mp = ip->i_mount;
1363 	uint			blksize;
1364 	int			error;
1365 
1366 	trace_xfs_zero_file_space(ip);
1367 
1368 	blksize = 1 << mp->m_sb.sb_blocklog;
1369 
1370 	/*
1371 	 * Punch a hole and prealloc the range. We use hole punch rather than
1372 	 * unwritten extent conversion for two reasons:
1373 	 *
1374 	 * 1.) Hole punch handles partial block zeroing for us.
1375 	 *
1376 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1377 	 * by virtue of the hole punch.
1378 	 */
1379 	error = xfs_free_file_space(ip, offset, len);
1380 	if (error)
1381 		goto out;
1382 
1383 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1384 				     round_up(offset + len, blksize) -
1385 				     round_down(offset, blksize),
1386 				     XFS_BMAPI_PREALLOC);
1387 out:
1388 	return error;
1389 
1390 }
1391 
1392 /*
1393  * @next_fsb will keep track of the extent currently undergoing shift.
1394  * @stop_fsb will keep track of the extent at which we have to stop.
1395  * If we are shifting left, we will start with the block at (offset + len)
1396  * and shift each extent up to and including the last extent.
1397  * If we are shifting right, we will start with the last extent inside file
1398  * space and continue until we reach the block corresponding to offset.
1399  */
1400 static int
1401 xfs_shift_file_space(
1402 	struct xfs_inode        *ip,
1403 	xfs_off_t               offset,
1404 	xfs_off_t               len,
1405 	enum shift_direction	direction)
1406 {
1407 	int			done = 0;
1408 	struct xfs_mount	*mp = ip->i_mount;
1409 	struct xfs_trans	*tp;
1410 	int			error;
1411 	struct xfs_defer_ops	dfops;
1412 	xfs_fsblock_t		first_block;
1413 	xfs_fileoff_t		stop_fsb;
1414 	xfs_fileoff_t		next_fsb;
1415 	xfs_fileoff_t		shift_fsb;
1416 	uint			resblks;
1417 
1418 	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
1419 
1420 	if (direction == SHIFT_LEFT) {
1421 		/*
1422 		 * Reserve blocks to cover potential extent merges after left
1423 		 * shift operations.
1424 		 */
1425 		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1426 		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1427 		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1428 	} else {
1429 		/*
1430 		 * If right shift, delegate the work of initialization of
1431 		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
1432 		 */
1433 		resblks = 0;
1434 		next_fsb = NULLFSBLOCK;
1435 		stop_fsb = XFS_B_TO_FSB(mp, offset);
1436 	}
1437 
1438 	shift_fsb = XFS_B_TO_FSB(mp, len);
1439 
1440 	/*
1441 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1442 	 * into the accessible region of the file.
1443 	 */
1444 	if (xfs_can_free_eofblocks(ip, true)) {
1445 		error = xfs_free_eofblocks(ip);
1446 		if (error)
1447 			return error;
1448 	}
1449 
1450 	/*
1451 	 * Write back and invalidate the page cache for the remainder of the
1452 	 * file as we're about to shift down every extent from offset to EOF.
1453 	 */
1454 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1455 					     offset, -1);
1456 	if (error)
1457 		return error;
1458 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1459 					offset >> PAGE_SHIFT, -1);
1460 	if (error)
1461 		return error;
1462 
1463 	/*
1464 	 * Clean out anything hanging around in the cow fork now that
1465 	 * we've flushed all the dirty data out to disk to avoid having
1466 	 * CoW extents at the wrong offsets.
1467 	 */
1468 	if (xfs_is_reflink_inode(ip)) {
1469 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1470 				true);
1471 		if (error)
1472 			return error;
1473 	}
1474 
1475 	/*
1476 	 * The extent shifting code works on extent granularity. So, if
1477 	 * stop_fsb is not the starting block of extent, we need to split
1478 	 * the extent at stop_fsb.
1479 	 */
1480 	if (direction == SHIFT_RIGHT) {
1481 		error = xfs_bmap_split_extent(ip, stop_fsb);
1482 		if (error)
1483 			return error;
1484 	}
1485 
1486 	while (!error && !done) {
1487 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1488 					&tp);
1489 		if (error)
1490 			break;
1491 
1492 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1493 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1494 				ip->i_gdquot, ip->i_pdquot, resblks, 0,
1495 				XFS_QMOPT_RES_REGBLKS);
1496 		if (error)
1497 			goto out_trans_cancel;
1498 
1499 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1500 
1501 		xfs_defer_init(&dfops, &first_block);
1502 
1503 		/*
1504 		 * We are using the write transaction, in which a maximum of 2
1505 		 * bmbt updates are allowed.
1506 		 */
1507 		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
1508 				&done, stop_fsb, &first_block, &dfops,
1509 				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
1510 		if (error)
1511 			goto out_bmap_cancel;
1512 
1513 		error = xfs_defer_finish(&tp, &dfops);
1514 		if (error)
1515 			goto out_bmap_cancel;
1516 
1517 		error = xfs_trans_commit(tp);
1518 	}
1519 
1520 	return error;
1521 
1522 out_bmap_cancel:
1523 	xfs_defer_cancel(&dfops);
1524 out_trans_cancel:
1525 	xfs_trans_cancel(tp);
1526 	return error;
1527 }
1528 
1529 /*
1530  * xfs_collapse_file_space()
1531  *	This routine frees disk space and shifts extents for the given file.
1532  *	The first thing we do is free the data blocks in the specified range
1533  *	by calling xfs_free_file_space(), which also syncs dirty data and
1534  *	invalidates the page cache over the region being collapsed. Then we
1535  *	shift extent records to the left to cover the hole.
1536  * RETURNS:
1537  *	0 on success
1538  *	errno on error
1539  *
1540  */
1541 int
1542 xfs_collapse_file_space(
1543 	struct xfs_inode	*ip,
1544 	xfs_off_t		offset,
1545 	xfs_off_t		len)
1546 {
1547 	int error;
1548 
1549 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1550 	trace_xfs_collapse_file_space(ip);
1551 
1552 	error = xfs_free_file_space(ip, offset, len);
1553 	if (error)
1554 		return error;
1555 
1556 	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1557 }
1558 
1559 /*
1560  * xfs_insert_file_space()
1561  *	This routine creates hole space by shifting extents for the given file.
1562  *	The first thing we do is sync dirty data and invalidate the page cache
1563  *	over the region on which the insert range is working. Then we split an
1564  *	extent into two extents at the given offset by calling
1565  *	xfs_bmap_split_extent, and shift all extent records lying between
1566  *	[offset, last allocated extent] to the right to make room for the hole.
1567  * RETURNS:
1568  *	0 on success
1569  *	errno on error
1570  */
1571 int
1572 xfs_insert_file_space(
1573 	struct xfs_inode	*ip,
1574 	loff_t			offset,
1575 	loff_t			len)
1576 {
1577 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1578 	trace_xfs_insert_file_space(ip);
1579 
1580 	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1581 }
1582 
1583 /*
1584  * We need to check that the format of the data fork in the temporary inode is
1585  * valid for the target inode before doing the swap. This is not a problem with
1586  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1587  * data fork depending on the space the attribute fork is taking so we can get
1588  * invalid formats on the target inode.
1589  *
1590  * E.g. target has space for 7 extents in extent format, temp inode only has
1591  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1592  * btree, but when swapped it needs to be in extent format. Hence we can't just
1593  * blindly swap data forks on attr2 filesystems.
1594  *
1595  * Note that we check the swap in both directions so that we don't end up with
1596  * a corrupt temporary inode, either.
1597  *
1598  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1599  * inode will prevent this situation from occurring, so all we do here is
1600  * reject and log the attempt. Basically we are putting the responsibility on
1601  * userspace to get this right.
1602  */
1603 static int
1604 xfs_swap_extents_check_format(
1605 	struct xfs_inode	*ip,	/* target inode */
1606 	struct xfs_inode	*tip)	/* tmp inode */
1607 {
1608 
1609 	/* Should never get a local format */
1610 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1611 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1612 		return -EINVAL;
1613 
1614 	/*
1615 	 * if the target inode has fewer extents than the temporary inode then
1616 	 * why did userspace call us?
1617 	 */
1618 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1619 		return -EINVAL;
1620 
1621 	/*
1622 	 * If we have to use the (expensive) rmap swap method, we can
1623 	 * handle any number of extents and any format.
1624 	 */
1625 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1626 		return 0;
1627 
1628 	/*
1629 	 * if the target inode is in extent form and the temp inode is in btree
1630 	 * form then we will end up with the target inode in the wrong format
1631 	 * as we already know there are fewer extents in the temp inode.
1632 	 */
1633 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1634 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1635 		return -EINVAL;
1636 
1637 	/* Check temp in extent form to max in target */
1638 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1639 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1640 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1641 		return -EINVAL;
1642 
1643 	/* Check target in extent form to max in temp */
1644 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1645 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1646 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1647 		return -EINVAL;
1648 
1649 	/*
1650 	 * If we are in a btree format, check that the temp root block will fit
1651 	 * in the target and that it has enough extents to be in btree format
1652 	 * in the target.
1653 	 *
1654 	 * Note that we have to be careful to allow btree->extent conversions
1655 	 * (a common defrag case) which will occur when the temp inode is in
1656 	 * extent format...
1657 	 */
1658 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1659 		if (XFS_IFORK_Q(ip) &&
1660 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1661 			return -EINVAL;
1662 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1663 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1664 			return -EINVAL;
1665 	}
1666 
1667 	/* Reciprocal target->temp btree format checks */
1668 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1669 		if (XFS_IFORK_Q(tip) &&
1670 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1671 			return -EINVAL;
1672 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1673 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1674 			return -EINVAL;
1675 	}
1676 
1677 	return 0;
1678 }
1679 
1680 static int
1681 xfs_swap_extent_flush(
1682 	struct xfs_inode	*ip)
1683 {
1684 	int	error;
1685 
1686 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1687 	if (error)
1688 		return error;
1689 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1690 
1691 	/* Verify O_DIRECT for ftmp */
1692 	if (VFS_I(ip)->i_mapping->nrpages)
1693 		return -EINVAL;
1694 	return 0;
1695 }
1696 
1697 /*
1698  * Move extents from one file to another, when rmap is enabled.
1699  */
1700 STATIC int
1701 xfs_swap_extent_rmap(
1702 	struct xfs_trans		**tpp,
1703 	struct xfs_inode		*ip,
1704 	struct xfs_inode		*tip)
1705 {
1706 	struct xfs_bmbt_irec		irec;
1707 	struct xfs_bmbt_irec		uirec;
1708 	struct xfs_bmbt_irec		tirec;
1709 	xfs_fileoff_t			offset_fsb;
1710 	xfs_fileoff_t			end_fsb;
1711 	xfs_filblks_t			count_fsb;
1712 	xfs_fsblock_t			firstfsb;
1713 	struct xfs_defer_ops		dfops;
1714 	int				error;
1715 	xfs_filblks_t			ilen;
1716 	xfs_filblks_t			rlen;
1717 	int				nimaps;
1718 	uint64_t			tip_flags2;
1719 
1720 	/*
1721 	 * If the source file has shared blocks, we must flag the donor
1722 	 * file as having shared blocks so that we get the shared-block
1723 	 * rmap functions when we go to fix up the rmaps.  The flags
1724 	 * will be switched for real later.
1725 	 */
1726 	tip_flags2 = tip->i_d.di_flags2;
1727 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1728 		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1729 
1730 	offset_fsb = 0;
1731 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1732 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1733 
1734 	while (count_fsb) {
1735 		/* Read extent from the donor file */
1736 		nimaps = 1;
1737 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1738 				&nimaps, 0);
1739 		if (error)
1740 			goto out;
1741 		ASSERT(nimaps == 1);
1742 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1743 
1744 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1745 		ilen = tirec.br_blockcount;
1746 
1747 		/* Unmap the old blocks in the source file. */
1748 		while (tirec.br_blockcount) {
1749 			xfs_defer_init(&dfops, &firstfsb);
1750 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1751 
1752 			/* Read extent from the source file */
1753 			nimaps = 1;
1754 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1755 					tirec.br_blockcount, &irec,
1756 					&nimaps, 0);
1757 			if (error)
1758 				goto out_defer;
1759 			ASSERT(nimaps == 1);
1760 			ASSERT(tirec.br_startoff == irec.br_startoff);
1761 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1762 
1763 			/* Trim the extent. */
1764 			uirec = tirec;
1765 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1766 					tirec.br_blockcount,
1767 					irec.br_blockcount);
1768 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1769 
1770 			/* Remove the mapping from the donor file. */
1771 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1772 					tip, &uirec);
1773 			if (error)
1774 				goto out_defer;
1775 
1776 			/* Remove the mapping from the source file. */
1777 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1778 					ip, &irec);
1779 			if (error)
1780 				goto out_defer;
1781 
1782 			/* Map the donor file's blocks into the source file. */
1783 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1784 					ip, &uirec);
1785 			if (error)
1786 				goto out_defer;
1787 
1788 			/* Map the source file's blocks into the donor file. */
1789 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1790 					tip, &irec);
1791 			if (error)
1792 				goto out_defer;
1793 
1794 			xfs_defer_ijoin(&dfops, ip);
1795 			error = xfs_defer_finish(tpp, &dfops);
1796 			if (error)
1797 				goto out_defer;
1798 
1799 			tirec.br_startoff += rlen;
1800 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1801 			    tirec.br_startblock != DELAYSTARTBLOCK)
1802 				tirec.br_startblock += rlen;
1803 			tirec.br_blockcount -= rlen;
1804 		}
1805 
1806 		/* Roll on... */
1807 		count_fsb -= ilen;
1808 		offset_fsb += ilen;
1809 	}
1810 
1811 	tip->i_d.di_flags2 = tip_flags2;
1812 	return 0;
1813 
1814 out_defer:
1815 	xfs_defer_cancel(&dfops);
1816 out:
1817 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1818 	tip->i_d.di_flags2 = tip_flags2;
1819 	return error;
1820 }
1821 
1822 /* Swap the extents of two files by swapping data forks. */
1823 STATIC int
1824 xfs_swap_extent_forks(
1825 	struct xfs_trans	*tp,
1826 	struct xfs_inode	*ip,
1827 	struct xfs_inode	*tip,
1828 	int			*src_log_flags,
1829 	int			*target_log_flags)
1830 {
1831 	struct xfs_ifork	tempifp, *ifp, *tifp;
1832 	xfs_filblks_t		aforkblks = 0;
1833 	xfs_filblks_t		taforkblks = 0;
1834 	xfs_extnum_t		junk;
1835 	xfs_extnum_t		nextents;
1836 	uint64_t		tmp;
1837 	int			error;
1838 
1839 	/*
1840 	 * Count the number of extended attribute blocks.  The attr forks are
	 * not swapped below, so these counts are only used to keep di_nblocks
	 * correct on each inode once the data forks have been exchanged.
1841 	 */
1842 	if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1843 	    (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1844 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1845 				&aforkblks);
1846 		if (error)
1847 			return error;
1848 	}
1849 	if (((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1850 	    (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1851 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1852 				&taforkblks);
1853 		if (error)
1854 			return error;
1855 	}
1856 
1857 	/*
1858 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1859 	 * block headers. We can't start changing the bmbt blocks until the
1860 	 * inode owner change is logged so recovery does the right thing in the
1861 	 * event of a crash. Set the owner change log flags now and leave the
1862 	 * bmbt scan as the last step.
1863 	 */
1864 	if (ip->i_d.di_version == 3 &&
1865 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1866 		(*target_log_flags) |= XFS_ILOG_DOWNER;
1867 	if (tip->i_d.di_version == 3 &&
1868 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1869 		(*src_log_flags) |= XFS_ILOG_DOWNER;
1870 
1871 	/*
1872 	 * Swap the data forks of the inodes
1873 	 */
1874 	ifp = &ip->i_df;
1875 	tifp = &tip->i_df;
1876 	tempifp = *ifp;		/* struct copy */
1877 	*ifp = *tifp;		/* struct copy */
1878 	*tifp = tempifp;	/* struct copy */
1879 
1880 	/*
1881 	 * Fix the on-disk inode values
1882 	 */
1883 	tmp = (uint64_t)ip->i_d.di_nblocks;
1884 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1885 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1886 
1887 	tmp = (uint64_t)ip->i_d.di_nextents;
1888 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1889 	tip->i_d.di_nextents = tmp;
1890 
1891 	tmp = (uint64_t)ip->i_d.di_format;
1892 	ip->i_d.di_format = tip->i_d.di_format;
1893 	tip->i_d.di_format = tmp;
1894 
1895 	/*
1896 	 * The extents in the source inode could still contain speculative
1897 	 * preallocation beyond EOF (e.g. the file is open but not modified
1898 	 * while defrag is in progress). In that case, we need to copy over the
1899 	 * number of delalloc blocks the data fork in the source inode is
1900 	 * tracking beyond EOF so that when the fork is truncated away when the
1901 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1902 	 * counter on that inode.
1903 	 */
1904 	ASSERT(tip->i_delayed_blks == 0);
1905 	tip->i_delayed_blks = ip->i_delayed_blks;
1906 	ip->i_delayed_blks = 0;
1907 
1908 	switch (ip->i_d.di_format) {
1909 	case XFS_DINODE_FMT_EXTENTS:
1910 		/*
1911 		 * If the extents fit in the inode, fix the pointer.  Otherwise
1912 		 * it's already NULL or pointing to the external extent list.
1913 		 */
1914 		nextents = xfs_iext_count(&ip->i_df);
1915 		if (nextents <= XFS_INLINE_EXTS)
1916 			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
1917 		(*src_log_flags) |= XFS_ILOG_DEXT;
1918 		break;
1919 	case XFS_DINODE_FMT_BTREE:
1920 		ASSERT(ip->i_d.di_version < 3 ||
1921 		       (*src_log_flags & XFS_ILOG_DOWNER));
1922 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1923 		break;
1924 	}
1925 
1926 	switch (tip->i_d.di_format) {
1927 	case XFS_DINODE_FMT_EXTENTS:
1928 		/*
1929 		 * If the extents fit in the inode, fix the pointer.  Otherwise
1930 		 * it's already NULL or pointing to the external extent list.
1931 		 */
1932 		nextents = xfs_iext_count(&tip->i_df);
1933 		if (nextents <= XFS_INLINE_EXTS)
1934 			tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
1935 		(*target_log_flags) |= XFS_ILOG_DEXT;
1936 		break;
1937 	case XFS_DINODE_FMT_BTREE:
1938 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1939 		ASSERT(tip->i_d.di_version < 3 ||
1940 		       (*target_log_flags & XFS_ILOG_DOWNER));
1941 		break;
1942 	}
1943 
1944 	return 0;
1945 }
1946 
1947 /*
1948  * Fix up the owners of the bmbt blocks to refer to the current inode. The
1949  * change owner scan attempts to order all modified buffers in the current
1950  * transaction. In the event of ordered buffer failure, the offending buffer is
1951  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1952  * the transaction in this case to replenish the fallback log reservation and
1953  * restart the scan. This process repeats until the scan completes.
1954  */
1955 static int
1956 xfs_swap_change_owner(
1957 	struct xfs_trans	**tpp,
1958 	struct xfs_inode	*ip,
1959 	struct xfs_inode	*tmpip)
1960 {
1961 	int			error;
1962 	struct xfs_trans	*tp = *tpp;
1963 
1964 	do {
1965 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1966 					      NULL);
1967 		/* success or fatal error */
1968 		if (error != -EAGAIN)
1969 			break;
1970 
1971 		error = xfs_trans_roll(tpp);
1972 		if (error)
1973 			break;
1974 		tp = *tpp;
1975 
1976 		/*
1977 		 * Redirty both inodes so they can relog and keep the log tail
1978 		 * moving forward.
1979 		 */
1980 		xfs_trans_ijoin(tp, ip, 0);
1981 		xfs_trans_ijoin(tp, tmpip, 0);
1982 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1983 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1984 	} while (true);
1985 
1986 	return error;
1987 }
1988 
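/*
 * Swap the data fork extents of two inodes.  This is the kernel side of the
 * XFS_IOC_SWAPEXT ioctl, which xfs_fsr uses for online defragmentation: the
 * file is copied into a temporary inode and then the two data forks are
 * exchanged here.
 *
 * A rough sketch of how userspace drives it (illustrative only, not part of
 * this file; names follow the uapi definitions in xfs_fs.h, and the bulkstat
 * call that fills sx_stat plus all error handling are elided):
 *
 *	struct xfs_swapext	sx = { .sx_version = XFS_SX_VERSION };
 *	int			fd = open(path, O_RDWR);
 *	int			tfd = open(tmppath, O_RDWR | O_CREAT, 0600);
 *
 *	// ...copy the file contents into tmppath and fsync it...
 *
 *	sx.sx_fdtarget = fd;
 *	sx.sx_fdtmp = tfd;
 *	sx.sx_offset = 0;		// only whole-file swaps are allowed
 *	sx.sx_length = file_size;	// must match both inode sizes
 *	sx.sx_stat = target_bstat;	// e.g. from XFS_IOC_FSBULKSTAT_SINGLE;
 *					// lets the ctime/mtime check below
 *					// reject the swap with -EBUSY if the
 *					// file changed after the copy
 *	ioctl(fd, XFS_IOC_SWAPEXT, &sx);
 */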
1989 int
1990 xfs_swap_extents(
1991 	struct xfs_inode	*ip,	/* target inode */
1992 	struct xfs_inode	*tip,	/* tmp inode */
1993 	struct xfs_swapext	*sxp)
1994 {
1995 	struct xfs_mount	*mp = ip->i_mount;
1996 	struct xfs_trans	*tp;
1997 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1998 	int			src_log_flags, target_log_flags;
1999 	int			error = 0;
2000 	int			lock_flags;
2001 	struct xfs_ifork	*cowfp;
2002 	uint64_t		f;
2003 	int			resblks = 0;
2004 
2005 	/*
2006 	 * Lock the inodes against other IO, page faults and truncate to
2007 	 * begin with.  Then we can safely ensure the inodes are flushed and
2008 	 * have no page cache. Once we have done this we can take the ilocks and
2009 	 * do the rest of the checks.
2010 	 */
2011 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2012 	lock_flags = XFS_MMAPLOCK_EXCL;
2013 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
2014 
2015 	/* Verify that both files have the same format */
2016 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
2017 		error = -EINVAL;
2018 		goto out_unlock;
2019 	}
2020 
2021 	/* Verify both files are either real-time or non-realtime */
2022 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
2023 		error = -EINVAL;
2024 		goto out_unlock;
2025 	}
2026 
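	/*
	 * Flush all dirty data on both files before going any further, so
	 * that the extent maps we are about to compare and swap are stable.
	 */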
2027 	error = xfs_swap_extent_flush(ip);
2028 	if (error)
2029 		goto out_unlock;
2030 	error = xfs_swap_extent_flush(tip);
2031 	if (error)
2032 		goto out_unlock;
2033 
2034 	/*
2035 	 * Extent "swapping" with rmap requires a permanent reservation and
2036 	 * a block reservation because it's really just a remap operation
2037 	 * performed with log redo items!
2038 	 */
2039 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
2040 		/*
2041 		 * Conceptually this shouldn't affect the shape of either
2042 		 * bmbt, but since we atomically move extents one by one,
2043 		 * we reserve enough space to rebuild both trees.
2044 		 */
2045 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
2046 				XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
2047 				XFS_DATA_FORK) +
2048 			  XFS_SWAP_RMAP_SPACE_RES(mp,
2049 				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
2050 				XFS_DATA_FORK);
2051 	}
2052 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
2053 	if (error)
2054 		goto out_unlock;
2055 
2056 	/*
2057 	 * Lock and join the inodes to the transaction so that transaction commit
2058 	 * or cancel will unlock the inodes from this point onwards.
2059 	 */
2060 	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
2061 	lock_flags |= XFS_ILOCK_EXCL;
2062 	xfs_trans_ijoin(tp, ip, 0);
2063 	xfs_trans_ijoin(tp, tip, 0);
2064 
2065 
2066 	/* Verify all data are being swapped */
2067 	if (sxp->sx_offset != 0 ||
2068 	    sxp->sx_length != ip->i_d.di_size ||
2069 	    sxp->sx_length != tip->i_d.di_size) {
2070 		error = -EFAULT;
2071 		goto out_trans_cancel;
2072 	}
2073 
2074 	trace_xfs_swap_extent_before(ip, 0);
2075 	trace_xfs_swap_extent_before(tip, 1);
2076 
2077 	/* check inode formats now that data is flushed */
2078 	error = xfs_swap_extents_check_format(ip, tip);
2079 	if (error) {
2080 		xfs_notice(mp,
2081 		    "%s: inode 0x%llx format is incompatible for exchanging.",
2082 				__func__, ip->i_ino);
2083 		goto out_trans_cancel;
2084 	}
2085 
2086 	/*
2087 	 * Compare the current change & modify times with those
2088 	 * passed in.  If they differ, we abort this swap.
2089 	 * This is the mechanism used to assure the calling
2090 	 * process that the file was not changed out from
2091 	 * under it.
2092 	 */
2093 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
2094 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
2095 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
2096 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
2097 		error = -EBUSY;
2098 		goto out_trans_cancel;
2099 	}
2100 
2101 	/*
2102 	 * Note the trickiness in setting the log flags - we set the owner log
2103 	 * flag on the opposite inode (i.e. the inode we are setting the new
2104 	 * owner to be) because once we swap the forks and log that, log
2105 	 * recovery is going to see the fork as owned by the swapped inode,
2106 	 * not the pre-swapped inodes.
2107 	 */
2108 	src_log_flags = XFS_ILOG_CORE;
2109 	target_log_flags = XFS_ILOG_CORE;
2110 
2111 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2112 		error = xfs_swap_extent_rmap(&tp, ip, tip);
2113 	else
2114 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
2115 				&target_log_flags);
2116 	if (error)
2117 		goto out_trans_cancel;
2118 
2119 	/* Do we have to swap reflink flags? */
2120 	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
2121 	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
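		/*
		 * Exchange only the reflink bits; any other di_flags2 bits
		 * stay with their own inode.
		 */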
2122 		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2123 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2124 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2125 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2126 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
2127 	}
2128 
2129 	/* Swap the cow forks. */
2130 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
2131 		xfs_extnum_t	extnum;
2132 
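		/*
		 * The CoW fork is always in extents format (the asserts below
		 * check this), so swapping the extent counts and fork pointers
		 * is sufficient.  Afterwards, retag each inode so background
		 * reclaim of CoW staging extents looks at the right fork.
		 */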
2133 		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2134 		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2135 
2136 		extnum = ip->i_cnextents;
2137 		ip->i_cnextents = tip->i_cnextents;
2138 		tip->i_cnextents = extnum;
2139 
2140 		cowfp = ip->i_cowfp;
2141 		ip->i_cowfp = tip->i_cowfp;
2142 		tip->i_cowfp = cowfp;
2143 
2144 		if (ip->i_cowfp && ip->i_cnextents)
2145 			xfs_inode_set_cowblocks_tag(ip);
2146 		else
2147 			xfs_inode_clear_cowblocks_tag(ip);
2148 		if (tip->i_cowfp && tip->i_cnextents)
2149 			xfs_inode_set_cowblocks_tag(tip);
2150 		else
2151 			xfs_inode_clear_cowblocks_tag(tip);
2152 	}
2153 
2154 	xfs_trans_log_inode(tp, ip,  src_log_flags);
2155 	xfs_trans_log_inode(tp, tip, target_log_flags);
2156 
2157 	/*
2158 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2159 	 * have inode number owner values in the bmbt blocks that still refer to
2160 	 * the old inode. Scan each bmbt to fix up the owner values with the
2161 	 * inode number of the current inode.
2162 	 */
2163 	if (src_log_flags & XFS_ILOG_DOWNER) {
2164 		error = xfs_swap_change_owner(&tp, ip, tip);
2165 		if (error)
2166 			goto out_trans_cancel;
2167 	}
2168 	if (target_log_flags & XFS_ILOG_DOWNER) {
2169 		error = xfs_swap_change_owner(&tp, tip, ip);
2170 		if (error)
2171 			goto out_trans_cancel;
2172 	}
2173 
2174 	/*
2175 	 * If this is a synchronous mount, make sure that the
2176 	 * transaction goes to disk before returning to the user.
2177 	 */
2178 	if (mp->m_flags & XFS_MOUNT_WSYNC)
2179 		xfs_trans_set_sync(tp);
2180 
2181 	error = xfs_trans_commit(tp);
2182 
2183 	trace_xfs_swap_extent_after(ip, 0);
2184 	trace_xfs_swap_extent_after(tip, 1);
2185 
2186 out_unlock:
2187 	xfs_iunlock(ip, lock_flags);
2188 	xfs_iunlock(tip, lock_flags);
2189 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2190 	return error;
2191 
2192 out_trans_cancel:
2193 	xfs_trans_cancel(tp);
2194 	goto out_unlock;
2195 }
2196