xref: /openbmc/linux/fs/xfs/xfs_bmap_util.c (revision feac8c8b)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * Copyright (c) 2012 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_defer.h"
29 #include "xfs_inode.h"
30 #include "xfs_btree.h"
31 #include "xfs_trans.h"
32 #include "xfs_extfree_item.h"
33 #include "xfs_alloc.h"
34 #include "xfs_bmap.h"
35 #include "xfs_bmap_util.h"
36 #include "xfs_bmap_btree.h"
37 #include "xfs_rtalloc.h"
38 #include "xfs_error.h"
39 #include "xfs_quota.h"
40 #include "xfs_trans_space.h"
41 #include "xfs_trace.h"
42 #include "xfs_icache.h"
43 #include "xfs_log.h"
44 #include "xfs_rmap_btree.h"
45 #include "xfs_iomap.h"
46 #include "xfs_reflink.h"
47 #include "xfs_refcount.h"
48 
49 /* Kernel only BMAP related definitions and functions */
50 
51 /*
52  * Convert the given file system block to a disk block.  We have to treat it
53  * differently based on whether the file is a real time file or not, because the
54  * bmap code does.
55  */
56 xfs_daddr_t
57 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
58 {
59 	return (XFS_IS_REALTIME_INODE(ip) ?
60 		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
61 		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
62 }
63 
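/*
 * Editorial sketch (illustrative, not part of the original source): for a
 * realtime inode the fsb is already a linear block number on the realtime
 * device, so only a blocks-to-basic-blocks conversion is needed, while a
 * data device fsb encodes an AG number plus an AG-relative block that
 * XFS_FSB_TO_DADDR() decodes into a linear 512-byte disk address.  A
 * typical call, as used by xfs_getbmap_report_one() below:
 *
 *	xfs_daddr_t	daddr = xfs_fsb_to_db(ip, irec.br_startblock);
 */
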
64 /*
65  * Routine to zero an extent on disk allocated to the specific inode.
66  *
67  * The VFS functions take a linearised filesystem block offset, so we have to
68  * convert the sparse xfs fsb to the right format first.
69  * VFS types are real funky, too.
70  */
71 int
72 xfs_zero_extent(
73 	struct xfs_inode *ip,
74 	xfs_fsblock_t	start_fsb,
75 	xfs_off_t	count_fsb)
76 {
77 	struct xfs_mount *mp = ip->i_mount;
78 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
79 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
80 
81 	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
82 		block << (mp->m_super->s_blocksize_bits - 9),
83 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
84 		GFP_NOFS, 0);
85 }
86 
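/*
 * Editorial note (illustrative, not part of the original source): the
 * "s_blocksize_bits - 9" shifts above convert filesystem blocks into the
 * 512-byte sectors that blkdev_issue_zeroout() operates on.  With a
 * 4096-byte block size, for example, s_blocksize_bits is 12, the shift is
 * 3, and each filesystem block covers 1 << 3 = 8 sectors.
 */
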
87 #ifdef CONFIG_XFS_RT
88 int
89 xfs_bmap_rtalloc(
90 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
91 {
92 	int		error;		/* error return value */
93 	xfs_mount_t	*mp;		/* mount point structure */
94 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
95 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
96 	xfs_extlen_t	align;		/* minimum allocation alignment */
97 	xfs_rtblock_t	rtb;
98 
99 	mp = ap->ip->i_mount;
100 	align = xfs_get_extsz_hint(ap->ip);
101 	prod = align / mp->m_sb.sb_rextsize;
102 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
103 					align, 1, ap->eof, 0,
104 					ap->conv, &ap->offset, &ap->length);
105 	if (error)
106 		return error;
107 	ASSERT(ap->length);
108 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
109 
110 	/*
111 	 * If the offset & length are not perfectly aligned
112 	 * then kill prod, it will just get us in trouble.
113 	 */
114 	if (do_mod(ap->offset, align) || ap->length % align)
115 		prod = 1;
116 	/*
117 	 * Set ralen to be the actual requested length in rtextents.
118 	 */
119 	ralen = ap->length / mp->m_sb.sb_rextsize;
120 	/*
121 	 * If the old value was close enough to MAXEXTLEN that
122 	 * we rounded up to it, cut it back so it's valid again.
123 	 * Note that if it's a really large request (bigger than
124 	 * MAXEXTLEN), we don't hear about that number, and can't
125 	 * adjust the starting point to match it.
126 	 */
127 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
128 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
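	/*
	 * Editorial worked example (illustrative only, assuming MAXEXTLEN is
	 * (1 << 21) - 1 blocks): with sb_rextsize = 16 blocks the clamp above
	 * limits ralen to 2097151 / 16 = 131071 realtime extents, so the
	 * requested length never exceeds a single legal on-disk extent.
	 */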
129 
130 	/*
131 	 * Lock out modifications to both the RT bitmap and summary inodes
132 	 */
133 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
134 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
135 	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
136 	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
137 
138 	/*
139 	 * If it's an allocation to an empty file at offset 0,
140 	 * pick an extent that will space things out in the rt area.
141 	 */
142 	if (ap->eof && ap->offset == 0) {
143 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
144 
145 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
146 		if (error)
147 			return error;
148 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
149 	} else {
150 		ap->blkno = 0;
151 	}
152 
153 	xfs_bmap_adjacent(ap);
154 
155 	/*
156 	 * Realtime allocation, done through xfs_rtallocate_extent.
157 	 */
158 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
159 	rtb = ap->blkno;
160 	ap->length = ralen;
161 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
162 				&ralen, ap->wasdel, prod, &rtb);
163 	if (error)
164 		return error;
165 
166 	ap->blkno = rtb;
167 	if (ap->blkno != NULLFSBLOCK) {
168 		ap->blkno *= mp->m_sb.sb_rextsize;
169 		ralen *= mp->m_sb.sb_rextsize;
170 		ap->length = ralen;
171 		ap->ip->i_d.di_nblocks += ralen;
172 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
173 		if (ap->wasdel)
174 			ap->ip->i_delayed_blks -= ralen;
175 		/*
176 		 * Adjust the disk quota also. This was reserved
177 		 * earlier.
178 		 */
179 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
180 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
181 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
182 
183 		/* Zero the extent if we were asked to do so */
184 		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
185 			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
186 			if (error)
187 				return error;
188 		}
189 	} else {
190 		ap->length = 0;
191 	}
192 	return 0;
193 }
194 #endif /* CONFIG_XFS_RT */
195 
196 /*
197  * Check if the endoff is outside the last extent. If so the caller will grow
198  * the allocation to a stripe unit boundary.  All offsets are considered outside
199  * the end of file for an empty fork, so 1 is returned in *eof in that case.
200  */
201 int
202 xfs_bmap_eof(
203 	struct xfs_inode	*ip,
204 	xfs_fileoff_t		endoff,
205 	int			whichfork,
206 	int			*eof)
207 {
208 	struct xfs_bmbt_irec	rec;
209 	int			error;
210 
211 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
212 	if (error || *eof)
213 		return error;
214 
215 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
216 	return 0;
217 }
218 
219 /*
220  * Extent tree block counting routines.
221  */
222 
223 /*
224  * Count leaf blocks given a range of extent records.  Delayed allocation
225  * extents are not counted towards the totals.
226  */
227 xfs_extnum_t
228 xfs_bmap_count_leaves(
229 	struct xfs_ifork	*ifp,
230 	xfs_filblks_t		*count)
231 {
232 	struct xfs_iext_cursor	icur;
233 	struct xfs_bmbt_irec	got;
234 	xfs_extnum_t		numrecs = 0;
235 
236 	for_each_xfs_iext(ifp, &icur, &got) {
237 		if (!isnullstartblock(got.br_startblock)) {
238 			*count += got.br_blockcount;
239 			numrecs++;
240 		}
241 	}
242 
243 	return numrecs;
244 }
245 
246 /*
247  * Count leaf blocks given a range of extent records originally
248  * in btree format.
249  */
250 STATIC void
251 xfs_bmap_disk_count_leaves(
252 	struct xfs_mount	*mp,
253 	struct xfs_btree_block	*block,
254 	int			numrecs,
255 	xfs_filblks_t		*count)
256 {
257 	int		b;
258 	xfs_bmbt_rec_t	*frp;
259 
260 	for (b = 1; b <= numrecs; b++) {
261 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
262 		*count += xfs_bmbt_disk_get_blockcount(frp);
263 	}
264 }
265 
266 /*
267  * Recursively walks each level of a btree
268  * to count total fsblocks in use.
269  */
270 STATIC int
271 xfs_bmap_count_tree(
272 	struct xfs_mount	*mp,
273 	struct xfs_trans	*tp,
274 	struct xfs_ifork	*ifp,
275 	xfs_fsblock_t		blockno,
276 	int			levelin,
277 	xfs_extnum_t		*nextents,
278 	xfs_filblks_t		*count)
279 {
280 	int			error;
281 	struct xfs_buf		*bp, *nbp;
282 	int			level = levelin;
283 	__be64			*pp;
284 	xfs_fsblock_t           bno = blockno;
285 	xfs_fsblock_t		nextbno;
286 	struct xfs_btree_block	*block, *nextblock;
287 	int			numrecs;
288 
289 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
290 						&xfs_bmbt_buf_ops);
291 	if (error)
292 		return error;
293 	*count += 1;
294 	block = XFS_BUF_TO_BLOCK(bp);
295 
296 	if (--level) {
297 		/* Not at node above leaves, count this level of nodes */
298 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
299 		while (nextbno != NULLFSBLOCK) {
300 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
301 						XFS_BMAP_BTREE_REF,
302 						&xfs_bmbt_buf_ops);
303 			if (error)
304 				return error;
305 			*count += 1;
306 			nextblock = XFS_BUF_TO_BLOCK(nbp);
307 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
308 			xfs_trans_brelse(tp, nbp);
309 		}
310 
311 		/* Dive to the next level */
312 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
313 		bno = be64_to_cpu(*pp);
314 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
315 				count);
316 		if (error) {
317 			xfs_trans_brelse(tp, bp);
318 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
319 					 XFS_ERRLEVEL_LOW, mp);
320 			return -EFSCORRUPTED;
321 		}
322 		xfs_trans_brelse(tp, bp);
323 	} else {
324 		/* count all level 1 nodes and their leaves */
325 		for (;;) {
326 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
327 			numrecs = be16_to_cpu(block->bb_numrecs);
328 			(*nextents) += numrecs;
329 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
330 			xfs_trans_brelse(tp, bp);
331 			if (nextbno == NULLFSBLOCK)
332 				break;
333 			bno = nextbno;
334 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
335 						XFS_BMAP_BTREE_REF,
336 						&xfs_bmbt_buf_ops);
337 			if (error)
338 				return error;
339 			*count += 1;
340 			block = XFS_BUF_TO_BLOCK(bp);
341 		}
342 	}
343 	return 0;
344 }
345 
346 /*
347  * Count fsblocks of the given fork.  Delayed allocation extents are
348  * not counted towards the totals.
349  */
350 int
351 xfs_bmap_count_blocks(
352 	struct xfs_trans	*tp,
353 	struct xfs_inode	*ip,
354 	int			whichfork,
355 	xfs_extnum_t		*nextents,
356 	xfs_filblks_t		*count)
357 {
358 	struct xfs_mount	*mp;	/* file system mount structure */
359 	__be64			*pp;	/* pointer to block address */
360 	struct xfs_btree_block	*block;	/* current btree block */
361 	struct xfs_ifork	*ifp;	/* fork structure */
362 	xfs_fsblock_t		bno;	/* block # of "block" */
363 	int			level;	/* btree level, for checking */
364 	int			error;
365 
366 	bno = NULLFSBLOCK;
367 	mp = ip->i_mount;
368 	*nextents = 0;
369 	*count = 0;
370 	ifp = XFS_IFORK_PTR(ip, whichfork);
371 	if (!ifp)
372 		return 0;
373 
374 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
375 	case XFS_DINODE_FMT_EXTENTS:
376 		*nextents = xfs_bmap_count_leaves(ifp, count);
377 		return 0;
378 	case XFS_DINODE_FMT_BTREE:
379 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
380 			error = xfs_iread_extents(tp, ip, whichfork);
381 			if (error)
382 				return error;
383 		}
384 
385 		/*
386 		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
387 		 */
388 		block = ifp->if_broot;
389 		level = be16_to_cpu(block->bb_level);
390 		ASSERT(level > 0);
391 		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
392 		bno = be64_to_cpu(*pp);
393 		ASSERT(bno != NULLFSBLOCK);
394 		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
395 		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
396 
397 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
398 				nextents, count);
399 		if (error) {
400 			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
401 					XFS_ERRLEVEL_LOW, mp);
402 			return -EFSCORRUPTED;
403 		}
404 		return 0;
405 	}
406 
407 	return 0;
408 }
409 
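/*
 * Editorial usage sketch (illustrative, hedged): a caller that only needs
 * the data fork totals might use the helper above as follows, mirroring
 * the attr fork calls in xfs_swap_extent_forks() later in this file.
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *	int		error;
 *
 *	error = xfs_bmap_count_blocks(tp, ip, XFS_DATA_FORK,
 *				      &nextents, &count);
 */
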
410 static int
411 xfs_getbmap_report_one(
412 	struct xfs_inode	*ip,
413 	struct getbmapx		*bmv,
414 	struct kgetbmap		*out,
415 	int64_t			bmv_end,
416 	struct xfs_bmbt_irec	*got)
417 {
418 	struct kgetbmap		*p = out + bmv->bmv_entries;
419 	bool			shared = false, trimmed = false;
420 	int			error;
421 
422 	error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
423 	if (error)
424 		return error;
425 
426 	if (isnullstartblock(got->br_startblock) ||
427 	    got->br_startblock == DELAYSTARTBLOCK) {
428 		/*
429 		 * Delalloc extents that start beyond EOF can occur due to
430 		 * speculative EOF allocation when the delalloc extent is larger
431 		 * than the largest freespace extent at conversion time.  These
432 		 * extents cannot be converted by data writeback, so can exist
433 		 * here even if we are not supposed to be finding delalloc
434 		 * extents.
435 		 */
436 		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
437 			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
438 
439 		p->bmv_oflags |= BMV_OF_DELALLOC;
440 		p->bmv_block = -2;
441 	} else {
442 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
443 	}
444 
445 	if (got->br_state == XFS_EXT_UNWRITTEN &&
446 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
447 		p->bmv_oflags |= BMV_OF_PREALLOC;
448 
449 	if (shared)
450 		p->bmv_oflags |= BMV_OF_SHARED;
451 
452 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
453 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
454 
455 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
456 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
457 	bmv->bmv_entries++;
458 	return 0;
459 }
460 
461 static void
462 xfs_getbmap_report_hole(
463 	struct xfs_inode	*ip,
464 	struct getbmapx		*bmv,
465 	struct kgetbmap		*out,
466 	int64_t			bmv_end,
467 	xfs_fileoff_t		bno,
468 	xfs_fileoff_t		end)
469 {
470 	struct kgetbmap		*p = out + bmv->bmv_entries;
471 
472 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
473 		return;
474 
475 	p->bmv_block = -1;
476 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
477 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
478 
479 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
480 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
481 	bmv->bmv_entries++;
482 }
483 
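/*
 * Editorial note (illustrative, not part of the original source): getbmapx
 * offsets and lengths are expressed in 512-byte basic blocks, hence the
 * XFS_FSB_TO_BB() conversions above.  On a 4096-byte block filesystem, for
 * example, a 10-block hole is reported with bmv_length = 10 << 3 = 80.
 */
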
484 static inline bool
485 xfs_getbmap_full(
486 	struct getbmapx		*bmv)
487 {
488 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
489 }
490 
491 static bool
492 xfs_getbmap_next_rec(
493 	struct xfs_bmbt_irec	*rec,
494 	xfs_fileoff_t		total_end)
495 {
496 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
497 
498 	if (end == total_end)
499 		return false;
500 
501 	rec->br_startoff += rec->br_blockcount;
502 	if (!isnullstartblock(rec->br_startblock) &&
503 	    rec->br_startblock != DELAYSTARTBLOCK)
504 		rec->br_startblock += rec->br_blockcount;
505 	rec->br_blockcount = total_end - end;
506 	return true;
507 }
508 
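/*
 * Editorial worked example (illustrative, hypothetical numbers): suppose a
 * bmbt record maps file blocks [100, 120) and only the first half is
 * shared.  xfs_reflink_trim_around_shared() trims the first reported piece
 * to [100, 110); xfs_getbmap_next_rec() then advances the record to
 * [110, 120), so the loop in xfs_getbmap() below emits a second, unshared
 * getbmapx entry for the remainder.
 */
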
509 /*
510  * Get the inode's extents as described in bmv, and format them for output
511  * into the out array.  Entries are filled until all extents in the
512  * requested range have been mapped, until the passed-in bmv->bmv_count
513  * slots have been filled, or until the mapping runs past the end of the
514  * requested range.
515  */
516 int						/* error code */
517 xfs_getbmap(
518 	struct xfs_inode	*ip,
519 	struct getbmapx		*bmv,		/* user bmap structure */
520 	struct kgetbmap		*out)
521 {
522 	struct xfs_mount	*mp = ip->i_mount;
523 	int			iflags = bmv->bmv_iflags;
524 	int			whichfork, lock, error = 0;
525 	int64_t			bmv_end, max_len;
526 	xfs_fileoff_t		bno, first_bno;
527 	struct xfs_ifork	*ifp;
528 	struct xfs_bmbt_irec	got, rec;
529 	xfs_filblks_t		len;
530 	struct xfs_iext_cursor	icur;
531 
532 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
533 		return -EINVAL;
534 #ifndef DEBUG
535 	/* Only allow CoW fork queries if we're debugging. */
536 	if (iflags & BMV_IF_COWFORK)
537 		return -EINVAL;
538 #endif
539 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
540 		return -EINVAL;
541 
542 	if (bmv->bmv_length < -1)
543 		return -EINVAL;
544 	bmv->bmv_entries = 0;
545 	if (bmv->bmv_length == 0)
546 		return 0;
547 
548 	if (iflags & BMV_IF_ATTRFORK)
549 		whichfork = XFS_ATTR_FORK;
550 	else if (iflags & BMV_IF_COWFORK)
551 		whichfork = XFS_COW_FORK;
552 	else
553 		whichfork = XFS_DATA_FORK;
554 	ifp = XFS_IFORK_PTR(ip, whichfork);
555 
556 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
557 	switch (whichfork) {
558 	case XFS_ATTR_FORK:
559 		if (!XFS_IFORK_Q(ip))
560 			goto out_unlock_iolock;
561 
562 		max_len = 1LL << 32;
563 		lock = xfs_ilock_attr_map_shared(ip);
564 		break;
565 	case XFS_COW_FORK:
566 		/* No CoW fork? Just return */
567 		if (!ifp)
568 			goto out_unlock_iolock;
569 
570 		if (xfs_get_cowextsz_hint(ip))
571 			max_len = mp->m_super->s_maxbytes;
572 		else
573 			max_len = XFS_ISIZE(ip);
574 
575 		lock = XFS_ILOCK_SHARED;
576 		xfs_ilock(ip, lock);
577 		break;
578 	case XFS_DATA_FORK:
579 		if (!(iflags & BMV_IF_DELALLOC) &&
580 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
581 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
582 			if (error)
583 				goto out_unlock_iolock;
584 
585 			/*
586 			 * Even after flushing the inode, there can still be
587 			 * delalloc blocks on the inode beyond EOF due to
588 			 * speculative preallocation.  These are not removed
589 			 * until the release function is called or the inode
590 			 * is inactivated.  Hence we cannot assert here that
591 			 * ip->i_delayed_blks == 0.
592 			 */
593 		}
594 
595 		if (xfs_get_extsz_hint(ip) ||
596 		    (ip->i_d.di_flags &
597 		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
598 			max_len = mp->m_super->s_maxbytes;
599 		else
600 			max_len = XFS_ISIZE(ip);
601 
602 		lock = xfs_ilock_data_map_shared(ip);
603 		break;
604 	}
605 
606 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
607 	case XFS_DINODE_FMT_EXTENTS:
608 	case XFS_DINODE_FMT_BTREE:
609 		break;
610 	case XFS_DINODE_FMT_LOCAL:
611 		/* Local format inode forks report no extents. */
612 		goto out_unlock_ilock;
613 	default:
614 		error = -EINVAL;
615 		goto out_unlock_ilock;
616 	}
617 
618 	if (bmv->bmv_length == -1) {
619 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
620 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
621 	}
622 
623 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
624 
625 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
626 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
627 
628 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
629 		error = xfs_iread_extents(NULL, ip, whichfork);
630 		if (error)
631 			goto out_unlock_ilock;
632 	}
633 
634 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
635 		/*
636 		 * Report a whole-file hole if the delalloc flag is set to
637 		 * stay compatible with the old implementation.
638 		 */
639 		if (iflags & BMV_IF_DELALLOC)
640 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
641 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
642 		goto out_unlock_ilock;
643 	}
644 
645 	while (!xfs_getbmap_full(bmv)) {
646 		xfs_trim_extent(&got, first_bno, len);
647 
648 		/*
649 		 * Report an entry for a hole if this extent doesn't directly
650 		 * follow the previous one.
651 		 */
652 		if (got.br_startoff > bno) {
653 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
654 					got.br_startoff);
655 			if (xfs_getbmap_full(bmv))
656 				break;
657 		}
658 
659 		/*
660 		 * In order to report shared extents accurately, we report each
661 		 * distinct shared / unshared part of a single bmbt record with
662 		 * an individual getbmapx record.
663 		 */
664 		bno = got.br_startoff + got.br_blockcount;
665 		rec = got;
666 		do {
667 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
668 					&rec);
669 			if (error || xfs_getbmap_full(bmv))
670 				goto out_unlock_ilock;
671 		} while (xfs_getbmap_next_rec(&rec, bno));
672 
673 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
674 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
675 
676 			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
677 
678 			if (whichfork != XFS_ATTR_FORK && bno < end &&
679 			    !xfs_getbmap_full(bmv)) {
680 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
681 						bno, end);
682 			}
683 			break;
684 		}
685 
686 		if (bno >= first_bno + len)
687 			break;
688 	}
689 
690 out_unlock_ilock:
691 	xfs_iunlock(ip, lock);
692 out_unlock_iolock:
693 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
694 	return error;
695 }
696 
697 /*
698  * dead simple method of punching delayed allocation blocks from a range in
699  * the inode. Walks a block at a time so will be slow, but is only executed in
700  * rare error cases so the overhead is not critical. This will always punch out
701  * both the start and end blocks, even if the ranges only partially overlap
702  * them, so it is up to the caller to ensure that partial blocks are not
703  * passed in.
704  */
705 int
706 xfs_bmap_punch_delalloc_range(
707 	struct xfs_inode	*ip,
708 	xfs_fileoff_t		start_fsb,
709 	xfs_fileoff_t		length)
710 {
711 	xfs_fileoff_t		remaining = length;
712 	int			error = 0;
713 
714 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
715 
716 	do {
717 		int		done;
718 		xfs_bmbt_irec_t	imap;
719 		int		nimaps = 1;
720 		xfs_fsblock_t	firstblock;
721 		struct xfs_defer_ops dfops;
722 
723 		/*
724 		 * Map the range first and check that it is a delalloc extent
725 		 * before trying to unmap the range. Otherwise we will be
726 		 * trying to remove a real extent (which requires a
727 		 * transaction) or a hole, which is probably a bad idea...
728 		 */
729 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
730 				       XFS_BMAPI_ENTIRE);
731 
732 		if (error) {
733 			/* something screwed, just bail */
734 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
735 				xfs_alert(ip->i_mount,
736 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
737 						ip->i_ino, start_fsb);
738 			}
739 			break;
740 		}
741 		if (!nimaps) {
742 			/* nothing there */
743 			goto next_block;
744 		}
745 		if (imap.br_startblock != DELAYSTARTBLOCK) {
746 			/* been converted, ignore */
747 			goto next_block;
748 		}
749 		WARN_ON(imap.br_blockcount == 0);
750 
751 		/*
752 		 * Note: while we initialise the firstblock/dfops pair, they
753 		 * should never be used because blocks should never be
754 		 * allocated or freed for a delalloc extent and hence we don't
755 		 * need to cancel or finish them after the xfs_bunmapi() call.
756 		 */
757 		xfs_defer_init(&dfops, &firstblock);
758 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
759 					&dfops, &done);
760 		if (error)
761 			break;
762 
763 		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
764 next_block:
765 		start_fsb++;
766 		remaining--;
767 	} while(remaining > 0);
768 
769 	return error;
770 }
771 
772 /*
773  * Test whether it is appropriate to check an inode for and free post EOF
774  * blocks. The 'force' parameter determines whether we should also consider
775  * regular files that are marked preallocated or append-only.
776  */
777 bool
778 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
779 {
780 	/* prealloc/delalloc exists only on regular files */
781 	if (!S_ISREG(VFS_I(ip)->i_mode))
782 		return false;
783 
784 	/*
785 	 * Zero sized files with no cached pages and no delalloc blocks will not
786 	 * have speculative prealloc/delalloc blocks to remove.
787 	 */
788 	if (VFS_I(ip)->i_size == 0 &&
789 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
790 	    ip->i_delayed_blks == 0)
791 		return false;
792 
793 	/* If we haven't read in the extent list, then don't do it now. */
794 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
795 		return false;
796 
797 	/*
798 	 * Do not free real preallocated or append-only files unless the file
799 	 * has delalloc blocks and we are forced to remove them.
800 	 */
801 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
802 		if (!force || ip->i_delayed_blks == 0)
803 			return false;
804 
805 	return true;
806 }
807 
808 /*
809  * This is called to free any blocks beyond eof. The caller must hold
810  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
811  * reference to the inode.
812  */
813 int
814 xfs_free_eofblocks(
815 	struct xfs_inode	*ip)
816 {
817 	struct xfs_trans	*tp;
818 	int			error;
819 	xfs_fileoff_t		end_fsb;
820 	xfs_fileoff_t		last_fsb;
821 	xfs_filblks_t		map_len;
822 	int			nimaps;
823 	struct xfs_bmbt_irec	imap;
824 	struct xfs_mount	*mp = ip->i_mount;
825 
826 	/*
827 	 * Figure out if there are any blocks beyond the end
828 	 * of the file.  If not, then there is nothing to do.
829 	 */
830 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
831 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
832 	if (last_fsb <= end_fsb)
833 		return 0;
834 	map_len = last_fsb - end_fsb;
835 
836 	nimaps = 1;
837 	xfs_ilock(ip, XFS_ILOCK_SHARED);
838 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
839 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
840 
841 	/*
842 	 * If there are blocks after the end of file, truncate the file to its
843 	 * current size to free them up.
844 	 */
845 	if (!error && (nimaps != 0) &&
846 	    (imap.br_startblock != HOLESTARTBLOCK ||
847 	     ip->i_delayed_blks)) {
848 		/*
849 		 * Attach the dquots to the inode up front.
850 		 */
851 		error = xfs_qm_dqattach(ip, 0);
852 		if (error)
853 			return error;
854 
855 		/* wait on dio to ensure i_size has settled */
856 		inode_dio_wait(VFS_I(ip));
857 
858 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
859 				&tp);
860 		if (error) {
861 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
862 			return error;
863 		}
864 
865 		xfs_ilock(ip, XFS_ILOCK_EXCL);
866 		xfs_trans_ijoin(tp, ip, 0);
867 
868 		/*
869 		 * Do not update the on-disk file size.  If we update the
870 		 * on-disk file size and then the system crashes before the
871 		 * contents of the file are flushed to disk then the files
872 		 * may be full of holes (ie NULL files bug).
873 		 */
874 		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
875 					      XFS_ISIZE(ip));
876 		if (error) {
877 			/*
878 			 * If we get an error at this point we simply don't
879 			 * bother truncating the file.
880 			 */
881 			xfs_trans_cancel(tp);
882 		} else {
883 			error = xfs_trans_commit(tp);
884 			if (!error)
885 				xfs_inode_clear_eofblocks_tag(ip);
886 		}
887 
888 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
889 	}
890 	return error;
891 }
892 
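/*
 * Editorial usage sketch (illustrative, hedged): xfs_can_free_eofblocks()
 * and xfs_free_eofblocks() above are meant to be paired, as
 * xfs_prepare_shift() does later in this file:
 *
 *	if (xfs_can_free_eofblocks(ip, true)) {
 *		error = xfs_free_eofblocks(ip);
 *		if (error)
 *			return error;
 *	}
 */
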
893 int
894 xfs_alloc_file_space(
895 	struct xfs_inode	*ip,
896 	xfs_off_t		offset,
897 	xfs_off_t		len,
898 	int			alloc_type)
899 {
900 	xfs_mount_t		*mp = ip->i_mount;
901 	xfs_off_t		count;
902 	xfs_filblks_t		allocated_fsb;
903 	xfs_filblks_t		allocatesize_fsb;
904 	xfs_extlen_t		extsz, temp;
905 	xfs_fileoff_t		startoffset_fsb;
906 	xfs_fsblock_t		firstfsb;
907 	int			nimaps;
908 	int			quota_flag;
909 	int			rt;
910 	xfs_trans_t		*tp;
911 	xfs_bmbt_irec_t		imaps[1], *imapp;
912 	struct xfs_defer_ops	dfops;
913 	uint			qblocks, resblks, resrtextents;
914 	int			error;
915 
916 	trace_xfs_alloc_file_space(ip);
917 
918 	if (XFS_FORCED_SHUTDOWN(mp))
919 		return -EIO;
920 
921 	error = xfs_qm_dqattach(ip, 0);
922 	if (error)
923 		return error;
924 
925 	if (len <= 0)
926 		return -EINVAL;
927 
928 	rt = XFS_IS_REALTIME_INODE(ip);
929 	extsz = xfs_get_extsz_hint(ip);
930 
931 	count = len;
932 	imapp = &imaps[0];
933 	nimaps = 1;
934 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
935 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
936 
937 	/*
938 	 * Allocate file space until done or until there is an error
939 	 */
940 	while (allocatesize_fsb && !error) {
941 		xfs_fileoff_t	s, e;
942 
943 		/*
944 		 * Determine space reservations for data/realtime.
945 		 */
946 		if (unlikely(extsz)) {
947 			s = startoffset_fsb;
948 			do_div(s, extsz);
949 			s *= extsz;
950 			e = startoffset_fsb + allocatesize_fsb;
951 			if ((temp = do_mod(startoffset_fsb, extsz)))
952 				e += temp;
953 			if ((temp = do_mod(e, extsz)))
954 				e += extsz - temp;
955 		} else {
956 			s = 0;
957 			e = allocatesize_fsb;
958 		}
959 
960 		/*
961 		 * The transaction reservation is limited to a 32-bit block
962 		 * count, hence we need to limit the number of blocks we are
963 		 * trying to reserve to avoid an overflow. We can't allocate
964 		 * more than @nimaps extents, and an extent is limited on disk
965 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
966 		 */
967 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
968 		if (unlikely(rt)) {
969 			resrtextents = qblocks = resblks;
970 			resrtextents /= mp->m_sb.sb_rextsize;
971 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
972 			quota_flag = XFS_QMOPT_RES_RTBLKS;
973 		} else {
974 			resrtextents = 0;
975 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
976 			quota_flag = XFS_QMOPT_RES_REGBLKS;
977 		}
978 
979 		/*
980 		 * Allocate and setup the transaction.
981 		 */
982 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
983 				resrtextents, 0, &tp);
984 
985 		/*
986 		 * Check for running out of space
987 		 */
988 		if (error) {
989 			/*
990 			 * Free the transaction structure.
991 			 */
992 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
993 			break;
994 		}
995 		xfs_ilock(ip, XFS_ILOCK_EXCL);
996 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
997 						      0, quota_flag);
998 		if (error)
999 			goto error1;
1000 
1001 		xfs_trans_ijoin(tp, ip, 0);
1002 
1003 		xfs_defer_init(&dfops, &firstfsb);
1004 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1005 					allocatesize_fsb, alloc_type, &firstfsb,
1006 					resblks, imapp, &nimaps, &dfops);
1007 		if (error)
1008 			goto error0;
1009 
1010 		/*
1011 		 * Complete the transaction
1012 		 */
1013 		error = xfs_defer_finish(&tp, &dfops);
1014 		if (error)
1015 			goto error0;
1016 
1017 		error = xfs_trans_commit(tp);
1018 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1019 		if (error)
1020 			break;
1021 
1022 		allocated_fsb = imapp->br_blockcount;
1023 
1024 		if (nimaps == 0) {
1025 			error = -ENOSPC;
1026 			break;
1027 		}
1028 
1029 		startoffset_fsb += allocated_fsb;
1030 		allocatesize_fsb -= allocated_fsb;
1031 	}
1032 
1033 	return error;
1034 
1035 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1036 	xfs_defer_cancel(&dfops);
1037 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1038 
1039 error1:	/* Just cancel transaction */
1040 	xfs_trans_cancel(tp);
1041 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1042 	return error;
1043 }
1044 
1045 static int
1046 xfs_unmap_extent(
1047 	struct xfs_inode	*ip,
1048 	xfs_fileoff_t		startoffset_fsb,
1049 	xfs_filblks_t		len_fsb,
1050 	int			*done)
1051 {
1052 	struct xfs_mount	*mp = ip->i_mount;
1053 	struct xfs_trans	*tp;
1054 	struct xfs_defer_ops	dfops;
1055 	xfs_fsblock_t		firstfsb;
1056 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1057 	int			error;
1058 
1059 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1060 	if (error) {
1061 		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1062 		return error;
1063 	}
1064 
1065 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1066 	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1067 			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1068 	if (error)
1069 		goto out_trans_cancel;
1070 
1071 	xfs_trans_ijoin(tp, ip, 0);
1072 
1073 	xfs_defer_init(&dfops, &firstfsb);
1074 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
1075 			&dfops, done);
1076 	if (error)
1077 		goto out_bmap_cancel;
1078 
1079 	xfs_defer_ijoin(&dfops, ip);
1080 	error = xfs_defer_finish(&tp, &dfops);
1081 	if (error)
1082 		goto out_bmap_cancel;
1083 
1084 	error = xfs_trans_commit(tp);
1085 out_unlock:
1086 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1087 	return error;
1088 
1089 out_bmap_cancel:
1090 	xfs_defer_cancel(&dfops);
1091 out_trans_cancel:
1092 	xfs_trans_cancel(tp);
1093 	goto out_unlock;
1094 }
1095 
1096 static int
1097 xfs_adjust_extent_unmap_boundaries(
1098 	struct xfs_inode	*ip,
1099 	xfs_fileoff_t		*startoffset_fsb,
1100 	xfs_fileoff_t		*endoffset_fsb)
1101 {
1102 	struct xfs_mount	*mp = ip->i_mount;
1103 	struct xfs_bmbt_irec	imap;
1104 	int			nimap, error;
1105 	xfs_extlen_t		mod = 0;
1106 
1107 	nimap = 1;
1108 	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1109 	if (error)
1110 		return error;
1111 
1112 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1113 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1114 		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
1115 		if (mod)
1116 			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1117 	}
1118 
1119 	nimap = 1;
1120 	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1121 	if (error)
1122 		return error;
1123 
1124 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1125 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1126 		mod++;
1127 		if (mod && mod != mp->m_sb.sb_rextsize)
1128 			*endoffset_fsb -= mod;
1129 	}
1130 
1131 	return 0;
1132 }
1133 
1134 static int
1135 xfs_flush_unmap_range(
1136 	struct xfs_inode	*ip,
1137 	xfs_off_t		offset,
1138 	xfs_off_t		len)
1139 {
1140 	struct xfs_mount	*mp = ip->i_mount;
1141 	struct inode		*inode = VFS_I(ip);
1142 	xfs_off_t		rounding, start, end;
1143 	int			error;
1144 
1145 	/* wait for the completion of any pending DIOs */
1146 	inode_dio_wait(inode);
1147 
1148 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1149 	start = round_down(offset, rounding);
1150 	end = round_up(offset + len, rounding) - 1;
1151 
1152 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1153 	if (error)
1154 		return error;
1155 	truncate_pagecache_range(inode, start, end);
1156 	return 0;
1157 }
1158 
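/*
 * Editorial worked example (illustrative only): with 4096-byte blocks and a
 * 4096-byte PAGE_SIZE, rounding above is 4096.  A call with offset = 6000
 * and len = 3000 therefore flushes and truncates the byte range
 * [4096, 12287], covering every whole page that overlaps the punch.
 */
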
1159 int
1160 xfs_free_file_space(
1161 	struct xfs_inode	*ip,
1162 	xfs_off_t		offset,
1163 	xfs_off_t		len)
1164 {
1165 	struct xfs_mount	*mp = ip->i_mount;
1166 	xfs_fileoff_t		startoffset_fsb;
1167 	xfs_fileoff_t		endoffset_fsb;
1168 	int			done = 0, error;
1169 
1170 	trace_xfs_free_file_space(ip);
1171 
1172 	error = xfs_qm_dqattach(ip, 0);
1173 	if (error)
1174 		return error;
1175 
1176 	if (len <= 0)	/* if nothing being freed */
1177 		return 0;
1178 
1179 	error = xfs_flush_unmap_range(ip, offset, len);
1180 	if (error)
1181 		return error;
1182 
1183 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1184 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1185 
1186 	/*
1187 	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
1188 	 * and we can't use unwritten extents then we actually need to zero
1189 	 * the whole extent, otherwise we just need to take care of the block
1190 	 * boundaries, and xfs_bunmapi will handle the rest.
1191 	 */
1192 	if (XFS_IS_REALTIME_INODE(ip) &&
1193 	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1194 		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1195 				&endoffset_fsb);
1196 		if (error)
1197 			return error;
1198 	}
1199 
1200 	if (endoffset_fsb > startoffset_fsb) {
1201 		while (!done) {
1202 			error = xfs_unmap_extent(ip, startoffset_fsb,
1203 					endoffset_fsb - startoffset_fsb, &done);
1204 			if (error)
1205 				return error;
1206 		}
1207 	}
1208 
1209 	/*
1210 	 * Now that we've unmapped all full blocks we'll have to zero out any
1211 	 * partial block at the beginning and/or end.  xfs_zero_range is
1212 	 * smart enough to skip any holes, including those we just created,
1213 	 * but we must take care not to zero beyond EOF and enlarge i_size.
1214 	 */
1215 
1216 	if (offset >= XFS_ISIZE(ip))
1217 		return 0;
1218 
1219 	if (offset + len > XFS_ISIZE(ip))
1220 		len = XFS_ISIZE(ip) - offset;
1221 
1222 	return xfs_zero_range(ip, offset, len, NULL);
1223 }
1224 
1225 /*
1226  * Preallocate and zero a range of a file. This mechanism has the allocation
1227  * semantics of fallocate and in addition converts data in the range to zeroes.
1228  */
1229 int
1230 xfs_zero_file_space(
1231 	struct xfs_inode	*ip,
1232 	xfs_off_t		offset,
1233 	xfs_off_t		len)
1234 {
1235 	struct xfs_mount	*mp = ip->i_mount;
1236 	uint			blksize;
1237 	int			error;
1238 
1239 	trace_xfs_zero_file_space(ip);
1240 
1241 	blksize = 1 << mp->m_sb.sb_blocklog;
1242 
1243 	/*
1244 	 * Punch a hole and prealloc the range. We use hole punch rather than
1245 	 * unwritten extent conversion for two reasons:
1246 	 *
1247 	 * 1.) Hole punch handles partial block zeroing for us.
1248 	 *
1249 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1250 	 * by virtue of the hole punch.
1251 	 */
1252 	error = xfs_free_file_space(ip, offset, len);
1253 	if (error)
1254 		goto out;
1255 
1256 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1257 				     round_up(offset + len, blksize) -
1258 				     round_down(offset, blksize),
1259 				     XFS_BMAPI_PREALLOC);
1260 out:
1261 	return error;
1262 
1263 }
1264 
1265 static int
1266 xfs_prepare_shift(
1267 	struct xfs_inode	*ip,
1268 	loff_t			offset)
1269 {
1270 	int			error;
1271 
1272 	/*
1273 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1274 	 * into the accessible region of the file.
1275 	 */
1276 	if (xfs_can_free_eofblocks(ip, true)) {
1277 		error = xfs_free_eofblocks(ip);
1278 		if (error)
1279 			return error;
1280 	}
1281 
1282 	/*
1283 	 * Writeback and invalidate cache for the remainder of the file as we're
1284 	 * about to shift down every extent from offset to EOF.
1285 	 */
1286 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
1287 	if (error)
1288 		return error;
1289 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1290 					offset >> PAGE_SHIFT, -1);
1291 	if (error)
1292 		return error;
1293 
1294 	/*
1295 	 * Clean out anything hanging around in the cow fork now that
1296 	 * we've flushed all the dirty data out to disk to avoid having
1297 	 * CoW extents at the wrong offsets.
1298 	 */
1299 	if (xfs_is_reflink_inode(ip)) {
1300 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1301 				true);
1302 		if (error)
1303 			return error;
1304 	}
1305 
1306 	return 0;
1307 }
1308 
1309 /*
1310  * xfs_collapse_file_space()
1311  *	This routine frees disk space and shifts extents for the given file.
1312  *	The first thing we do is free data blocks in the specified range by
1313  *	calling xfs_free_file_space(), which also syncs dirty data and
1314  *	invalidates the page cache over the region on which the collapse
1315  *	range is working. Then extent records are shifted left to cover the hole.
1316  * RETURNS:
1317  *	0 on success
1318  *	errno on error
1319  *
1320  */
1321 int
1322 xfs_collapse_file_space(
1323 	struct xfs_inode	*ip,
1324 	xfs_off_t		offset,
1325 	xfs_off_t		len)
1326 {
1327 	struct xfs_mount	*mp = ip->i_mount;
1328 	struct xfs_trans	*tp;
1329 	int			error;
1330 	struct xfs_defer_ops	dfops;
1331 	xfs_fsblock_t		first_block;
1332 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1333 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1334 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1335 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1336 	bool			done = false;
1337 
1338 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1339 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1340 
1341 	trace_xfs_collapse_file_space(ip);
1342 
1343 	error = xfs_free_file_space(ip, offset, len);
1344 	if (error)
1345 		return error;
1346 
1347 	error = xfs_prepare_shift(ip, offset);
1348 	if (error)
1349 		return error;
1350 
1351 	while (!error && !done) {
1352 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1353 					&tp);
1354 		if (error)
1355 			break;
1356 
1357 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1358 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1359 				ip->i_gdquot, ip->i_pdquot, resblks, 0,
1360 				XFS_QMOPT_RES_REGBLKS);
1361 		if (error)
1362 			goto out_trans_cancel;
1363 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1364 
1365 		xfs_defer_init(&dfops, &first_block);
1366 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1367 				&done, stop_fsb, &first_block, &dfops);
1368 		if (error)
1369 			goto out_bmap_cancel;
1370 
1371 		error = xfs_defer_finish(&tp, &dfops);
1372 		if (error)
1373 			goto out_bmap_cancel;
1374 		error = xfs_trans_commit(tp);
1375 	}
1376 
1377 	return error;
1378 
1379 out_bmap_cancel:
1380 	xfs_defer_cancel(&dfops);
1381 out_trans_cancel:
1382 	xfs_trans_cancel(tp);
1383 	return error;
1384 }
1385 
1386 /*
1387  * xfs_insert_file_space()
1388  *	This routine creates hole space by shifting extents for the given file.
1389  *	The first thing we do is sync dirty data and invalidate the page cache
1390  *	over the region on which the insert range is working. Then we split the
1391  *	extent at the given offset into two extents by calling
1392  *	xfs_bmap_split_extent, and shift all extent records lying between
1393  *	[offset, last allocated extent] to the right to create the hole range.
1394  * RETURNS:
1395  *	0 on success
1396  *	errno on error
1397  */
1398 int
1399 xfs_insert_file_space(
1400 	struct xfs_inode	*ip,
1401 	loff_t			offset,
1402 	loff_t			len)
1403 {
1404 	struct xfs_mount	*mp = ip->i_mount;
1405 	struct xfs_trans	*tp;
1406 	int			error;
1407 	struct xfs_defer_ops	dfops;
1408 	xfs_fsblock_t		first_block;
1409 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1410 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1411 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1412 	bool			done = false;
1413 
1414 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1415 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1416 
1417 	trace_xfs_insert_file_space(ip);
1418 
1419 	error = xfs_prepare_shift(ip, offset);
1420 	if (error)
1421 		return error;
1422 
1423 	/*
1424 	 * The extent shifting code works on extent granularity. So, if stop_fsb
1425 	 * is not the starting block of an extent, we need to split the extent at
1426 	 * stop_fsb.
1427 	 */
1428 	error = xfs_bmap_split_extent(ip, stop_fsb);
1429 	if (error)
1430 		return error;
1431 
1432 	while (!error && !done) {
1433 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
1434 					&tp);
1435 		if (error)
1436 			break;
1437 
1438 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1439 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1440 		xfs_defer_init(&dfops, &first_block);
1441 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1442 				&done, stop_fsb, &first_block, &dfops);
1443 		if (error)
1444 			goto out_bmap_cancel;
1445 
1446 		error = xfs_defer_finish(&tp, &dfops);
1447 		if (error)
1448 			goto out_bmap_cancel;
1449 		error = xfs_trans_commit(tp);
1450 	}
1451 
1452 	return error;
1453 
1454 out_bmap_cancel:
1455 	xfs_defer_cancel(&dfops);
1456 	xfs_trans_cancel(tp);
1457 	return error;
1458 }
1459 
1460 /*
1461  * We need to check that the format of the data fork in the temporary inode is
1462  * valid for the target inode before doing the swap. This is not a problem with
1463  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1464  * data fork depending on the space the attribute fork is taking so we can get
1465  * invalid formats on the target inode.
1466  *
1467  * E.g. target has space for 7 extents in extent format, temp inode only has
1468  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1469  * btree, but when swapped it needs to be in extent format. Hence we can't just
1470  * blindly swap data forks on attr2 filesystems.
1471  *
1472  * Note that we check the swap in both directions so that we don't end up with
1473  * a corrupt temporary inode, either.
1474  *
1475  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1476  * inode will prevent this situation from occurring, so all we do here is
1477  * reject and log the attempt. basically we are putting the responsibility on
1478  * userspace to get this right.
1479  */
1480 static int
1481 xfs_swap_extents_check_format(
1482 	struct xfs_inode	*ip,	/* target inode */
1483 	struct xfs_inode	*tip)	/* tmp inode */
1484 {
1485 
1486 	/* Should never get a local format */
1487 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1488 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1489 		return -EINVAL;
1490 
1491 	/*
1492 	 * if the target inode has fewer extents than the temporary inode then
1493 	 * why did userspace call us?
1494 	 */
1495 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1496 		return -EINVAL;
1497 
1498 	/*
1499 	 * If we have to use the (expensive) rmap swap method, we can
1500 	 * handle any number of extents and any format.
1501 	 */
1502 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1503 		return 0;
1504 
1505 	/*
1506 	 * if the target inode is in extent form and the temp inode is in btree
1507 	 * form then we will end up with the target inode in the wrong format
1508 	 * as we already know there are fewer extents in the temp inode.
1509 	 */
1510 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1511 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1512 		return -EINVAL;
1513 
1514 	/* Check temp in extent form to max in target */
1515 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1516 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1517 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1518 		return -EINVAL;
1519 
1520 	/* Check target in extent form to max in temp */
1521 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1522 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1523 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1524 		return -EINVAL;
1525 
1526 	/*
1527 	 * If we are in a btree format, check that the temp root block will fit
1528 	 * in the target and that it has enough extents to be in btree format
1529 	 * in the target.
1530 	 *
1531 	 * Note that we have to be careful to allow btree->extent conversions
1532 	 * (a common defrag case) which will occur when the temp inode is in
1533 	 * extent format...
1534 	 */
1535 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1536 		if (XFS_IFORK_Q(ip) &&
1537 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1538 			return -EINVAL;
1539 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1540 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1541 			return -EINVAL;
1542 	}
1543 
1544 	/* Reciprocal target->temp btree format checks */
1545 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1546 		if (XFS_IFORK_Q(tip) &&
1547 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1548 			return -EINVAL;
1549 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1550 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1551 			return -EINVAL;
1552 	}
1553 
1554 	return 0;
1555 }
1556 
1557 static int
1558 xfs_swap_extent_flush(
1559 	struct xfs_inode	*ip)
1560 {
1561 	int	error;
1562 
1563 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1564 	if (error)
1565 		return error;
1566 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1567 
1568 	/* Verify O_DIRECT for ftmp */
1569 	if (VFS_I(ip)->i_mapping->nrpages)
1570 		return -EINVAL;
1571 	return 0;
1572 }
1573 
1574 /*
1575  * Move extents from one file to another, when rmap is enabled.
1576  */
1577 STATIC int
1578 xfs_swap_extent_rmap(
1579 	struct xfs_trans		**tpp,
1580 	struct xfs_inode		*ip,
1581 	struct xfs_inode		*tip)
1582 {
1583 	struct xfs_bmbt_irec		irec;
1584 	struct xfs_bmbt_irec		uirec;
1585 	struct xfs_bmbt_irec		tirec;
1586 	xfs_fileoff_t			offset_fsb;
1587 	xfs_fileoff_t			end_fsb;
1588 	xfs_filblks_t			count_fsb;
1589 	xfs_fsblock_t			firstfsb;
1590 	struct xfs_defer_ops		dfops;
1591 	int				error;
1592 	xfs_filblks_t			ilen;
1593 	xfs_filblks_t			rlen;
1594 	int				nimaps;
1595 	uint64_t			tip_flags2;
1596 
1597 	/*
1598 	 * If the source file has shared blocks, we must flag the donor
1599 	 * file as having shared blocks so that we get the shared-block
1600 	 * rmap functions when we go to fix up the rmaps.  The flags
1601 	 * will be switched for real later.
1602 	 */
1603 	tip_flags2 = tip->i_d.di_flags2;
1604 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1605 		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1606 
1607 	offset_fsb = 0;
1608 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1609 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1610 
1611 	while (count_fsb) {
1612 		/* Read extent from the donor file */
1613 		nimaps = 1;
1614 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1615 				&nimaps, 0);
1616 		if (error)
1617 			goto out;
1618 		ASSERT(nimaps == 1);
1619 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1620 
1621 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1622 		ilen = tirec.br_blockcount;
1623 
1624 		/* Unmap the old blocks in the source file. */
1625 		while (tirec.br_blockcount) {
1626 			xfs_defer_init(&dfops, &firstfsb);
1627 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1628 
1629 			/* Read extent from the source file */
1630 			nimaps = 1;
1631 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1632 					tirec.br_blockcount, &irec,
1633 					&nimaps, 0);
1634 			if (error)
1635 				goto out_defer;
1636 			ASSERT(nimaps == 1);
1637 			ASSERT(tirec.br_startoff == irec.br_startoff);
1638 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1639 
1640 			/* Trim the extent. */
1641 			uirec = tirec;
1642 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1643 					tirec.br_blockcount,
1644 					irec.br_blockcount);
1645 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1646 
1647 			/* Remove the mapping from the donor file. */
1648 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1649 					tip, &uirec);
1650 			if (error)
1651 				goto out_defer;
1652 
1653 			/* Remove the mapping from the source file. */
1654 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1655 					ip, &irec);
1656 			if (error)
1657 				goto out_defer;
1658 
1659 			/* Map the donor file's blocks into the source file. */
1660 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1661 					ip, &uirec);
1662 			if (error)
1663 				goto out_defer;
1664 
1665 			/* Map the source file's blocks into the donor file. */
1666 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1667 					tip, &irec);
1668 			if (error)
1669 				goto out_defer;
1670 
1671 			xfs_defer_ijoin(&dfops, ip);
1672 			error = xfs_defer_finish(tpp, &dfops);
1673 			if (error)
1674 				goto out_defer;
1675 
1676 			tirec.br_startoff += rlen;
1677 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1678 			    tirec.br_startblock != DELAYSTARTBLOCK)
1679 				tirec.br_startblock += rlen;
1680 			tirec.br_blockcount -= rlen;
1681 		}
1682 
1683 		/* Roll on... */
1684 		count_fsb -= ilen;
1685 		offset_fsb += ilen;
1686 	}
1687 
1688 	tip->i_d.di_flags2 = tip_flags2;
1689 	return 0;
1690 
1691 out_defer:
1692 	xfs_defer_cancel(&dfops);
1693 out:
1694 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1695 	tip->i_d.di_flags2 = tip_flags2;
1696 	return error;
1697 }
1698 
1699 /* Swap the extents of two files by swapping data forks. */
1700 STATIC int
1701 xfs_swap_extent_forks(
1702 	struct xfs_trans	*tp,
1703 	struct xfs_inode	*ip,
1704 	struct xfs_inode	*tip,
1705 	int			*src_log_flags,
1706 	int			*target_log_flags)
1707 {
1708 	struct xfs_ifork	tempifp, *ifp, *tifp;
1709 	xfs_filblks_t		aforkblks = 0;
1710 	xfs_filblks_t		taforkblks = 0;
1711 	xfs_extnum_t		junk;
1712 	uint64_t		tmp;
1713 	int			error;
1714 
1715 	/*
1716 	 * Count the number of extended attribute blocks
1717 	 */
1718 	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1719 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1720 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1721 				&aforkblks);
1722 		if (error)
1723 			return error;
1724 	}
1725 	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1726 	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1727 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1728 				&taforkblks);
1729 		if (error)
1730 			return error;
1731 	}
1732 
1733 	/*
1734 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1735 	 * block headers. We can't start changing the bmbt blocks until the
1736 	 * inode owner change is logged so recovery does the right thing in the
1737 	 * event of a crash. Set the owner change log flags now and leave the
1738 	 * bmbt scan as the last step.
1739 	 */
1740 	if (ip->i_d.di_version == 3 &&
1741 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1742 		(*target_log_flags) |= XFS_ILOG_DOWNER;
1743 	if (tip->i_d.di_version == 3 &&
1744 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1745 		(*src_log_flags) |= XFS_ILOG_DOWNER;
1746 
1747 	/*
1748 	 * Swap the data forks of the inodes
1749 	 */
1750 	ifp = &ip->i_df;
1751 	tifp = &tip->i_df;
1752 	tempifp = *ifp;		/* struct copy */
1753 	*ifp = *tifp;		/* struct copy */
1754 	*tifp = tempifp;	/* struct copy */
1755 
1756 	/*
1757 	 * Fix the on-disk inode values
1758 	 */
1759 	tmp = (uint64_t)ip->i_d.di_nblocks;
1760 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1761 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1762 
1763 	tmp = (uint64_t) ip->i_d.di_nextents;
1764 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1765 	tip->i_d.di_nextents = tmp;
1766 
1767 	tmp = (uint64_t) ip->i_d.di_format;
1768 	ip->i_d.di_format = tip->i_d.di_format;
1769 	tip->i_d.di_format = tmp;
1770 
1771 	/*
1772 	 * The extents in the source inode could still contain speculative
1773 	 * preallocation beyond EOF (e.g. the file is open but not modified
1774 	 * while defrag is in progress). In that case, we need to copy over the
1775 	 * number of delalloc blocks the data fork in the source inode is
1776 	 * tracking beyond EOF so that when the fork is truncated away when the
1777 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1778 	 * counter on that inode.
1779 	 */
1780 	ASSERT(tip->i_delayed_blks == 0);
1781 	tip->i_delayed_blks = ip->i_delayed_blks;
1782 	ip->i_delayed_blks = 0;
1783 
1784 	switch (ip->i_d.di_format) {
1785 	case XFS_DINODE_FMT_EXTENTS:
1786 		(*src_log_flags) |= XFS_ILOG_DEXT;
1787 		break;
1788 	case XFS_DINODE_FMT_BTREE:
1789 		ASSERT(ip->i_d.di_version < 3 ||
1790 		       (*src_log_flags & XFS_ILOG_DOWNER));
1791 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1792 		break;
1793 	}
1794 
1795 	switch (tip->i_d.di_format) {
1796 	case XFS_DINODE_FMT_EXTENTS:
1797 		(*target_log_flags) |= XFS_ILOG_DEXT;
1798 		break;
1799 	case XFS_DINODE_FMT_BTREE:
1800 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1801 		ASSERT(tip->i_d.di_version < 3 ||
1802 		       (*target_log_flags & XFS_ILOG_DOWNER));
1803 		break;
1804 	}
1805 
1806 	return 0;
1807 }
1808 
1809 /*
1810  * Fix up the owners of the bmbt blocks to refer to the current inode. The
1811  * change owner scan attempts to order all modified buffers in the current
1812  * transaction. In the event of ordered buffer failure, the offending buffer is
1813  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1814  * the transaction in this case to replenish the fallback log reservation and
1815  * restart the scan. This process repeats until the scan completes.
1816  */
1817 static int
1818 xfs_swap_change_owner(
1819 	struct xfs_trans	**tpp,
1820 	struct xfs_inode	*ip,
1821 	struct xfs_inode	*tmpip)
1822 {
1823 	int			error;
1824 	struct xfs_trans	*tp = *tpp;
1825 
1826 	do {
1827 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1828 					      NULL);
1829 		/* success or fatal error */
1830 		if (error != -EAGAIN)
1831 			break;
1832 
1833 		error = xfs_trans_roll(tpp);
1834 		if (error)
1835 			break;
1836 		tp = *tpp;
1837 
1838 		/*
1839 		 * Redirty both inodes so they can relog and keep the log tail
1840 		 * moving forward.
1841 		 */
1842 		xfs_trans_ijoin(tp, ip, 0);
1843 		xfs_trans_ijoin(tp, tmpip, 0);
1844 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1845 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1846 	} while (true);
1847 
1848 	return error;
1849 }
1850 
1851 int
1852 xfs_swap_extents(
1853 	struct xfs_inode	*ip,	/* target inode */
1854 	struct xfs_inode	*tip,	/* tmp inode */
1855 	struct xfs_swapext	*sxp)
1856 {
1857 	struct xfs_mount	*mp = ip->i_mount;
1858 	struct xfs_trans	*tp;
1859 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1860 	int			src_log_flags, target_log_flags;
1861 	int			error = 0;
1862 	int			lock_flags;
1863 	struct xfs_ifork	*cowfp;
1864 	uint64_t		f;
1865 	int			resblks = 0;
1866 
1867 	/*
1868 	 * Lock the inodes against other IO, page faults and truncate to
1869 	 * begin with.  Then we can safely ensure the inodes are flushed and
1870 	 * have no page cache. Once we have done this we can take the ilocks and
1871 	 * do the rest of the checks.
1872 	 */
1873 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1874 	lock_flags = XFS_MMAPLOCK_EXCL;
1875 	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1876 
1877 	/* Verify that both files have the same format */
1878 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1879 		error = -EINVAL;
1880 		goto out_unlock;
1881 	}
1882 
1883 	/* Verify both files are either real-time or non-realtime */
1884 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1885 		error = -EINVAL;
1886 		goto out_unlock;
1887 	}
1888 
1889 	error = xfs_swap_extent_flush(ip);
1890 	if (error)
1891 		goto out_unlock;
1892 	error = xfs_swap_extent_flush(tip);
1893 	if (error)
1894 		goto out_unlock;
1895 
1896 	/*
1897 	 * Extent "swapping" with rmap requires a permanent reservation and
1898 	 * a block reservation because it's really just a remap operation
1899 	 * performed with log redo items!
1900 	 */
1901 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1902 		/*
1903 		 * Conceptually this shouldn't affect the shape of either
1904 		 * bmbt, but since we atomically move extents one by one,
1905 		 * we reserve enough space to rebuild both trees.
1906 		 */
1907 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
1908 				XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
1909 				XFS_DATA_FORK) +
1910 			  XFS_SWAP_RMAP_SPACE_RES(mp,
1911 				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
1912 				XFS_DATA_FORK);
1913 	}
1914 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1915 	if (error)
1916 		goto out_unlock;
1917 
1918 	/*
1919 	 * Lock and join the inodes to the transaction so that transaction commit
1920 	 * or cancel will unlock the inodes from this point onwards.
1921 	 */
1922 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1923 	lock_flags |= XFS_ILOCK_EXCL;
1924 	xfs_trans_ijoin(tp, ip, 0);
1925 	xfs_trans_ijoin(tp, tip, 0);
1926 
1927 
1928 	/* Verify all data are being swapped */
1929 	if (sxp->sx_offset != 0 ||
1930 	    sxp->sx_length != ip->i_d.di_size ||
1931 	    sxp->sx_length != tip->i_d.di_size) {
1932 		error = -EFAULT;
1933 		goto out_trans_cancel;
1934 	}
1935 
1936 	trace_xfs_swap_extent_before(ip, 0);
1937 	trace_xfs_swap_extent_before(tip, 1);
1938 
1939 	/* check inode formats now that data is flushed */
1940 	error = xfs_swap_extents_check_format(ip, tip);
1941 	if (error) {
1942 		xfs_notice(mp,
1943 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1944 				__func__, ip->i_ino);
1945 		goto out_trans_cancel;
1946 	}
1947 
1948 	/*
1949 	 * Compare the current change & modify times with those
1950 	 * passed in.  If they differ, we abort this swap.
1951 	 * This is the mechanism used to assure the calling
1952 	 * process that the file was not changed out from
1953 	 * under it.
1954 	 */
1955 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1956 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1957 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1958 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1959 		error = -EBUSY;
1960 		goto out_trans_cancel;
1961 	}
1962 
1963 	/*
1964 	 * Note the trickiness in setting the log flags - we set the owner log
1965 	 * flag on the opposite inode (i.e. the inode we are setting the new
1966 	 * owner to be) because once we swap the forks and log that, log
1967 	 * recovery is going to see the fork as owned by the swapped inode,
1968 	 * not the pre-swapped inodes.
1969 	 */
1970 	src_log_flags = XFS_ILOG_CORE;
1971 	target_log_flags = XFS_ILOG_CORE;
1972 
1973 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1974 		error = xfs_swap_extent_rmap(&tp, ip, tip);
1975 	else
1976 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1977 				&target_log_flags);
1978 	if (error)
1979 		goto out_trans_cancel;
1980 
1981 	/* Do we have to swap reflink flags? */
1982 	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1983 	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1984 		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1985 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1986 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1987 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1988 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
1989 	}
1990 
1991 	/* Swap the cow forks. */
1992 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1993 		xfs_extnum_t	extnum;
1994 
1995 		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1996 		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1997 
1998 		extnum = ip->i_cnextents;
1999 		ip->i_cnextents = tip->i_cnextents;
2000 		tip->i_cnextents = extnum;
2001 
2002 		cowfp = ip->i_cowfp;
2003 		ip->i_cowfp = tip->i_cowfp;
2004 		tip->i_cowfp = cowfp;
2005 
2006 		if (ip->i_cowfp && ip->i_cnextents)
2007 			xfs_inode_set_cowblocks_tag(ip);
2008 		else
2009 			xfs_inode_clear_cowblocks_tag(ip);
2010 		if (tip->i_cowfp && tip->i_cnextents)
2011 			xfs_inode_set_cowblocks_tag(tip);
2012 		else
2013 			xfs_inode_clear_cowblocks_tag(tip);
2014 	}
2015 
2016 	xfs_trans_log_inode(tp, ip,  src_log_flags);
2017 	xfs_trans_log_inode(tp, tip, target_log_flags);
2018 
2019 	/*
2020 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2021 	 * have inode number owner values in the bmbt blocks that still refer to
2022 	 * the old inode. Scan each bmbt to fix up the owner values with the
2023 	 * inode number of the current inode.
2024 	 */
2025 	if (src_log_flags & XFS_ILOG_DOWNER) {
2026 		error = xfs_swap_change_owner(&tp, ip, tip);
2027 		if (error)
2028 			goto out_trans_cancel;
2029 	}
2030 	if (target_log_flags & XFS_ILOG_DOWNER) {
2031 		error = xfs_swap_change_owner(&tp, tip, ip);
2032 		if (error)
2033 			goto out_trans_cancel;
2034 	}
2035 
2036 	/*
2037 	 * If this is a synchronous mount, make sure that the
2038 	 * transaction goes to disk before returning to the user.
2039 	 */
2040 	if (mp->m_flags & XFS_MOUNT_WSYNC)
2041 		xfs_trans_set_sync(tp);
2042 
2043 	error = xfs_trans_commit(tp);
2044 
2045 	trace_xfs_swap_extent_after(ip, 0);
2046 	trace_xfs_swap_extent_after(tip, 1);
2047 
2048 out_unlock:
2049 	xfs_iunlock(ip, lock_flags);
2050 	xfs_iunlock(tip, lock_flags);
2051 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2052 	return error;
2053 
2054 out_trans_cancel:
2055 	xfs_trans_cancel(tp);
2056 	goto out_unlock;
2057 }
2058