xref: /openbmc/linux/fs/xfs/xfs_bmap_util.c (revision faa16bc4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * Copyright (c) 2012 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_bit.h"
14 #include "xfs_mount.h"
15 #include "xfs_da_format.h"
16 #include "xfs_defer.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_extfree_item.h"
21 #include "xfs_alloc.h"
22 #include "xfs_bmap.h"
23 #include "xfs_bmap_util.h"
24 #include "xfs_bmap_btree.h"
25 #include "xfs_rtalloc.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_trace.h"
30 #include "xfs_icache.h"
31 #include "xfs_log.h"
32 #include "xfs_rmap_btree.h"
33 #include "xfs_iomap.h"
34 #include "xfs_reflink.h"
35 #include "xfs_refcount.h"
36 
37 /* Kernel-only BMAP-related definitions and functions */
38 
39 /*
40  * Convert the given file system block to a disk block.  We have to treat it
41  * differently based on whether the file is a real time file or not, because the
42  * bmap code does.
43  */
44 xfs_daddr_t
45 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
46 {
47 	return (XFS_IS_REALTIME_INODE(ip) ?
48 		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
49 		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
50 }
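/*
 * Example (illustrative, not from the original source): on a filesystem
 * with 4096-byte blocks (sb_blocklog = 12), XFS_FSB_TO_BB() converts a
 * realtime block number straight to 512-byte basic blocks (fsb << 3),
 * because the realtime device is a flat address space.  XFS_FSB_TO_DADDR()
 * must additionally decode the AG number / AG block split used to address
 * blocks on the data device.
 */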
51 
52 /*
53  * Routine to zero an extent on disk allocated to the specific inode.
54  *
55  * The VFS functions take a linearised filesystem block offset, so we have to
56  * convert the sparse xfs fsb to the right format first.
57  * VFS types are real funky, too.
58  */
59 int
60 xfs_zero_extent(
61 	struct xfs_inode *ip,
62 	xfs_fsblock_t	start_fsb,
63 	xfs_off_t	count_fsb)
64 {
65 	struct xfs_mount *mp = ip->i_mount;
66 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
67 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
68 
69 	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
70 		block << (mp->m_super->s_blocksize_bits - 9),
71 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
72 		GFP_NOFS, 0);
73 }
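/*
 * Illustrative numbers for the shifts above, assuming 4096-byte blocks:
 * s_blocksize_bits is 12, so shifting by (12 - 9) = 3 converts filesystem
 * block units into the 512-byte sectors that blkdev_issue_zeroout()
 * operates on.
 */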
74 
75 #ifdef CONFIG_XFS_RT
76 int
77 xfs_bmap_rtalloc(
78 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
79 {
80 	int		error;		/* error return value */
81 	xfs_mount_t	*mp;		/* mount point structure */
82 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
83 	xfs_extlen_t	mod = 0;	/* alignment remainder */
84 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
85 	xfs_extlen_t	align;		/* minimum allocation alignment */
86 	xfs_rtblock_t	rtb;
87 
88 	mp = ap->ip->i_mount;
89 	align = xfs_get_extsz_hint(ap->ip);
90 	prod = align / mp->m_sb.sb_rextsize;
91 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
92 					align, 1, ap->eof, 0,
93 					ap->conv, &ap->offset, &ap->length);
94 	if (error)
95 		return error;
96 	ASSERT(ap->length);
97 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
98 
99 	/*
100 	 * If the offset & length are not perfectly aligned
101 	 * then kill prod, it will just get us in trouble.
102 	 */
103 	div_u64_rem(ap->offset, align, &mod);
104 	if (mod || ap->length % align)
105 		prod = 1;
106 	/*
107 	 * Set ralen to be the actual requested length in rtextents.
108 	 */
109 	ralen = ap->length / mp->m_sb.sb_rextsize;
110 	/*
111 	 * If the old value was close enough to MAXEXTLEN that
112 	 * we rounded up to it, cut it back so it's valid again.
113 	 * Note that if it's a really large request (bigger than
114 	 * MAXEXTLEN), we don't hear about that number, and can't
115 	 * adjust the starting point to match it.
116 	 */
117 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
118 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
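	/*
	 * Worked example (illustrative): with a realtime extent size of 4
	 * blocks, MAXEXTLEN (2^21 - 1 blocks) is not a multiple of 4, so a
	 * request that rounded up to or past it is cut back to
	 * MAXEXTLEN / 4 = 524287 rtextents, i.e. 2097148 blocks.
	 */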
119 
120 	/*
121 	 * Lock out modifications to both the RT bitmap and summary inodes
122 	 */
123 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
124 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
125 	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
126 	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
127 
128 	/*
129 	 * If it's an allocation to an empty file at offset 0,
130 	 * pick an extent that will space things out in the rt area.
131 	 */
132 	if (ap->eof && ap->offset == 0) {
133 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
134 
135 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
136 		if (error)
137 			return error;
138 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
139 	} else {
140 		ap->blkno = 0;
141 	}
142 
143 	xfs_bmap_adjacent(ap);
144 
145 	/*
146 	 * Realtime allocation, done through xfs_rtallocate_extent.
147 	 */
148 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
149 	rtb = ap->blkno;
150 	ap->length = ralen;
151 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
152 				&ralen, ap->wasdel, prod, &rtb);
153 	if (error)
154 		return error;
155 
156 	ap->blkno = rtb;
157 	if (ap->blkno != NULLFSBLOCK) {
158 		ap->blkno *= mp->m_sb.sb_rextsize;
159 		ralen *= mp->m_sb.sb_rextsize;
160 		ap->length = ralen;
161 		ap->ip->i_d.di_nblocks += ralen;
162 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
163 		if (ap->wasdel)
164 			ap->ip->i_delayed_blks -= ralen;
165 		/*
166 		 * Adjust the disk quota also. This was reserved
167 		 * earlier.
168 		 */
169 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
170 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
171 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
172 
173 		/* Zero the extent if we were asked to do so */
174 		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
175 			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
176 			if (error)
177 				return error;
178 		}
179 	} else {
180 		ap->length = 0;
181 	}
182 	return 0;
183 }
184 #endif /* CONFIG_XFS_RT */
185 
186 /*
187  * Check if the endoff is outside the last extent. If so the caller will grow
188  * the allocation to a stripe unit boundary.  All offsets are considered outside
189  * the end of file for an empty fork, so 1 is returned in *eof in that case.
190  */
191 int
192 xfs_bmap_eof(
193 	struct xfs_inode	*ip,
194 	xfs_fileoff_t		endoff,
195 	int			whichfork,
196 	int			*eof)
197 {
198 	struct xfs_bmbt_irec	rec;
199 	int			error;
200 
201 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
202 	if (error || *eof)
203 		return error;
204 
205 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
206 	return 0;
207 }
208 
209 /*
210  * Extent tree block counting routines.
211  */
212 
213 /*
214  * Count leaf blocks given a range of extent records.  Delayed allocation
215  * extents are not counted towards the totals.
216  */
217 xfs_extnum_t
218 xfs_bmap_count_leaves(
219 	struct xfs_ifork	*ifp,
220 	xfs_filblks_t		*count)
221 {
222 	struct xfs_iext_cursor	icur;
223 	struct xfs_bmbt_irec	got;
224 	xfs_extnum_t		numrecs = 0;
225 
226 	for_each_xfs_iext(ifp, &icur, &got) {
227 		if (!isnullstartblock(got.br_startblock)) {
228 			*count += got.br_blockcount;
229 			numrecs++;
230 		}
231 	}
232 
233 	return numrecs;
234 }
235 
236 /*
237  * Count leaf blocks given a range of extent records originally
238  * in btree format.
239  */
240 STATIC void
241 xfs_bmap_disk_count_leaves(
242 	struct xfs_mount	*mp,
243 	struct xfs_btree_block	*block,
244 	int			numrecs,
245 	xfs_filblks_t		*count)
246 {
247 	int		b;
248 	xfs_bmbt_rec_t	*frp;
249 
250 	for (b = 1; b <= numrecs; b++) {
251 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
252 		*count += xfs_bmbt_disk_get_blockcount(frp);
253 	}
254 }
255 
256 /*
257  * Recursively walks each level of a btree
258  * to count total fsblocks in use.
259  */
260 STATIC int
261 xfs_bmap_count_tree(
262 	struct xfs_mount	*mp,
263 	struct xfs_trans	*tp,
264 	struct xfs_ifork	*ifp,
265 	xfs_fsblock_t		blockno,
266 	int			levelin,
267 	xfs_extnum_t		*nextents,
268 	xfs_filblks_t		*count)
269 {
270 	int			error;
271 	struct xfs_buf		*bp, *nbp;
272 	int			level = levelin;
273 	__be64			*pp;
274 	xfs_fsblock_t           bno = blockno;
275 	xfs_fsblock_t		nextbno;
276 	struct xfs_btree_block	*block, *nextblock;
277 	int			numrecs;
278 
279 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
280 						&xfs_bmbt_buf_ops);
281 	if (error)
282 		return error;
283 	*count += 1;
284 	block = XFS_BUF_TO_BLOCK(bp);
285 
286 	if (--level) {
287 		/* Not at node above leaves, count this level of nodes */
288 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
289 		while (nextbno != NULLFSBLOCK) {
290 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
291 						XFS_BMAP_BTREE_REF,
292 						&xfs_bmbt_buf_ops);
293 			if (error)
294 				return error;
295 			*count += 1;
296 			nextblock = XFS_BUF_TO_BLOCK(nbp);
297 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
298 			xfs_trans_brelse(tp, nbp);
299 		}
300 
301 		/* Dive to the next level */
302 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
303 		bno = be64_to_cpu(*pp);
304 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
305 				count);
306 		if (error) {
307 			xfs_trans_brelse(tp, bp);
308 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
309 					 XFS_ERRLEVEL_LOW, mp);
310 			return -EFSCORRUPTED;
311 		}
312 		xfs_trans_brelse(tp, bp);
313 	} else {
314 		/* count all level 1 nodes and their leaves */
315 		for (;;) {
316 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
317 			numrecs = be16_to_cpu(block->bb_numrecs);
318 			(*nextents) += numrecs;
319 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
320 			xfs_trans_brelse(tp, bp);
321 			if (nextbno == NULLFSBLOCK)
322 				break;
323 			bno = nextbno;
324 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
325 						XFS_BMAP_BTREE_REF,
326 						&xfs_bmbt_buf_ops);
327 			if (error)
328 				return error;
329 			*count += 1;
330 			block = XFS_BUF_TO_BLOCK(bp);
331 		}
332 	}
333 	return 0;
334 }
335 
336 /*
337  * Count fsblocks of the given fork.  Delayed allocation extents are
338  * not counted towards the totals.
339  */
340 int
341 xfs_bmap_count_blocks(
342 	struct xfs_trans	*tp,
343 	struct xfs_inode	*ip,
344 	int			whichfork,
345 	xfs_extnum_t		*nextents,
346 	xfs_filblks_t		*count)
347 {
348 	struct xfs_mount	*mp;	/* file system mount structure */
349 	__be64			*pp;	/* pointer to block address */
350 	struct xfs_btree_block	*block;	/* current btree block */
351 	struct xfs_ifork	*ifp;	/* fork structure */
352 	xfs_fsblock_t		bno;	/* block # of "block" */
353 	int			level;	/* btree level, for checking */
354 	int			error;
355 
356 	bno = NULLFSBLOCK;
357 	mp = ip->i_mount;
358 	*nextents = 0;
359 	*count = 0;
360 	ifp = XFS_IFORK_PTR(ip, whichfork);
361 	if (!ifp)
362 		return 0;
363 
364 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
365 	case XFS_DINODE_FMT_EXTENTS:
366 		*nextents = xfs_bmap_count_leaves(ifp, count);
367 		return 0;
368 	case XFS_DINODE_FMT_BTREE:
369 		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
370 			error = xfs_iread_extents(tp, ip, whichfork);
371 			if (error)
372 				return error;
373 		}
374 
375 		/*
376 		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
377 		 */
378 		block = ifp->if_broot;
379 		level = be16_to_cpu(block->bb_level);
380 		ASSERT(level > 0);
381 		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
382 		bno = be64_to_cpu(*pp);
383 		ASSERT(bno != NULLFSBLOCK);
384 		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
385 		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
386 
387 		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
388 				nextents, count);
389 		if (error) {
390 			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
391 					XFS_ERRLEVEL_LOW, mp);
392 			return -EFSCORRUPTED;
393 		}
394 		return 0;
395 	}
396 
397 	return 0;
398 }
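/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *	int		error;
 *
 *	error = xfs_bmap_count_blocks(NULL, ip, XFS_DATA_FORK,
 *				      &nextents, &count);
 *
 * A NULL transaction is fine here; it is only passed down to the buffer
 * and extent reading routines.
 */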
399 
400 static int
401 xfs_getbmap_report_one(
402 	struct xfs_inode	*ip,
403 	struct getbmapx		*bmv,
404 	struct kgetbmap		*out,
405 	int64_t			bmv_end,
406 	struct xfs_bmbt_irec	*got)
407 {
408 	struct kgetbmap		*p = out + bmv->bmv_entries;
409 	bool			shared = false, trimmed = false;
410 	int			error;
411 
412 	error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
413 	if (error)
414 		return error;
415 
416 	if (isnullstartblock(got->br_startblock) ||
417 	    got->br_startblock == DELAYSTARTBLOCK) {
418 		/*
419 		 * Delalloc extents that start beyond EOF can occur due to
420 		 * speculative EOF allocation when the delalloc extent is larger
421 		 * than the largest freespace extent at conversion time.  These
422 		 * extents cannot be converted by data writeback, so can exist
423 		 * here even if we are not supposed to be finding delalloc
424 		 * extents.
425 		 */
426 		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
427 			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
428 
429 		p->bmv_oflags |= BMV_OF_DELALLOC;
430 		p->bmv_block = -2;
431 	} else {
432 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
433 	}
434 
435 	if (got->br_state == XFS_EXT_UNWRITTEN &&
436 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
437 		p->bmv_oflags |= BMV_OF_PREALLOC;
438 
439 	if (shared)
440 		p->bmv_oflags |= BMV_OF_SHARED;
441 
442 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
443 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
444 
445 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
446 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
447 	bmv->bmv_entries++;
448 	return 0;
449 }
450 
451 static void
452 xfs_getbmap_report_hole(
453 	struct xfs_inode	*ip,
454 	struct getbmapx		*bmv,
455 	struct kgetbmap		*out,
456 	int64_t			bmv_end,
457 	xfs_fileoff_t		bno,
458 	xfs_fileoff_t		end)
459 {
460 	struct kgetbmap		*p = out + bmv->bmv_entries;
461 
462 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
463 		return;
464 
465 	p->bmv_block = -1;
466 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
467 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
468 
469 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
470 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
471 	bmv->bmv_entries++;
472 }
473 
474 static inline bool
475 xfs_getbmap_full(
476 	struct getbmapx		*bmv)
477 {
478 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
479 }
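/*
 * The getbmap ABI counts the header record in bmv_count, so at most
 * bmv_count - 1 extent records may be filled in before the output array
 * is considered full.
 */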
480 
481 static bool
482 xfs_getbmap_next_rec(
483 	struct xfs_bmbt_irec	*rec,
484 	xfs_fileoff_t		total_end)
485 {
486 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
487 
488 	if (end == total_end)
489 		return false;
490 
491 	rec->br_startoff += rec->br_blockcount;
492 	if (!isnullstartblock(rec->br_startblock) &&
493 	    rec->br_startblock != DELAYSTARTBLOCK)
494 		rec->br_startblock += rec->br_blockcount;
495 	rec->br_blockcount = total_end - end;
496 	return true;
497 }
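/*
 * Example (illustrative): if a 10-block bmbt record is shared for only its
 * first 4 blocks, xfs_reflink_trim_around_shared() trims the mapping down
 * to those 4 blocks, xfs_getbmap_report_one() emits them as one getbmapx
 * record, and xfs_getbmap_next_rec() then advances the record to cover the
 * remaining 6 unshared blocks for the next pass of the loop in
 * xfs_getbmap() below.
 */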
498 
499 /*
500  * Get the inode's extents as described in bmv, and format them into the
501  * out array.  Entries are filled in until all extents are mapped, until
502  * the passed-in bmv->bmv_count slots (one of which is reserved for the
503  * header record) have been filled, or until the requested range has been
504  * fully mapped.
505  */
506 int						/* error code */
507 xfs_getbmap(
508 	struct xfs_inode	*ip,
509 	struct getbmapx		*bmv,		/* user bmap structure */
510 	struct kgetbmap		*out)
511 {
512 	struct xfs_mount	*mp = ip->i_mount;
513 	int			iflags = bmv->bmv_iflags;
514 	int			whichfork, lock, error = 0;
515 	int64_t			bmv_end, max_len;
516 	xfs_fileoff_t		bno, first_bno;
517 	struct xfs_ifork	*ifp;
518 	struct xfs_bmbt_irec	got, rec;
519 	xfs_filblks_t		len;
520 	struct xfs_iext_cursor	icur;
521 
522 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
523 		return -EINVAL;
524 #ifndef DEBUG
525 	/* Only allow CoW fork queries if we're debugging. */
526 	if (iflags & BMV_IF_COWFORK)
527 		return -EINVAL;
528 #endif
529 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
530 		return -EINVAL;
531 
532 	if (bmv->bmv_length < -1)
533 		return -EINVAL;
534 	bmv->bmv_entries = 0;
535 	if (bmv->bmv_length == 0)
536 		return 0;
537 
538 	if (iflags & BMV_IF_ATTRFORK)
539 		whichfork = XFS_ATTR_FORK;
540 	else if (iflags & BMV_IF_COWFORK)
541 		whichfork = XFS_COW_FORK;
542 	else
543 		whichfork = XFS_DATA_FORK;
544 	ifp = XFS_IFORK_PTR(ip, whichfork);
545 
546 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
547 	switch (whichfork) {
548 	case XFS_ATTR_FORK:
549 		if (!XFS_IFORK_Q(ip))
550 			goto out_unlock_iolock;
551 
552 		max_len = 1LL << 32;
553 		lock = xfs_ilock_attr_map_shared(ip);
554 		break;
555 	case XFS_COW_FORK:
556 		/* No CoW fork? Just return */
557 		if (!ifp)
558 			goto out_unlock_iolock;
559 
560 		if (xfs_get_cowextsz_hint(ip))
561 			max_len = mp->m_super->s_maxbytes;
562 		else
563 			max_len = XFS_ISIZE(ip);
564 
565 		lock = XFS_ILOCK_SHARED;
566 		xfs_ilock(ip, lock);
567 		break;
568 	case XFS_DATA_FORK:
569 		if (!(iflags & BMV_IF_DELALLOC) &&
570 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
571 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
572 			if (error)
573 				goto out_unlock_iolock;
574 
575 			/*
576 			 * Even after flushing the inode, there can still be
577 			 * delalloc blocks on the inode beyond EOF due to
578 			 * speculative preallocation.  These are not removed
579 			 * until the release function is called or the inode
580 			 * is inactivated.  Hence we cannot assert here that
581 			 * ip->i_delayed_blks == 0.
582 			 */
583 		}
584 
585 		if (xfs_get_extsz_hint(ip) ||
586 		    (ip->i_d.di_flags &
587 		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
588 			max_len = mp->m_super->s_maxbytes;
589 		else
590 			max_len = XFS_ISIZE(ip);
591 
592 		lock = xfs_ilock_data_map_shared(ip);
593 		break;
594 	}
595 
596 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
597 	case XFS_DINODE_FMT_EXTENTS:
598 	case XFS_DINODE_FMT_BTREE:
599 		break;
600 	case XFS_DINODE_FMT_LOCAL:
601 		/* Local format inode forks report no extents. */
602 		goto out_unlock_ilock;
603 	default:
604 		error = -EINVAL;
605 		goto out_unlock_ilock;
606 	}
607 
608 	if (bmv->bmv_length == -1) {
609 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
610 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
611 	}
612 
613 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
614 
615 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
616 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
617 
618 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
619 		error = xfs_iread_extents(NULL, ip, whichfork);
620 		if (error)
621 			goto out_unlock_ilock;
622 	}
623 
624 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
625 		/*
626 		 * Report a whole-file hole if the delalloc flag is set to
627 		 * stay compatible with the old implementation.
628 		 */
629 		if (iflags & BMV_IF_DELALLOC)
630 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
631 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
632 		goto out_unlock_ilock;
633 	}
634 
635 	while (!xfs_getbmap_full(bmv)) {
636 		xfs_trim_extent(&got, first_bno, len);
637 
638 		/*
639 		 * Report an entry for a hole if this extent doesn't directly
640 		 * follow the previous one.
641 		 */
642 		if (got.br_startoff > bno) {
643 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
644 					got.br_startoff);
645 			if (xfs_getbmap_full(bmv))
646 				break;
647 		}
648 
649 		/*
650 		 * In order to report shared extents accurately, we report each
651 		 * distinct shared / unshared part of a single bmbt record with
652 		 * an individual getbmapx record.
653 		 */
654 		bno = got.br_startoff + got.br_blockcount;
655 		rec = got;
656 		do {
657 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
658 					&rec);
659 			if (error || xfs_getbmap_full(bmv))
660 				goto out_unlock_ilock;
661 		} while (xfs_getbmap_next_rec(&rec, bno));
662 
663 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
664 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
665 
666 			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
667 
668 			if (whichfork != XFS_ATTR_FORK && bno < end &&
669 			    !xfs_getbmap_full(bmv)) {
670 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
671 						bno, end);
672 			}
673 			break;
674 		}
675 
676 		if (bno >= first_bno + len)
677 			break;
678 	}
679 
680 out_unlock_ilock:
681 	xfs_iunlock(ip, lock);
682 out_unlock_iolock:
683 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
684 	return error;
685 }
686 
687  * Dead simple method of punching delayed allocation blocks from a range in
688  * the inode. Walks a block at a time so it will be slow, but is only executed in
689  * the inode. Walks a block at a time so will be slow, but is only executed in
690  * rare error cases so the overhead is not critical. This will always punch out
691  * both the start and end blocks, even if the ranges only partially overlap
692  * them, so it is up to the caller to ensure that partial blocks are not
693  * passed in.
694  */
695 int
696 xfs_bmap_punch_delalloc_range(
697 	struct xfs_inode	*ip,
698 	xfs_fileoff_t		start_fsb,
699 	xfs_fileoff_t		length)
700 {
701 	xfs_fileoff_t		remaining = length;
702 	int			error = 0;
703 
704 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
705 
706 	do {
707 		int		done;
708 		xfs_bmbt_irec_t	imap;
709 		int		nimaps = 1;
710 		xfs_fsblock_t	firstblock;
711 		struct xfs_defer_ops dfops;
712 
713 		/*
714 		 * Map the range first and check that it is a delalloc extent
715 		 * before trying to unmap the range. Otherwise we will be
716 		 * trying to remove a real extent (which requires a
717 		 * transaction) or a hole, which is probably a bad idea...
718 		 */
719 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
720 				       XFS_BMAPI_ENTIRE);
721 
722 		if (error) {
723 			/* something screwed up, just bail */
724 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
725 				xfs_alert(ip->i_mount,
726 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
727 						ip->i_ino, start_fsb);
728 			}
729 			break;
730 		}
731 		if (!nimaps) {
732 			/* nothing there */
733 			goto next_block;
734 		}
735 		if (imap.br_startblock != DELAYSTARTBLOCK) {
736 			/* been converted, ignore */
737 			goto next_block;
738 		}
739 		WARN_ON(imap.br_blockcount == 0);
740 
741 		/*
742 		 * Note: while we initialise the firstblock/dfops pair, they
743 		 * should never be used because blocks should never be
744 	 * allocated or freed for a delalloc extent, and hence we don't
745 	 * need to cancel or finish them after the xfs_bunmapi() call.
746 		 */
747 		xfs_defer_init(&dfops, &firstblock);
748 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
749 					&dfops, &done);
750 		if (error)
751 			break;
752 
753 		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
754 next_block:
755 		start_fsb++;
756 		remaining--;
757 	} while (remaining > 0);
758 
759 	return error;
760 }
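/*
 * Usage sketch (hypothetical caller, not part of this file): the writeback
 * error path discards the delalloc blocks backing a page that could not be
 * converted, e.g.:
 *
 *	xfs_fileoff_t	start_fsb = XFS_B_TO_FSBT(mp, page_offset(page));
 *	xfs_fileoff_t	end_fsb = XFS_B_TO_FSB(mp, page_offset(page) +
 *						    PAGE_SIZE);
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
 *					      end_fsb - start_fsb);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */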
761 
762 /*
763  * Test whether it is appropriate to check an inode for and free post EOF
764  * blocks. The 'force' parameter determines whether we should also consider
765  * regular files that are marked preallocated or append-only.
766  */
767 bool
768 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
769 {
770 	/* prealloc/delalloc exists only on regular files */
771 	if (!S_ISREG(VFS_I(ip)->i_mode))
772 		return false;
773 
774 	/*
775 	 * Zero sized files with no cached pages and delalloc blocks will not
776 	 * have speculative prealloc/delalloc blocks to remove.
777 	 */
778 	if (VFS_I(ip)->i_size == 0 &&
779 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
780 	    ip->i_delayed_blks == 0)
781 		return false;
782 
783 	/* If we haven't read in the extent list, then don't do it now. */
784 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
785 		return false;
786 
787 	/*
788 	 * Do not free real preallocated or append-only files unless the file
789 	 * has delalloc blocks and we are forced to remove them.
790 	 */
791 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
792 		if (!force || ip->i_delayed_blks == 0)
793 			return false;
794 
795 	return true;
796 }
797 
798 /*
799  * This is called to free any blocks beyond eof. The caller must hold
800  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
801  * reference to the inode.
802  */
803 int
804 xfs_free_eofblocks(
805 	struct xfs_inode	*ip)
806 {
807 	struct xfs_trans	*tp;
808 	int			error;
809 	xfs_fileoff_t		end_fsb;
810 	xfs_fileoff_t		last_fsb;
811 	xfs_filblks_t		map_len;
812 	int			nimaps;
813 	struct xfs_bmbt_irec	imap;
814 	struct xfs_mount	*mp = ip->i_mount;
815 
816 	/*
817 	 * Figure out if there are any blocks beyond the end
818 	 * of the file.  If not, then there is nothing to do.
819 	 */
820 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
821 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
822 	if (last_fsb <= end_fsb)
823 		return 0;
824 	map_len = last_fsb - end_fsb;
825 
826 	nimaps = 1;
827 	xfs_ilock(ip, XFS_ILOCK_SHARED);
828 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
829 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
830 
831 	/*
832 	 * If there are blocks after the end of file, truncate the file to its
833 	 * current size to free them up.
834 	 */
835 	if (!error && (nimaps != 0) &&
836 	    (imap.br_startblock != HOLESTARTBLOCK ||
837 	     ip->i_delayed_blks)) {
838 		/*
839 		 * Attach the dquots to the inode up front.
840 		 */
841 		error = xfs_qm_dqattach(ip);
842 		if (error)
843 			return error;
844 
845 		/* wait on dio to ensure i_size has settled */
846 		inode_dio_wait(VFS_I(ip));
847 
848 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
849 				&tp);
850 		if (error) {
851 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
852 			return error;
853 		}
854 
855 		xfs_ilock(ip, XFS_ILOCK_EXCL);
856 		xfs_trans_ijoin(tp, ip, 0);
857 
858 		/*
859 		 * Do not update the on-disk file size.  If we update the
860 		 * on-disk file size and then the system crashes before the
861 		 * contents of the file are flushed to disk then the files
862 		 * may be full of holes (i.e. the NULL files bug).
863 		 */
864 		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
865 					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
866 		if (error) {
867 			/*
868 			 * If we get an error at this point we simply don't
869 			 * bother truncating the file.
870 			 */
871 			xfs_trans_cancel(tp);
872 		} else {
873 			error = xfs_trans_commit(tp);
874 			if (!error)
875 				xfs_inode_clear_eofblocks_tag(ip);
876 		}
877 
878 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
879 	}
880 	return error;
881 }
882 
883 int
884 xfs_alloc_file_space(
885 	struct xfs_inode	*ip,
886 	xfs_off_t		offset,
887 	xfs_off_t		len,
888 	int			alloc_type)
889 {
890 	xfs_mount_t		*mp = ip->i_mount;
891 	xfs_off_t		count;
892 	xfs_filblks_t		allocated_fsb;
893 	xfs_filblks_t		allocatesize_fsb;
894 	xfs_extlen_t		extsz, temp;
895 	xfs_fileoff_t		startoffset_fsb;
896 	xfs_fsblock_t		firstfsb;
897 	int			nimaps;
898 	int			quota_flag;
899 	int			rt;
900 	xfs_trans_t		*tp;
901 	xfs_bmbt_irec_t		imaps[1], *imapp;
902 	struct xfs_defer_ops	dfops;
903 	uint			qblocks, resblks, resrtextents;
904 	int			error;
905 
906 	trace_xfs_alloc_file_space(ip);
907 
908 	if (XFS_FORCED_SHUTDOWN(mp))
909 		return -EIO;
910 
911 	error = xfs_qm_dqattach(ip);
912 	if (error)
913 		return error;
914 
915 	if (len <= 0)
916 		return -EINVAL;
917 
918 	rt = XFS_IS_REALTIME_INODE(ip);
919 	extsz = xfs_get_extsz_hint(ip);
920 
921 	count = len;
922 	imapp = &imaps[0];
923 	nimaps = 1;
924 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
925 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
926 
927 	/*
928 	 * Allocate file space until done or until there is an error
929 	 */
930 	while (allocatesize_fsb && !error) {
931 		xfs_fileoff_t	s, e;
932 
933 		/*
934 		 * Determine space reservations for data/realtime.
935 		 */
936 		if (unlikely(extsz)) {
937 			s = startoffset_fsb;
938 			do_div(s, extsz);
939 			s *= extsz;
940 			e = startoffset_fsb + allocatesize_fsb;
941 			div_u64_rem(startoffset_fsb, extsz, &temp);
942 			if (temp)
943 				e += temp;
944 			div_u64_rem(e, extsz, &temp);
945 			if (temp)
946 				e += extsz - temp;
947 		} else {
948 			s = 0;
949 			e = allocatesize_fsb;
950 		}
951 
952 		/*
953 		 * The transaction reservation is limited to a 32-bit block
954 		 * count, hence we need to limit the number of blocks we are
955 		 * trying to reserve to avoid an overflow. We can't allocate
956 		 * more than @nimaps extents, and an extent is limited on disk
957 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
958 		 */
959 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
960 		if (unlikely(rt)) {
961 			resrtextents = qblocks = resblks;
962 			resrtextents /= mp->m_sb.sb_rextsize;
963 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
964 			quota_flag = XFS_QMOPT_RES_RTBLKS;
965 		} else {
966 			resrtextents = 0;
967 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
968 			quota_flag = XFS_QMOPT_RES_REGBLKS;
969 		}
970 
971 		/*
972 		 * Allocate and setup the transaction.
973 		 */
974 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
975 				resrtextents, 0, &tp);
976 
977 		/*
978 		 * Check for running out of space
979 		 */
980 		if (error) {
981 			/*
982 			 * Free the transaction structure.
983 			 */
984 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
985 			break;
986 		}
987 		xfs_ilock(ip, XFS_ILOCK_EXCL);
988 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
989 						      0, quota_flag);
990 		if (error)
991 			goto error1;
992 
993 		xfs_trans_ijoin(tp, ip, 0);
994 
995 		xfs_defer_init(&dfops, &firstfsb);
996 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
997 					allocatesize_fsb, alloc_type, &firstfsb,
998 					resblks, imapp, &nimaps, &dfops);
999 		if (error)
1000 			goto error0;
1001 
1002 		/*
1003 		 * Complete the transaction
1004 		 */
1005 		error = xfs_defer_finish(&tp, &dfops);
1006 		if (error)
1007 			goto error0;
1008 
1009 		error = xfs_trans_commit(tp);
1010 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1011 		if (error)
1012 			break;
1013 
1014 		allocated_fsb = imapp->br_blockcount;
1015 
1016 		if (nimaps == 0) {
1017 			error = -ENOSPC;
1018 			break;
1019 		}
1020 
1021 		startoffset_fsb += allocated_fsb;
1022 		allocatesize_fsb -= allocated_fsb;
1023 	}
1024 
1025 	return error;
1026 
1027 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1028 	xfs_defer_cancel(&dfops);
1029 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1030 
1031 error1:	/* Just cancel transaction */
1032 	xfs_trans_cancel(tp);
1033 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1034 	return error;
1035 }
1036 
1037 static int
1038 xfs_unmap_extent(
1039 	struct xfs_inode	*ip,
1040 	xfs_fileoff_t		startoffset_fsb,
1041 	xfs_filblks_t		len_fsb,
1042 	int			*done)
1043 {
1044 	struct xfs_mount	*mp = ip->i_mount;
1045 	struct xfs_trans	*tp;
1046 	struct xfs_defer_ops	dfops;
1047 	xfs_fsblock_t		firstfsb;
1048 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1049 	int			error;
1050 
1051 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1052 	if (error) {
1053 		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1054 		return error;
1055 	}
1056 
1057 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1058 	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1059 			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1060 	if (error)
1061 		goto out_trans_cancel;
1062 
1063 	xfs_trans_ijoin(tp, ip, 0);
1064 
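	/*
	 * The "2" passed to xfs_bunmapi() below is the nexts limit: unmap
	 * at most two extents per transaction so the reservation made above
	 * stays bounded.  The caller loops on *done to cover the full range.
	 */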
1065 	xfs_defer_init(&dfops, &firstfsb);
1066 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
1067 			&dfops, done);
1068 	if (error)
1069 		goto out_bmap_cancel;
1070 
1071 	xfs_defer_ijoin(&dfops, ip);
1072 	error = xfs_defer_finish(&tp, &dfops);
1073 	if (error)
1074 		goto out_bmap_cancel;
1075 
1076 	error = xfs_trans_commit(tp);
1077 out_unlock:
1078 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1079 	return error;
1080 
1081 out_bmap_cancel:
1082 	xfs_defer_cancel(&dfops);
1083 out_trans_cancel:
1084 	xfs_trans_cancel(tp);
1085 	goto out_unlock;
1086 }
1087 
1088 static int
1089 xfs_adjust_extent_unmap_boundaries(
1090 	struct xfs_inode	*ip,
1091 	xfs_fileoff_t		*startoffset_fsb,
1092 	xfs_fileoff_t		*endoffset_fsb)
1093 {
1094 	struct xfs_mount	*mp = ip->i_mount;
1095 	struct xfs_bmbt_irec	imap;
1096 	int			nimap, error;
1097 	xfs_extlen_t		mod = 0;
1098 
1099 	nimap = 1;
1100 	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1101 	if (error)
1102 		return error;
1103 
1104 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1105 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1106 		div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
1107 		if (mod)
1108 			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1109 	}
1110 
1111 	nimap = 1;
1112 	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1113 	if (error)
1114 		return error;
1115 
1116 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1117 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
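		/*
		 * Note: "mod" here still holds the remainder computed for
		 * the start boundary above; it is not recomputed from this
		 * end-boundary mapping.
		 */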
1118 		mod++;
1119 		if (mod && mod != mp->m_sb.sb_rextsize)
1120 			*endoffset_fsb -= mod;
1121 	}
1122 
1123 	return 0;
1124 }
1125 
1126 static int
1127 xfs_flush_unmap_range(
1128 	struct xfs_inode	*ip,
1129 	xfs_off_t		offset,
1130 	xfs_off_t		len)
1131 {
1132 	struct xfs_mount	*mp = ip->i_mount;
1133 	struct inode		*inode = VFS_I(ip);
1134 	xfs_off_t		rounding, start, end;
1135 	int			error;
1136 
1137 	/* wait for the completion of any pending DIOs */
1138 	inode_dio_wait(inode);
1139 
1140 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1141 	start = round_down(offset, rounding);
1142 	end = round_up(offset + len, rounding) - 1;
1143 
1144 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1145 	if (error)
1146 		return error;
1147 	truncate_pagecache_range(inode, start, end);
1148 	return 0;
1149 }
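/*
 * Illustrative numbers for the rounding above: with 1024-byte filesystem
 * blocks and 4096-byte pages, "rounding" is PAGE_SIZE, so flushing an
 * unmap of bytes 5000-8999 writes back and truncates the page cache over
 * the page-aligned byte range [4096, 12287].
 */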
1150 
1151 int
1152 xfs_free_file_space(
1153 	struct xfs_inode	*ip,
1154 	xfs_off_t		offset,
1155 	xfs_off_t		len)
1156 {
1157 	struct xfs_mount	*mp = ip->i_mount;
1158 	xfs_fileoff_t		startoffset_fsb;
1159 	xfs_fileoff_t		endoffset_fsb;
1160 	int			done = 0, error;
1161 
1162 	trace_xfs_free_file_space(ip);
1163 
1164 	error = xfs_qm_dqattach(ip);
1165 	if (error)
1166 		return error;
1167 
1168 	if (len <= 0)	/* if nothing is being freed */
1169 		return 0;
1170 
1171 	error = xfs_flush_unmap_range(ip, offset, len);
1172 	if (error)
1173 		return error;
1174 
1175 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1176 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1177 
1178 	/*
1179 	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
1180 	 * and we can't use unwritten extents then we actually need to zero
1181 	 * the whole extent, otherwise we just need to take care of the block
1182 	 * boundaries, and xfs_bunmapi will handle the rest.
1183 	 */
1184 	if (XFS_IS_REALTIME_INODE(ip) &&
1185 	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1186 		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1187 				&endoffset_fsb);
1188 		if (error)
1189 			return error;
1190 	}
1191 
1192 	if (endoffset_fsb > startoffset_fsb) {
1193 		while (!done) {
1194 			error = xfs_unmap_extent(ip, startoffset_fsb,
1195 					endoffset_fsb - startoffset_fsb, &done);
1196 			if (error)
1197 				return error;
1198 		}
1199 	}
1200 
1201 	/*
1202 	 * Now that we've unmapped all full blocks we'll have to zero out any
1203 	 * partial block at the beginning and/or end.  iomap_zero_range is smart
1204 	 * enough to skip any holes, including those we just created, but we
1205 	 * must take care not to zero beyond EOF and enlarge i_size.
1206 	 */
1207 	if (offset >= XFS_ISIZE(ip))
1208 		return 0;
1209 	if (offset + len > XFS_ISIZE(ip))
1210 		len = XFS_ISIZE(ip) - offset;
1211 	return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
1212 }
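/*
 * Worked example (illustrative): punching bytes 1000-8999 on a filesystem
 * with 4096-byte blocks unmaps only block 1, the sole fully-covered block.
 * iomap_zero_range() then zeroes the partial ranges [1000, 4095] and
 * [8192, 8999], skipping the hole just created at block 1.
 */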
1213 
1214 /*
1215  * Preallocate and zero a range of a file. This mechanism has the allocation
1216  * semantics of fallocate and in addition converts data in the range to zeroes.
1217  */
1218 int
1219 xfs_zero_file_space(
1220 	struct xfs_inode	*ip,
1221 	xfs_off_t		offset,
1222 	xfs_off_t		len)
1223 {
1224 	struct xfs_mount	*mp = ip->i_mount;
1225 	uint			blksize;
1226 	int			error;
1227 
1228 	trace_xfs_zero_file_space(ip);
1229 
1230 	blksize = 1 << mp->m_sb.sb_blocklog;
1231 
1232 	/*
1233 	 * Punch a hole and prealloc the range. We use hole punch rather than
1234 	 * unwritten extent conversion for two reasons:
1235 	 *
1236 	 * 1.) Hole punch handles partial block zeroing for us.
1237 	 *
1238 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1239 	 * by virtue of the hole punch.
1240 	 */
1241 	error = xfs_free_file_space(ip, offset, len);
1242 	if (error)
1243 		goto out;
1244 
1245 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1246 				     round_up(offset + len, blksize) -
1247 				     round_down(offset, blksize),
1248 				     XFS_BMAPI_PREALLOC);
1249 out:
1250 	return error;
1252 }
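/*
 * Worked example (illustrative): zeroing bytes 1000-2999 on a filesystem
 * with 4096-byte blocks first punches that sub-block range (zeroing it via
 * the page cache), then preallocates the containing block range [0, 4095]
 * as rounded out by the round_down()/round_up() calls above.
 */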
1253 
1254 static int
1255 xfs_prepare_shift(
1256 	struct xfs_inode	*ip,
1257 	loff_t			offset)
1258 {
1259 	int			error;
1260 
1261 	/*
1262 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1263 	 * into the accessible region of the file.
1264 	 */
1265 	if (xfs_can_free_eofblocks(ip, true)) {
1266 		error = xfs_free_eofblocks(ip);
1267 		if (error)
1268 			return error;
1269 	}
1270 
1271 	/*
1272 	 * Writeback and invalidate cache for the remainder of the file as we're
1273 	 * about to shift down every extent from offset to EOF.
1274 	 */
1275 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
1276 	if (error)
1277 		return error;
1278 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1279 					offset >> PAGE_SHIFT, -1);
1280 	if (error)
1281 		return error;
1282 
1283 	/*
1284 	 * Clean out anything hanging around in the cow fork now that
1285 	 * we've flushed all the dirty data out to disk to avoid having
1286 	 * CoW extents at the wrong offsets.
1287 	 */
1288 	if (xfs_is_reflink_inode(ip)) {
1289 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1290 				true);
1291 		if (error)
1292 			return error;
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 /*
1299  * xfs_collapse_file_space()
1300  *	This routine frees disk space and shifts extents for the given file.
1301  *	The first thing we do is free the data blocks in the specified range
1302  *	by calling xfs_free_file_space(), which also syncs dirty data and
1303  *	invalidates the page cache over the region on which the collapse
1304  *	range is working. Then we shift extent records left to cover the hole.
1305  * RETURNS:
1306  *	0 on success
1307  *	errno on error
1309  */
1310 int
1311 xfs_collapse_file_space(
1312 	struct xfs_inode	*ip,
1313 	xfs_off_t		offset,
1314 	xfs_off_t		len)
1315 {
1316 	struct xfs_mount	*mp = ip->i_mount;
1317 	struct xfs_trans	*tp;
1318 	int			error;
1319 	struct xfs_defer_ops	dfops;
1320 	xfs_fsblock_t		first_block;
1321 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1322 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1323 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1324 	bool			done = false;
1325 
1326 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1327 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1328 
1329 	trace_xfs_collapse_file_space(ip);
1330 
1331 	error = xfs_free_file_space(ip, offset, len);
1332 	if (error)
1333 		return error;
1334 
1335 	error = xfs_prepare_shift(ip, offset);
1336 	if (error)
1337 		return error;
1338 
1339 	while (!error && !done) {
1340 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1341 					&tp);
1342 		if (error)
1343 			break;
1344 
1345 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1346 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1347 				ip->i_gdquot, ip->i_pdquot, resblks, 0,
1348 				XFS_QMOPT_RES_REGBLKS);
1349 		if (error)
1350 			goto out_trans_cancel;
1351 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1352 
1353 		xfs_defer_init(&dfops, &first_block);
1354 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1355 				&done, &first_block, &dfops);
1356 		if (error)
1357 			goto out_bmap_cancel;
1358 
1359 		error = xfs_defer_finish(&tp, &dfops);
1360 		if (error)
1361 			goto out_bmap_cancel;
1362 		error = xfs_trans_commit(tp);
1363 	}
1364 
1365 	return error;
1366 
1367 out_bmap_cancel:
1368 	xfs_defer_cancel(&dfops);
1369 out_trans_cancel:
1370 	xfs_trans_cancel(tp);
1371 	return error;
1372 }
1373 
1374 /*
1375  * xfs_insert_file_space()
1376  *	This routine creates hole space by shifting extents for the given file.
1377  *	The first thing we do is sync dirty data and invalidate the page cache
1378  *	over the region on which the insert range is working. Then we split an
1379  *	extent in two at the given offset by calling xfs_bmap_split_extent, and
1380  *	shift all extent records lying between [offset, last allocated extent]
1381  *	to the right to make room for the hole.
1382  * RETURNS:
1383  *	0 on success
1384  *	errno on error
1385  */
1386 int
1387 xfs_insert_file_space(
1388 	struct xfs_inode	*ip,
1389 	loff_t			offset,
1390 	loff_t			len)
1391 {
1392 	struct xfs_mount	*mp = ip->i_mount;
1393 	struct xfs_trans	*tp;
1394 	int			error;
1395 	struct xfs_defer_ops	dfops;
1396 	xfs_fsblock_t		first_block;
1397 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1398 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1399 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1400 	bool			done = false;
1401 
1402 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1403 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1404 
1405 	trace_xfs_insert_file_space(ip);
1406 
1407 	error = xfs_prepare_shift(ip, offset);
1408 	if (error)
1409 		return error;
1410 
1411 	/*
1412 	 * The extent shifting code works on extent granularity. So, if stop_fsb
1413 	 * is not the starting block of an extent, we need to split the extent at
1414 	 * stop_fsb.
1415 	 */
1416 	error = xfs_bmap_split_extent(ip, stop_fsb);
1417 	if (error)
1418 		return error;
1419 
1420 	while (!error && !done) {
1421 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
1422 					&tp);
1423 		if (error)
1424 			break;
1425 
1426 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1427 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1428 		xfs_defer_init(&dfops, &first_block);
1429 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1430 				&done, stop_fsb, &first_block, &dfops);
1431 		if (error)
1432 			goto out_bmap_cancel;
1433 
1434 		error = xfs_defer_finish(&tp, &dfops);
1435 		if (error)
1436 			goto out_bmap_cancel;
1437 		error = xfs_trans_commit(tp);
1438 	}
1439 
1440 	return error;
1441 
1442 out_bmap_cancel:
1443 	xfs_defer_cancel(&dfops);
1444 	xfs_trans_cancel(tp);
1445 	return error;
1446 }
1447 
1448 /*
1449  * We need to check that the format of the data fork in the temporary inode is
1450  * valid for the target inode before doing the swap. This is not a problem with
1451  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1452  * data fork depending on the space the attribute fork is taking so we can get
1453  * invalid formats on the target inode.
1454  *
1455  * E.g. target has space for 7 extents in extent format, temp inode only has
1456  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1457  * btree, but when swapped it needs to be in extent format. Hence we can't just
1458  * blindly swap data forks on attr2 filesystems.
1459  *
1460  * Note that we check the swap in both directions so that we don't end up with
1461  * a corrupt temporary inode, either.
1462  *
1463  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1464  * inode will prevent this situation from occurring, so all we do here is
1465  * reject and log the attempt. Basically we are putting the responsibility on
1466  * userspace to get this right.
1467  */
1468 static int
1469 xfs_swap_extents_check_format(
1470 	struct xfs_inode	*ip,	/* target inode */
1471 	struct xfs_inode	*tip)	/* tmp inode */
1472 {
1474 	/* Should never get a local format */
1475 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1476 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1477 		return -EINVAL;
1478 
1479 	/*
1480 	 * If the target inode has fewer extents than the temporary inode, then
1481 	 * why did userspace call us?
1482 	 */
1483 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1484 		return -EINVAL;
1485 
1486 	/*
1487 	 * If we have to use the (expensive) rmap swap method, we can
1488 	 * handle any number of extents and any format.
1489 	 */
1490 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1491 		return 0;
1492 
1493 	/*
1494 	 * If the target inode is in extent form and the temp inode is in btree
1495 	 * form then we will end up with the target inode in the wrong format,
1496 	 * as we already know there are fewer extents in the temp inode.
1497 	 */
1498 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1499 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1500 		return -EINVAL;
1501 
1502 	/* Check temp in extent form to max in target */
1503 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1504 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1505 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1506 		return -EINVAL;
1507 
1508 	/* Check target in extent form to max in temp */
1509 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1510 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1511 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1512 		return -EINVAL;
1513 
1514 	/*
1515 	 * If we are in a btree format, check that the temp root block will fit
1516 	 * in the target and that it has enough extents to be in btree format
1517 	 * in the target.
1518 	 *
1519 	 * Note that we have to be careful to allow btree->extent conversions
1520 	 * (a common defrag case) which will occur when the temp inode is in
1521 	 * extent format...
1522 	 */
1523 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1524 		if (XFS_IFORK_Q(ip) &&
1525 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1526 			return -EINVAL;
1527 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1528 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1529 			return -EINVAL;
1530 	}
1531 
1532 	/* Reciprocal target->temp btree format checks */
1533 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1534 		if (XFS_IFORK_Q(tip) &&
1535 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1536 			return -EINVAL;
1537 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1538 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1539 			return -EINVAL;
1540 	}
1541 
1542 	return 0;
1543 }
1544 
1545 static int
1546 xfs_swap_extent_flush(
1547 	struct xfs_inode	*ip)
1548 {
1549 	int	error;
1550 
1551 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1552 	if (error)
1553 		return error;
1554 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1555 
1556 	/* Verify O_DIRECT for ftmp */
1557 	if (VFS_I(ip)->i_mapping->nrpages)
1558 		return -EINVAL;
1559 	return 0;
1560 }
1561 
1562 /*
1563  * Move extents from one file to another, when rmap is enabled.
1564  */
1565 STATIC int
1566 xfs_swap_extent_rmap(
1567 	struct xfs_trans		**tpp,
1568 	struct xfs_inode		*ip,
1569 	struct xfs_inode		*tip)
1570 {
1571 	struct xfs_bmbt_irec		irec;
1572 	struct xfs_bmbt_irec		uirec;
1573 	struct xfs_bmbt_irec		tirec;
1574 	xfs_fileoff_t			offset_fsb;
1575 	xfs_fileoff_t			end_fsb;
1576 	xfs_filblks_t			count_fsb;
1577 	xfs_fsblock_t			firstfsb;
1578 	struct xfs_defer_ops		dfops;
1579 	int				error;
1580 	xfs_filblks_t			ilen;
1581 	xfs_filblks_t			rlen;
1582 	int				nimaps;
1583 	uint64_t			tip_flags2;
1584 
1585 	/*
1586 	 * If the source file has shared blocks, we must flag the donor
1587 	 * file as having shared blocks so that we get the shared-block
1588 	 * rmap functions when we go to fix up the rmaps.  The flags
1589 	 * will be switched for real later.
1590 	 */
1591 	tip_flags2 = tip->i_d.di_flags2;
1592 	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1593 		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1594 
1595 	offset_fsb = 0;
1596 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1597 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1598 
1599 	while (count_fsb) {
1600 		/* Read extent from the donor file */
1601 		nimaps = 1;
1602 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1603 				&nimaps, 0);
1604 		if (error)
1605 			goto out;
1606 		ASSERT(nimaps == 1);
1607 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1608 
1609 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1610 		ilen = tirec.br_blockcount;
1611 
1612 		/* Unmap the old blocks in the source file. */
1613 		while (tirec.br_blockcount) {
1614 			xfs_defer_init(&dfops, &firstfsb);
1615 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1616 
1617 			/* Read extent from the source file */
1618 			nimaps = 1;
1619 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1620 					tirec.br_blockcount, &irec,
1621 					&nimaps, 0);
1622 			if (error)
1623 				goto out_defer;
1624 			ASSERT(nimaps == 1);
1625 			ASSERT(tirec.br_startoff == irec.br_startoff);
1626 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1627 
1628 			/* Trim the extent. */
1629 			uirec = tirec;
1630 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1631 					tirec.br_blockcount,
1632 					irec.br_blockcount);
1633 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1634 
1635 			/* Remove the mapping from the donor file. */
1636 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1637 					tip, &uirec);
1638 			if (error)
1639 				goto out_defer;
1640 
1641 			/* Remove the mapping from the source file. */
1642 			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1643 					ip, &irec);
1644 			if (error)
1645 				goto out_defer;
1646 
1647 			/* Map the donor file's blocks into the source file. */
1648 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1649 					ip, &uirec);
1650 			if (error)
1651 				goto out_defer;
1652 
1653 			/* Map the source file's blocks into the donor file. */
1654 			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1655 					tip, &irec);
1656 			if (error)
1657 				goto out_defer;
1658 
1659 			xfs_defer_ijoin(&dfops, ip);
1660 			error = xfs_defer_finish(tpp, &dfops);
1661 			if (error)
1662 				goto out_defer;
1663 
1664 			tirec.br_startoff += rlen;
1665 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1666 			    tirec.br_startblock != DELAYSTARTBLOCK)
1667 				tirec.br_startblock += rlen;
1668 			tirec.br_blockcount -= rlen;
1669 		}
1670 
1671 		/* Roll on... */
1672 		count_fsb -= ilen;
1673 		offset_fsb += ilen;
1674 	}
1675 
1676 	tip->i_d.di_flags2 = tip_flags2;
1677 	return 0;
1678 
1679 out_defer:
1680 	xfs_defer_cancel(&dfops);
1681 out:
1682 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1683 	tip->i_d.di_flags2 = tip_flags2;
1684 	return error;
1685 }
1686 
1687 /* Swap the extents of two files by swapping data forks. */
1688 STATIC int
1689 xfs_swap_extent_forks(
1690 	struct xfs_trans	*tp,
1691 	struct xfs_inode	*ip,
1692 	struct xfs_inode	*tip,
1693 	int			*src_log_flags,
1694 	int			*target_log_flags)
1695 {
1696 	struct xfs_ifork	tempifp, *ifp, *tifp;
1697 	xfs_filblks_t		aforkblks = 0;
1698 	xfs_filblks_t		taforkblks = 0;
1699 	xfs_extnum_t		junk;
1700 	uint64_t		tmp;
1701 	int			error;
1702 
1703 	/*
1704 	 * Count the number of extended attribute blocks
1705 	 */
1706 	if (XFS_IFORK_Q(ip) && ip->i_d.di_anextents > 0 &&
1707 	    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
1708 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1709 				&aforkblks);
1710 		if (error)
1711 			return error;
1712 	}
1713 	if (XFS_IFORK_Q(tip) && tip->i_d.di_anextents > 0 &&
1714 	    tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
1715 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1716 				&taforkblks);
1717 		if (error)
1718 			return error;
1719 	}
1720 
1721 	/*
1722 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1723 	 * block headers. We can't start changing the bmbt blocks until the
1724 	 * inode owner change is logged so recovery does the right thing in the
1725 	 * event of a crash. Set the owner change log flags now and leave the
1726 	 * bmbt scan as the last step.
1727 	 */
1728 	if (ip->i_d.di_version == 3 &&
1729 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1730 		(*target_log_flags) |= XFS_ILOG_DOWNER;
1731 	if (tip->i_d.di_version == 3 &&
1732 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1733 		(*src_log_flags) |= XFS_ILOG_DOWNER;
1734 
1735 	/*
1736 	 * Swap the data forks of the inodes
1737 	 */
1738 	ifp = &ip->i_df;
1739 	tifp = &tip->i_df;
1740 	tempifp = *ifp;		/* struct copy */
1741 	*ifp = *tifp;		/* struct copy */
1742 	*tifp = tempifp;	/* struct copy */
1743 
1744 	/*
1745 	 * Fix the on-disk inode values
1746 	 */
1747 	tmp = (uint64_t)ip->i_d.di_nblocks;
1748 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1749 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1750 
1751 	tmp = (uint64_t) ip->i_d.di_nextents;
1752 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1753 	tip->i_d.di_nextents = tmp;
1754 
1755 	tmp = (uint64_t) ip->i_d.di_format;
1756 	ip->i_d.di_format = tip->i_d.di_format;
1757 	tip->i_d.di_format = tmp;
1758 
1759 	/*
1760 	 * The extents in the source inode could still contain speculative
1761 	 * preallocation beyond EOF (e.g. the file is open but not modified
1762 	 * while defrag is in progress). In that case, we need to copy over the
1763 	 * number of delalloc blocks the data fork in the source inode is
1764 	 * tracking beyond EOF so that when the fork is truncated away when the
1765 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1766 	 * counter on that inode.
1767 	 */
1768 	ASSERT(tip->i_delayed_blks == 0);
1769 	tip->i_delayed_blks = ip->i_delayed_blks;
1770 	ip->i_delayed_blks = 0;
1771 
1772 	switch (ip->i_d.di_format) {
1773 	case XFS_DINODE_FMT_EXTENTS:
1774 		(*src_log_flags) |= XFS_ILOG_DEXT;
1775 		break;
1776 	case XFS_DINODE_FMT_BTREE:
1777 		ASSERT(ip->i_d.di_version < 3 ||
1778 		       (*src_log_flags & XFS_ILOG_DOWNER));
1779 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1780 		break;
1781 	}
1782 
1783 	switch (tip->i_d.di_format) {
1784 	case XFS_DINODE_FMT_EXTENTS:
1785 		(*target_log_flags) |= XFS_ILOG_DEXT;
1786 		break;
1787 	case XFS_DINODE_FMT_BTREE:
1788 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1789 		ASSERT(tip->i_d.di_version < 3 ||
1790 		       (*target_log_flags & XFS_ILOG_DOWNER));
1791 		break;
1792 	}
1793 
1794 	return 0;
1795 }
1796 
1797 /*
1798  * Fix up the owners of the bmbt blocks to refer to the current inode. The
1799  * change owner scan attempts to order all modified buffers in the current
1800  * transaction. In the event of ordered buffer failure, the offending buffer is
1801  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1802  * the transaction in this case to replenish the fallback log reservation and
1803  * restart the scan. This process repeats until the scan completes.
1804  */
1805 static int
1806 xfs_swap_change_owner(
1807 	struct xfs_trans	**tpp,
1808 	struct xfs_inode	*ip,
1809 	struct xfs_inode	*tmpip)
1810 {
1811 	int			error;
1812 	struct xfs_trans	*tp = *tpp;
1813 
1814 	do {
1815 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1816 					      NULL);
1817 		/* success or fatal error */
1818 		if (error != -EAGAIN)
1819 			break;
1820 
1821 		error = xfs_trans_roll(tpp);
1822 		if (error)
1823 			break;
1824 		tp = *tpp;
1825 
1826 		/*
1827 		 * Redirty both inodes so they can relog and keep the log tail
1828 		 * moving forward.
1829 		 */
1830 		xfs_trans_ijoin(tp, ip, 0);
1831 		xfs_trans_ijoin(tp, tmpip, 0);
1832 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1833 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1834 	} while (true);
1835 
1836 	return error;
1837 }
1838 
1839 int
1840 xfs_swap_extents(
1841 	struct xfs_inode	*ip,	/* target inode */
1842 	struct xfs_inode	*tip,	/* tmp inode */
1843 	struct xfs_swapext	*sxp)
1844 {
1845 	struct xfs_mount	*mp = ip->i_mount;
1846 	struct xfs_trans	*tp;
1847 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1848 	int			src_log_flags, target_log_flags;
1849 	int			error = 0;
1850 	int			lock_flags;
1851 	struct xfs_ifork	*cowfp;
1852 	uint64_t		f;
1853 	int			resblks = 0;
1854 
1855 	/*
1856 	 * begin with.  Then we can safely ensure the inodes are flushed and
1857 	 * have no page cache. Once we have done this we can take the ilocks and
1858 	 * page cache safely. Once we have done this we can take the ilocks and
1859 	 * do the rest of the checks.
1860 	 */
1861 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1862 	lock_flags = XFS_MMAPLOCK_EXCL;
1863 	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1864 
1865 	/* Verify that both files have the same format */
1866 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1867 		error = -EINVAL;
1868 		goto out_unlock;
1869 	}
1870 
1871 	/* Verify both files are either real-time or non-realtime */
1872 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1873 		error = -EINVAL;
1874 		goto out_unlock;
1875 	}
1876 
1877 	error = xfs_swap_extent_flush(ip);
1878 	if (error)
1879 		goto out_unlock;
1880 	error = xfs_swap_extent_flush(tip);
1881 	if (error)
1882 		goto out_unlock;
1883 
1884 	/*
1885 	 * Extent "swapping" with rmap requires a permanent reservation and
1886 	 * a block reservation because it's really just a remap operation
1887 	 * performed with log redo items!
1888 	 */
1889 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1890 		int		w	= XFS_DATA_FORK;
1891 		uint32_t	ipnext	= XFS_IFORK_NEXTENTS(ip, w);
1892 		uint32_t	tipnext	= XFS_IFORK_NEXTENTS(tip, w);
1893 
1894 		/*
1895 		 * Conceptually this shouldn't affect the shape of either bmbt,
1896 		 * but since we atomically move extents one by one, we reserve
1897 		 * enough space to rebuild both trees.
1898 		 */
1899 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1900 		resblks +=  XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1901 
1902 		/*
1903 		 * Handle the corner case where either inode might straddle the
1904 		 * btree format boundary. If so, the inode could bounce between
1905 		 * btree <-> extent format on unmap -> remap cycles, freeing and
1906 		 * allocating a bmapbt block each time.
1907 		 */
1908 		if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
1909 			resblks += XFS_IFORK_MAXEXT(ip, w);
1910 		if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
1911 			resblks += XFS_IFORK_MAXEXT(tip, w);
1912 	}
1913 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1914 	if (error)
1915 		goto out_unlock;
1916 
1917 	/*
1918 	 * Lock and join the inodes to the transaction so that transaction commit
1919 	 * or cancel will unlock the inodes from this point onwards.
1920 	 */
1921 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1922 	lock_flags |= XFS_ILOCK_EXCL;
1923 	xfs_trans_ijoin(tp, ip, 0);
1924 	xfs_trans_ijoin(tp, tip, 0);
1925 
1926 
1928 	if (sxp->sx_offset != 0 ||
1929 	    sxp->sx_length != ip->i_d.di_size ||
1930 	    sxp->sx_length != tip->i_d.di_size) {
1931 		error = -EFAULT;
1932 		goto out_trans_cancel;
1933 	}
1934 
1935 	trace_xfs_swap_extent_before(ip, 0);
1936 	trace_xfs_swap_extent_before(tip, 1);
1937 
1938 	/* check inode formats now that data is flushed */
1939 	error = xfs_swap_extents_check_format(ip, tip);
1940 	if (error) {
1941 		xfs_notice(mp,
1942 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1943 				__func__, ip->i_ino);
1944 		goto out_trans_cancel;
1945 	}
1946 
1947 	/*
1948 	 * Compare the current change & modify times with those
1949 	 * passed in.  If they differ, we abort this swap.
1950 	 * This is the mechanism used to assure the calling
1951 	 * process that the file was not changed out from
1952 	 * under it.
1953 	 */
1954 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1955 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1956 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1957 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1958 		error = -EBUSY;
1959 		goto out_trans_cancel;
1960 	}
1961 
1962 	/*
1963 	 * Note the trickiness in setting the log flags - we set the owner log
1964 	 * flag on the opposite inode (i.e. the inode we are setting the new
1965 	 * owner to be) because once we swap the forks and log that, log
1966 	 * recovery is going to see the fork as owned by the swapped inode,
1967 	 * not the pre-swapped inodes.
1968 	 */
1969 	src_log_flags = XFS_ILOG_CORE;
1970 	target_log_flags = XFS_ILOG_CORE;
1971 
1972 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1973 		error = xfs_swap_extent_rmap(&tp, ip, tip);
1974 	else
1975 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1976 				&target_log_flags);
1977 	if (error)
1978 		goto out_trans_cancel;
1979 
1980 	/* Do we have to swap reflink flags? */
1981 	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1982 	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1983 		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1984 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1985 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1986 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1987 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
1988 	}
1989 
1990 	/* Swap the cow forks. */
1991 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1992 		xfs_extnum_t	extnum;
1993 
1994 		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1995 		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1996 
1997 		extnum = ip->i_cnextents;
1998 		ip->i_cnextents = tip->i_cnextents;
1999 		tip->i_cnextents = extnum;
2000 
2001 		cowfp = ip->i_cowfp;
2002 		ip->i_cowfp = tip->i_cowfp;
2003 		tip->i_cowfp = cowfp;
2004 
2005 		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
2006 			xfs_inode_set_cowblocks_tag(ip);
2007 		else
2008 			xfs_inode_clear_cowblocks_tag(ip);
2009 		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
2010 			xfs_inode_set_cowblocks_tag(tip);
2011 		else
2012 			xfs_inode_clear_cowblocks_tag(tip);
2013 	}
2014 
2015 	xfs_trans_log_inode(tp, ip,  src_log_flags);
2016 	xfs_trans_log_inode(tp, tip, target_log_flags);
2017 
2018 	/*
2019 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2020 	 * have inode number owner values in the bmbt blocks that still refer to
2021 	 * the old inode. Scan each bmbt to fix up the owner values with the
2022 	 * inode number of the current inode.
2023 	 */
2024 	if (src_log_flags & XFS_ILOG_DOWNER) {
2025 		error = xfs_swap_change_owner(&tp, ip, tip);
2026 		if (error)
2027 			goto out_trans_cancel;
2028 	}
2029 	if (target_log_flags & XFS_ILOG_DOWNER) {
2030 		error = xfs_swap_change_owner(&tp, tip, ip);
2031 		if (error)
2032 			goto out_trans_cancel;
2033 	}
2034 
2035 	/*
2036 	 * If this is a synchronous mount, make sure that the
2037 	 * transaction goes to disk before returning to the user.
2038 	 */
2039 	if (mp->m_flags & XFS_MOUNT_WSYNC)
2040 		xfs_trans_set_sync(tp);
2041 
2042 	error = xfs_trans_commit(tp);
2043 
2044 	trace_xfs_swap_extent_after(ip, 0);
2045 	trace_xfs_swap_extent_after(tip, 1);
2046 
2047 out_unlock:
2048 	xfs_iunlock(ip, lock_flags);
2049 	xfs_iunlock(tip, lock_flags);
2050 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
2051 	return error;
2052 
2053 out_trans_cancel:
2054 	xfs_trans_cancel(tp);
2055 	goto out_unlock;
2056 }
2057