xref: /openbmc/linux/fs/xfs/xfs_bmap_util.c (revision 55e43d6abd078ed6d219902ce8cb4d68e3c993ba)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * Copyright (c) 2012 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_bit.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_btree.h"
18 #include "xfs_trans.h"
19 #include "xfs_alloc.h"
20 #include "xfs_bmap.h"
21 #include "xfs_bmap_util.h"
22 #include "xfs_bmap_btree.h"
23 #include "xfs_rtalloc.h"
24 #include "xfs_error.h"
25 #include "xfs_quota.h"
26 #include "xfs_trans_space.h"
27 #include "xfs_trace.h"
28 #include "xfs_icache.h"
29 #include "xfs_iomap.h"
30 #include "xfs_reflink.h"
31 
32 /* Kernel only BMAP related definitions and functions */
33 
34 /*
35  * Convert the given file system block to a disk block.  We have to treat it
36  * differently based on whether the file is a real time file or not, because the
37  * bmap code does.
38  */
39 xfs_daddr_t
40 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
41 {
42 	if (XFS_IS_REALTIME_INODE(ip))
43 		return XFS_FSB_TO_BB(ip->i_mount, fsb);
44 	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
45 }
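/*
 * Reader's aid, not part of the original source: the realtime device is a
 * flat run of blocks, so its fsb converts linearly, while a data-device fsb
 * encodes an AG number plus an AG-relative block and must be linearised
 * first.  Roughly (macro details live in xfs_format.h and may differ):
 *
 *	rt:   daddr = fsb << (sb_blocklog - BBSHIFT)
 *	data: daddr = (agno * sb_agblocks + agbno) << (sb_blocklog - BBSHIFT)
 */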
46 
47 /*
48  * Routine to zero an extent on disk allocated to the specific inode.
49  *
50  * The VFS functions take a linearised filesystem block offset, so we have to
51  * convert the sparse xfs fsb to the right format first.
52  * VFS types are real funky, too.
53  */
54 int
55 xfs_zero_extent(
56 	struct xfs_inode	*ip,
57 	xfs_fsblock_t		start_fsb,
58 	xfs_off_t		count_fsb)
59 {
60 	struct xfs_mount	*mp = ip->i_mount;
61 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
62 	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
63 	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
64 
65 	return blkdev_issue_zeroout(target->bt_bdev,
66 		block << (mp->m_super->s_blocksize_bits - 9),
67 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
68 		GFP_NOFS, 0);
69 }
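/*
 * Worked example, assumed geometry: with 4096-byte blocks s_blocksize_bits
 * is 12, so the shift above is 12 - 9 = 3 and every filesystem block covers
 * 8 sectors of 512 bytes.  Zeroing count_fsb = 4 blocks thus issues a
 * zeroout of 32 sectors starting at sector block << 3.
 */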
70 
71 #ifdef CONFIG_XFS_RT
72 int
73 xfs_bmap_rtalloc(
74 	struct xfs_bmalloca	*ap)
75 {
76 	struct xfs_mount	*mp = ap->ip->i_mount;
77 	xfs_fileoff_t		orig_offset = ap->offset;
78 	xfs_rtblock_t		rtb;
79 	xfs_extlen_t		prod = 0;  /* product factor for allocators */
80 	xfs_extlen_t		mod = 0;   /* offset alignment remainder */
81 	xfs_extlen_t		ralen = 0; /* realtime allocation length */
82 	xfs_extlen_t		align;     /* minimum allocation alignment */
83 	xfs_extlen_t		orig_length = ap->length;
84 	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
85 	xfs_extlen_t		raminlen;
86 	bool			rtlocked = false;
87 	bool			ignore_locality = false;
88 	int			error;
89 
90 	align = xfs_get_extsz_hint(ap->ip);
91 retry:
92 	prod = align / mp->m_sb.sb_rextsize;
93 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
94 					align, 1, ap->eof, 0,
95 					ap->conv, &ap->offset, &ap->length);
96 	if (error)
97 		return error;
98 	ASSERT(ap->length);
99 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
100 
101 	/*
102 	 * If we shifted the file offset downward to satisfy an extent size
103 	 * hint, increase minlen by that amount so that the allocator won't
104 	 * give us an allocation that's too short to cover at least one of the
105 	 * blocks that the caller asked for.
106 	 */
107 	if (ap->offset != orig_offset)
108 		minlen += orig_offset - ap->offset;
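	/*
	 * Illustration with assumed numbers: an extent size hint of 8 blocks
	 * and a request at offset 13 gets realigned to offset 8, so minlen
	 * grows by 5; a shorter allocation could end before block 13 and
	 * miss the range the caller actually asked for.
	 */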
109 
110 	/*
111 	 * If the offset & length are not perfectly aligned
112 	 * then kill prod, it will just get us in trouble.
113 	 */
114 	div_u64_rem(ap->offset, align, &mod);
115 	if (mod || ap->length % align)
116 		prod = 1;
117 	/*
118 	 * Set ralen to be the actual requested length in rtextents.
119 	 */
120 	ralen = ap->length / mp->m_sb.sb_rextsize;
121 	/*
122 	 * If the old value was close enough to XFS_MAX_BMBT_EXTLEN that
123 	 * we rounded up to it, cut it back so it's valid again.
124 	 * Note that if it's a really large request (bigger than
125 	 * XFS_MAX_BMBT_EXTLEN), we don't hear about that number, and can't
126 	 * adjust the starting point to match it.
127 	 */
128 	if (ralen * mp->m_sb.sb_rextsize >= XFS_MAX_BMBT_EXTLEN)
129 		ralen = XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize;
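	/*
	 * Example with assumed values: for sb_rextsize = 16 and
	 * XFS_MAX_BMBT_EXTLEN = 2^21 - 1, ralen is capped at
	 * (2^21 - 1) / 16 = 131071 rtextents (2097136 blocks), which still
	 * fits in a single bmbt extent record.
	 */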
130 
131 	/*
132 	 * Lock out modifications to both the RT bitmap and summary inodes
133 	 */
134 	if (!rtlocked) {
135 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
136 		xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
137 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
138 		xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
139 		rtlocked = true;
140 	}
141 
142 	/*
143 	 * If it's an allocation to an empty file at offset 0,
144 	 * pick an extent that will space things out in the rt area.
145 	 */
146 	if (ap->eof && ap->offset == 0) {
147 		xfs_rtblock_t rtx; /* realtime extent no */
148 
149 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
150 		if (error)
151 			return error;
152 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
153 	} else {
154 		ap->blkno = 0;
155 	}
156 
157 	xfs_bmap_adjacent(ap);
158 
159 	/*
160 	 * Realtime allocation, done through xfs_rtallocate_extent.
161 	 */
162 	if (ignore_locality)
163 		ap->blkno = 0;
164 	else
165 		do_div(ap->blkno, mp->m_sb.sb_rextsize);
166 	rtb = ap->blkno;
167 	ap->length = ralen;
168 	raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
169 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
170 			&ralen, ap->wasdel, prod, &rtb);
171 	if (error)
172 		return error;
173 
174 	if (rtb != NULLRTBLOCK) {
175 		ap->blkno = rtb * mp->m_sb.sb_rextsize;
176 		ap->length = ralen * mp->m_sb.sb_rextsize;
177 		ap->ip->i_nblocks += ap->length;
178 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
179 		if (ap->wasdel)
180 			ap->ip->i_delayed_blks -= ap->length;
181 		/*
182 		 * Adjust the disk quota also. This was reserved
183 		 * earlier.
184 		 */
185 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
186 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
187 					XFS_TRANS_DQ_RTBCOUNT, ap->length);
188 		return 0;
189 	}
190 
191 	if (align > mp->m_sb.sb_rextsize) {
192 		/*
193 		 * We previously enlarged the request length to try to satisfy
194 		 * an extent size hint.  The allocator didn't return anything,
195 		 * so reset the parameters to the original values and try again
196 		 * without alignment criteria.
197 		 */
198 		ap->offset = orig_offset;
199 		ap->length = orig_length;
200 		minlen = align = mp->m_sb.sb_rextsize;
201 		goto retry;
202 	}
203 
204 	if (!ignore_locality && ap->blkno != 0) {
205 		/*
206 		 * If we can't allocate near a specific rt extent, try again
207 		 * without locality criteria.
208 		 */
209 		ignore_locality = true;
210 		goto retry;
211 	}
212 
213 	ap->blkno = NULLFSBLOCK;
214 	ap->length = 0;
215 	return 0;
216 }
217 #endif /* CONFIG_XFS_RT */
218 
219 /*
220  * Extent tree block counting routines.
221  */
222 
223 /*
224  * Count leaf blocks given a range of extent records.  Delayed allocation
225  * extents are not counted towards the totals.
226  */
227 xfs_extnum_t
228 xfs_bmap_count_leaves(
229 	struct xfs_ifork	*ifp,
230 	xfs_filblks_t		*count)
231 {
232 	struct xfs_iext_cursor	icur;
233 	struct xfs_bmbt_irec	got;
234 	xfs_extnum_t		numrecs = 0;
235 
236 	for_each_xfs_iext(ifp, &icur, &got) {
237 		if (!isnullstartblock(got.br_startblock)) {
238 			*count += got.br_blockcount;
239 			numrecs++;
240 		}
241 	}
242 
243 	return numrecs;
244 }
245 
246 /*
247  * Count fsblocks of the given fork.  Delayed allocation extents are
248  * not counted towards the totals.
249  */
250 int
251 xfs_bmap_count_blocks(
252 	struct xfs_trans	*tp,
253 	struct xfs_inode	*ip,
254 	int			whichfork,
255 	xfs_extnum_t		*nextents,
256 	xfs_filblks_t		*count)
257 {
258 	struct xfs_mount	*mp = ip->i_mount;
259 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
260 	struct xfs_btree_cur	*cur;
261 	xfs_extlen_t		btblocks = 0;
262 	int			error;
263 
264 	*nextents = 0;
265 	*count = 0;
266 
267 	if (!ifp)
268 		return 0;
269 
270 	switch (ifp->if_format) {
271 	case XFS_DINODE_FMT_BTREE:
272 		error = xfs_iread_extents(tp, ip, whichfork);
273 		if (error)
274 			return error;
275 
276 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
277 		error = xfs_btree_count_blocks(cur, &btblocks);
278 		xfs_btree_del_cursor(cur, error);
279 		if (error)
280 			return error;
281 
282 		/*
283 		 * xfs_btree_count_blocks includes the root block contained in
284 		 * the inode fork in @btblocks, so subtract one because we're
285 		 * only interested in allocated disk blocks.
286 		 */
287 		*count += btblocks - 1;
288 
289 		fallthrough;
290 	case XFS_DINODE_FMT_EXTENTS:
291 		*nextents = xfs_bmap_count_leaves(ifp, count);
292 		break;
293 	}
294 
295 	return 0;
296 }
297 
298 static int
299 xfs_getbmap_report_one(
300 	struct xfs_inode	*ip,
301 	struct getbmapx		*bmv,
302 	struct kgetbmap		*out,
303 	int64_t			bmv_end,
304 	struct xfs_bmbt_irec	*got)
305 {
306 	struct kgetbmap		*p = out + bmv->bmv_entries;
307 	bool			shared = false;
308 	int			error;
309 
310 	error = xfs_reflink_trim_around_shared(ip, got, &shared);
311 	if (error)
312 		return error;
313 
314 	if (isnullstartblock(got->br_startblock) ||
315 	    got->br_startblock == DELAYSTARTBLOCK) {
316 		/*
317 		 * Take the flush completion as being a point-in-time snapshot
318 		 * where there are no delalloc extents, and if any new ones
319 		 * have been created racily, just skip them as being 'after'
320 		 * the flush and so don't get reported.
321 		 */
322 		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
323 			return 0;
324 
325 		p->bmv_oflags |= BMV_OF_DELALLOC;
326 		p->bmv_block = -2;
327 	} else {
328 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
329 	}
330 
331 	if (got->br_state == XFS_EXT_UNWRITTEN &&
332 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
333 		p->bmv_oflags |= BMV_OF_PREALLOC;
334 
335 	if (shared)
336 		p->bmv_oflags |= BMV_OF_SHARED;
337 
338 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
339 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
340 
341 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
342 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
343 	bmv->bmv_entries++;
344 	return 0;
345 }
346 
347 static void
348 xfs_getbmap_report_hole(
349 	struct xfs_inode	*ip,
350 	struct getbmapx		*bmv,
351 	struct kgetbmap		*out,
352 	int64_t			bmv_end,
353 	xfs_fileoff_t		bno,
354 	xfs_fileoff_t		end)
355 {
356 	struct kgetbmap		*p = out + bmv->bmv_entries;
357 
358 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
359 		return;
360 
361 	p->bmv_block = -1;
362 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
363 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
364 
365 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
366 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
367 	bmv->bmv_entries++;
368 }
369 
370 static inline bool
371 xfs_getbmap_full(
372 	struct getbmapx		*bmv)
373 {
374 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
375 }
376 
377 static bool
378 xfs_getbmap_next_rec(
379 	struct xfs_bmbt_irec	*rec,
380 	xfs_fileoff_t		total_end)
381 {
382 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
383 
384 	if (end == total_end)
385 		return false;
386 
387 	rec->br_startoff += rec->br_blockcount;
388 	if (!isnullstartblock(rec->br_startblock) &&
389 	    rec->br_startblock != DELAYSTARTBLOCK)
390 		rec->br_startblock += rec->br_blockcount;
391 	rec->br_blockcount = total_end - end;
392 	return true;
393 }
394 
395 /*
396  * Get inode's extents as described in bmv, and format for output.
397  * Calls formatter to fill the user's buffer until all extents
398  * are mapped, until the passed-in bmv->bmv_count slots have
399  * been filled, or until the formatter short-circuits the loop,
400  * if it is tracking filled-in extents on its own.
401  */
402 int						/* error code */
403 xfs_getbmap(
404 	struct xfs_inode	*ip,
405 	struct getbmapx		*bmv,		/* user bmap structure */
406 	struct kgetbmap		*out)
407 {
408 	struct xfs_mount	*mp = ip->i_mount;
409 	int			iflags = bmv->bmv_iflags;
410 	int			whichfork, lock, error = 0;
411 	int64_t			bmv_end, max_len;
412 	xfs_fileoff_t		bno, first_bno;
413 	struct xfs_ifork	*ifp;
414 	struct xfs_bmbt_irec	got, rec;
415 	xfs_filblks_t		len;
416 	struct xfs_iext_cursor	icur;
417 
418 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
419 		return -EINVAL;
420 #ifndef DEBUG
421 	/* Only allow CoW fork queries if we're debugging. */
422 	if (iflags & BMV_IF_COWFORK)
423 		return -EINVAL;
424 #endif
425 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
426 		return -EINVAL;
427 
428 	if (bmv->bmv_length < -1)
429 		return -EINVAL;
430 	bmv->bmv_entries = 0;
431 	if (bmv->bmv_length == 0)
432 		return 0;
433 
434 	if (iflags & BMV_IF_ATTRFORK)
435 		whichfork = XFS_ATTR_FORK;
436 	else if (iflags & BMV_IF_COWFORK)
437 		whichfork = XFS_COW_FORK;
438 	else
439 		whichfork = XFS_DATA_FORK;
440 
441 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
442 	switch (whichfork) {
443 	case XFS_ATTR_FORK:
444 		lock = xfs_ilock_attr_map_shared(ip);
445 		if (!xfs_inode_has_attr_fork(ip))
446 			goto out_unlock_ilock;
447 
448 		max_len = 1LL << 32;
449 		break;
450 	case XFS_COW_FORK:
451 		lock = XFS_ILOCK_SHARED;
452 		xfs_ilock(ip, lock);
453 
454 		/* No CoW fork? Just return */
455 		if (!xfs_ifork_ptr(ip, whichfork))
456 			goto out_unlock_ilock;
457 
458 		if (xfs_get_cowextsz_hint(ip))
459 			max_len = mp->m_super->s_maxbytes;
460 		else
461 			max_len = XFS_ISIZE(ip);
462 		break;
463 	case XFS_DATA_FORK:
464 		if (!(iflags & BMV_IF_DELALLOC) &&
465 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
466 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
467 			if (error)
468 				goto out_unlock_iolock;
469 
470 			/*
471 			 * Even after flushing the inode, there can still be
472 			 * delalloc blocks on the inode beyond EOF due to
473 			 * speculative preallocation.  These are not removed
474 			 * until the release function is called or the inode
475 			 * is inactivated.  Hence we cannot assert here that
476 			 * ip->i_delayed_blks == 0.
477 			 */
478 		}
479 
480 		if (xfs_get_extsz_hint(ip) ||
481 		    (ip->i_diflags &
482 		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
483 			max_len = mp->m_super->s_maxbytes;
484 		else
485 			max_len = XFS_ISIZE(ip);
486 
487 		lock = xfs_ilock_data_map_shared(ip);
488 		break;
489 	}
490 
491 	ifp = xfs_ifork_ptr(ip, whichfork);
492 
493 	switch (ifp->if_format) {
494 	case XFS_DINODE_FMT_EXTENTS:
495 	case XFS_DINODE_FMT_BTREE:
496 		break;
497 	case XFS_DINODE_FMT_LOCAL:
498 		/* Local format inode forks report no extents. */
499 		goto out_unlock_ilock;
500 	default:
501 		error = -EINVAL;
502 		goto out_unlock_ilock;
503 	}
504 
505 	if (bmv->bmv_length == -1) {
506 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
507 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
508 	}
509 
510 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
511 
512 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
513 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
514 
515 	error = xfs_iread_extents(NULL, ip, whichfork);
516 	if (error)
517 		goto out_unlock_ilock;
518 
519 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
520 		/*
521 		 * Report a whole-file hole if the delalloc flag is set to
522 		 * stay compatible with the old implementation.
523 		 */
524 		if (iflags & BMV_IF_DELALLOC)
525 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
526 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
527 		goto out_unlock_ilock;
528 	}
529 
530 	while (!xfs_getbmap_full(bmv)) {
531 		xfs_trim_extent(&got, first_bno, len);
532 
533 		/*
534 		 * Report an entry for a hole if this extent doesn't directly
535 		 * follow the previous one.
536 		 */
537 		if (got.br_startoff > bno) {
538 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
539 					got.br_startoff);
540 			if (xfs_getbmap_full(bmv))
541 				break;
542 		}
543 
544 		/*
545 		 * In order to report shared extents accurately, we report each
546 		 * distinct shared / unshared part of a single bmbt record with
547 		 * an individual getbmapx record.
548 		 */
549 		bno = got.br_startoff + got.br_blockcount;
550 		rec = got;
551 		do {
552 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
553 					&rec);
554 			if (error || xfs_getbmap_full(bmv))
555 				goto out_unlock_ilock;
556 		} while (xfs_getbmap_next_rec(&rec, bno));
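		/*
		 * Hypothetical layout to illustrate the loop above: if @got
		 * spans file blocks 0..99 and only 20..39 are shared, the
		 * trim/next_rec iteration emits three getbmapx records
		 * (0..19, 20..39 with BMV_OF_SHARED, 40..99) instead of one.
		 */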
557 
558 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
559 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
560 
561 			if (bmv->bmv_entries > 0)
562 				out[bmv->bmv_entries - 1].bmv_oflags |=
563 								BMV_OF_LAST;
564 
565 			if (whichfork != XFS_ATTR_FORK && bno < end &&
566 			    !xfs_getbmap_full(bmv)) {
567 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
568 						bno, end);
569 			}
570 			break;
571 		}
572 
573 		if (bno >= first_bno + len)
574 			break;
575 	}
576 
577 out_unlock_ilock:
578 	xfs_iunlock(ip, lock);
579 out_unlock_iolock:
580 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
581 	return error;
582 }
583 
584 /*
585  * Dead simple method of punching delayed allocation blocks from a range in
586  * the inode.  This will always punch out both the start and end blocks, even
587  * if the ranges only partially overlap them, so it is up to the caller to
588  * ensure that partial blocks are not passed in.
589  */
590 int
591 xfs_bmap_punch_delalloc_range(
592 	struct xfs_inode	*ip,
593 	xfs_off_t		start_byte,
594 	xfs_off_t		end_byte)
595 {
596 	struct xfs_mount	*mp = ip->i_mount;
597 	struct xfs_ifork	*ifp = &ip->i_df;
598 	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
599 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
600 	struct xfs_bmbt_irec	got, del;
601 	struct xfs_iext_cursor	icur;
602 	int			error = 0;
603 
604 	ASSERT(!xfs_need_iread_extents(ifp));
605 
606 	xfs_ilock(ip, XFS_ILOCK_EXCL);
607 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
608 		goto out_unlock;
609 
610 	while (got.br_startoff + got.br_blockcount > start_fsb) {
611 		del = got;
612 		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);
613 
614 		/*
615 		 * A delete can push the cursor forward. Step back to the
616 		 * previous extent on non-delalloc or extents outside the
617 		 * target range.
618 		 */
619 		if (!del.br_blockcount ||
620 		    !isnullstartblock(del.br_startblock)) {
621 			if (!xfs_iext_prev_extent(ifp, &icur, &got))
622 				break;
623 			continue;
624 		}
625 
626 		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
627 						  &got, &del);
628 		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
629 			break;
630 	}
631 
632 out_unlock:
633 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
634 	return error;
635 }
636 
637 /*
638  * Test whether it is appropriate to check an inode for and free post EOF
639  * blocks.
640  */
641 bool
642 xfs_can_free_eofblocks(
643 	struct xfs_inode	*ip)
644 {
645 	struct xfs_bmbt_irec	imap;
646 	struct xfs_mount	*mp = ip->i_mount;
647 	xfs_fileoff_t		end_fsb;
648 	xfs_fileoff_t		last_fsb;
649 	int			nimaps = 1;
650 	int			error;
651 
652 	/*
653 	 * Caller must either hold the exclusive io lock, or be inactivating
654 	 * the inode, which guarantees there are no other users of the inode.
655 	 */
656 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
657 	       (VFS_I(ip)->i_state & I_FREEING));
658 
659 	/* prealloc/delalloc exists only on regular files */
660 	if (!S_ISREG(VFS_I(ip)->i_mode))
661 		return false;
662 
663 	/*
664 	 * Zero sized files with no cached pages and delalloc blocks will not
665 	 * have speculative prealloc/delalloc blocks to remove.
666 	 */
667 	if (VFS_I(ip)->i_size == 0 &&
668 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
669 	    ip->i_delayed_blks == 0)
670 		return false;
671 
672 	/* If we haven't read in the extent list, then don't do it now. */
673 	if (xfs_need_iread_extents(&ip->i_df))
674 		return false;
675 
676 	/*
677 	 * Only free real extents for inodes with persistent preallocations or
678 	 * the append-only flag.
679 	 */
680 	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
681 		if (ip->i_delayed_blks == 0)
682 			return false;
683 
684 	/*
685 	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
686 	 * range supported by the page cache, because the truncation will loop
687 	 * forever.
688 	 */
689 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
690 	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
691 		end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize);
692 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
693 	if (last_fsb <= end_fsb)
694 		return false;
695 
696 	/*
697 	 * Look up the mapping for the first block past EOF.  If we can't find
698 	 * it, there's nothing to free.
699 	 */
700 	xfs_ilock(ip, XFS_ILOCK_SHARED);
701 	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
702 			0);
703 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
704 	if (error || nimaps == 0)
705 		return false;
706 
707 	/*
708 	 * If there's a real mapping there or there are delayed allocation
709 	 * reservations, then we have post-EOF blocks to try to free.
710 	 */
711 	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
712 }
713 
714 /*
715  * This is called to free any blocks beyond eof. The caller must hold
716  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
717  * reference to the inode.
718  */
719 int
720 xfs_free_eofblocks(
721 	struct xfs_inode	*ip)
722 {
723 	struct xfs_trans	*tp;
724 	struct xfs_mount	*mp = ip->i_mount;
725 	int			error;
726 
727 	/* Attach the dquots to the inode up front. */
728 	error = xfs_qm_dqattach(ip);
729 	if (error)
730 		return error;
731 
732 	/* Wait on dio to ensure i_size has settled. */
733 	inode_dio_wait(VFS_I(ip));
734 
735 	/*
736 	 * For preallocated files only free delayed allocations.
737 	 *
738 	 * Note that this means we also leave speculative preallocations in
739 	 * place for preallocated files.
740 	 */
741 	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
742 		if (ip->i_delayed_blks) {
743 			xfs_bmap_punch_delalloc_range(ip,
744 				round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
745 				LLONG_MAX);
746 		}
747 		xfs_inode_clear_eofblocks_tag(ip);
748 		return 0;
749 	}
750 
751 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
752 	if (error) {
753 		ASSERT(xfs_is_shutdown(mp));
754 		return error;
755 	}
756 
757 	xfs_ilock(ip, XFS_ILOCK_EXCL);
758 	xfs_trans_ijoin(tp, ip, 0);
759 
760 	/*
761 	 * Do not update the on-disk file size.  If we update the on-disk file
762 	 * size and then the system crashes before the contents of the file are
763 	 * flushed to disk then the files may be full of holes (ie NULL files
764 	 * bug).
765 	 */
766 	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
767 				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
768 	if (error)
769 		goto err_cancel;
770 
771 	error = xfs_trans_commit(tp);
772 	if (error)
773 		goto out_unlock;
774 
775 	xfs_inode_clear_eofblocks_tag(ip);
776 	goto out_unlock;
777 
778 err_cancel:
779 	/*
780 	 * If we get an error at this point we simply don't
781 	 * bother truncating the file.
782 	 */
783 	xfs_trans_cancel(tp);
784 out_unlock:
785 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
786 	return error;
787 }
788 
789 int
790 xfs_alloc_file_space(
791 	struct xfs_inode	*ip,
792 	xfs_off_t		offset,
793 	xfs_off_t		len)
794 {
795 	xfs_mount_t		*mp = ip->i_mount;
796 	xfs_off_t		count;
797 	xfs_filblks_t		allocatesize_fsb;
798 	xfs_extlen_t		extsz, temp;
799 	xfs_fileoff_t		startoffset_fsb;
800 	xfs_fileoff_t		endoffset_fsb;
801 	int			rt;
802 	xfs_trans_t		*tp;
803 	xfs_bmbt_irec_t		imaps[1], *imapp;
804 	int			error;
805 
806 	trace_xfs_alloc_file_space(ip);
807 
808 	if (xfs_is_shutdown(mp))
809 		return -EIO;
810 
811 	error = xfs_qm_dqattach(ip);
812 	if (error)
813 		return error;
814 
815 	if (len <= 0)
816 		return -EINVAL;
817 
818 	rt = XFS_IS_REALTIME_INODE(ip);
819 	extsz = xfs_get_extsz_hint(ip);
820 
821 	count = len;
822 	imapp = &imaps[0];
823 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
824 	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
825 	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
826 
827 	/*
828 	 * Allocate file space until done or until there is an error
829 	 */
830 	while (allocatesize_fsb && !error) {
831 		xfs_fileoff_t	s, e;
832 		unsigned int	dblocks, rblocks, resblks;
833 		int		nimaps = 1;
834 
835 		/*
836 		 * Determine space reservations for data/realtime.
837 		 */
838 		if (unlikely(extsz)) {
839 			s = startoffset_fsb;
840 			do_div(s, extsz);
841 			s *= extsz;
842 			e = startoffset_fsb + allocatesize_fsb;
843 			div_u64_rem(startoffset_fsb, extsz, &temp);
844 			if (temp)
845 				e += temp;
846 			div_u64_rem(e, extsz, &temp);
847 			if (temp)
848 				e += extsz - temp;
849 		} else {
850 			s = 0;
851 			e = allocatesize_fsb;
852 		}
853 
854 		/*
855 		 * The transaction reservation is limited to a 32-bit block
856 		 * count, hence we need to limit the number of blocks we are
857 		 * trying to reserve to avoid an overflow. We can't allocate
858 		 * more than @nimaps extents, and an extent is limited on disk
859 		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
860 		 * limit.
861 		 */
862 		resblks = min_t(xfs_fileoff_t, (e - s),
863 				(XFS_MAX_BMBT_EXTLEN * nimaps));
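		/*
		 * Example with assumed numbers: for nimaps = 1 the cap is
		 * XFS_MAX_BMBT_EXTLEN = 2^21 - 1 blocks, so a multi-terabyte
		 * preallocation proceeds as a series of transactions each
		 * reserving at most ~8GiB worth of 4k blocks.
		 */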
864 		if (unlikely(rt)) {
865 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
866 			rblocks = resblks;
867 		} else {
868 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
869 			rblocks = 0;
870 		}
871 
872 		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
873 				dblocks, rblocks, false, &tp);
874 		if (error)
875 			break;
876 
877 		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
878 				XFS_IEXT_ADD_NOSPLIT_CNT);
879 		if (error == -EFBIG)
880 			error = xfs_iext_count_upgrade(tp, ip,
881 					XFS_IEXT_ADD_NOSPLIT_CNT);
882 		if (error)
883 			goto error;
884 
885 		/*
886 		 * If the allocator cannot find a single free extent large
887 		 * enough to cover the start block of the requested range,
888 		 * xfs_bmapi_write will return -ENOSR.
889 		 *
890 		 * In that case we simply need to keep looping with the same
891 		 * startoffset_fsb so that one of the following allocations
892 		 * will eventually reach the requested range.
893 		 */
894 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
895 				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
896 				&nimaps);
897 		if (error) {
898 			if (error != -ENOSR)
899 				goto error;
900 			error = 0;
901 		} else {
902 			startoffset_fsb += imapp->br_blockcount;
903 			allocatesize_fsb -= imapp->br_blockcount;
904 		}
905 
906 		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
907 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
908 
909 		error = xfs_trans_commit(tp);
910 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
911 	}
912 
913 	return error;
914 
915 error:
916 	xfs_trans_cancel(tp);
917 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
918 	return error;
919 }
920 
921 static int
922 xfs_unmap_extent(
923 	struct xfs_inode	*ip,
924 	xfs_fileoff_t		startoffset_fsb,
925 	xfs_filblks_t		len_fsb,
926 	int			*done)
927 {
928 	struct xfs_mount	*mp = ip->i_mount;
929 	struct xfs_trans	*tp;
930 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
931 	int			error;
932 
933 	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
934 			false, &tp);
935 	if (error)
936 		return error;
937 
938 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
939 			XFS_IEXT_PUNCH_HOLE_CNT);
940 	if (error == -EFBIG)
941 		error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
942 	if (error)
943 		goto out_trans_cancel;
944 
945 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
946 	if (error)
947 		goto out_trans_cancel;
948 
949 	error = xfs_trans_commit(tp);
950 out_unlock:
951 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
952 	return error;
953 
954 out_trans_cancel:
955 	xfs_trans_cancel(tp);
956 	goto out_unlock;
957 }
958 
959 /* Caller must first wait for the completion of any pending DIOs if required. */
960 int
961 xfs_flush_unmap_range(
962 	struct xfs_inode	*ip,
963 	xfs_off_t		offset,
964 	xfs_off_t		len)
965 {
966 	struct inode		*inode = VFS_I(ip);
967 	xfs_off_t		rounding, start, end;
968 	int			error;
969 
970 	/*
971 	 * Make sure we extend the flush out to extent alignment
972 	 * boundaries so any extent range overlapping the start/end
973 	 * of the modification we are about to do is clean and idle.
974 	 */
975 	rounding = max_t(xfs_off_t, xfs_inode_alloc_unitsize(ip), PAGE_SIZE);
976 	start = rounddown_64(offset, rounding);
977 	end = roundup_64(offset + len, rounding) - 1;
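	/*
	 * Worked example, assumed geometry: 4k blocks, no extent size hint,
	 * so rounding = PAGE_SIZE = 4096.  For offset = 6000, len = 3000 the
	 * flush and invalidation cover bytes 4096..12287, i.e. every page
	 * touching the modified range.
	 */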
978 
979 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
980 	if (error)
981 		return error;
982 	truncate_pagecache_range(inode, start, end);
983 	return 0;
984 }
985 
986 int
987 xfs_free_file_space(
988 	struct xfs_inode	*ip,
989 	xfs_off_t		offset,
990 	xfs_off_t		len)
991 {
992 	struct xfs_mount	*mp = ip->i_mount;
993 	xfs_fileoff_t		startoffset_fsb;
994 	xfs_fileoff_t		endoffset_fsb;
995 	int			done = 0, error;
996 
997 	trace_xfs_free_file_space(ip);
998 
999 	error = xfs_qm_dqattach(ip);
1000 	if (error)
1001 		return error;
1002 
1003 	if (len <= 0)	/* if nothing being freed */
1004 		return 0;
1005 
1006 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1007 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1008 
1009 	/* We can only free complete realtime extents. */
1010 	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
1011 		startoffset_fsb = roundup_64(startoffset_fsb,
1012 					     mp->m_sb.sb_rextsize);
1013 		endoffset_fsb = rounddown_64(endoffset_fsb,
1014 					     mp->m_sb.sb_rextsize);
1015 	}
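	/*
	 * Example with an assumed rextsize of 4 blocks: a punch covering fsb
	 * 5..13 rounds startoffset_fsb up to 8 and endoffset_fsb down to 12,
	 * so only rt extent 2 (blocks 8..11) is unmapped here; the partial
	 * blocks at either end are left to the zeroing further down.
	 */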
1016 
1017 	/*
1018 	 * Need to zero the stuff we're not freeing, on disk.
1019 	 */
1020 	if (endoffset_fsb > startoffset_fsb) {
1021 		while (!done) {
1022 			error = xfs_unmap_extent(ip, startoffset_fsb,
1023 					endoffset_fsb - startoffset_fsb, &done);
1024 			if (error)
1025 				return error;
1026 		}
1027 	}
1028 
1029 	/*
1030 	 * Now that we've unmapped all full blocks we'll have to zero out any
1031 	 * partial block at the beginning and/or end.  xfs_zero_range is smart
1032 	 * enough to skip any holes, including those we just created, but we
1033 	 * must take care not to zero beyond EOF and enlarge i_size.
1034 	 */
1035 	if (offset >= XFS_ISIZE(ip))
1036 		return 0;
1037 	if (offset + len > XFS_ISIZE(ip))
1038 		len = XFS_ISIZE(ip) - offset;
1039 	error = xfs_zero_range(ip, offset, len, NULL);
1040 	if (error)
1041 		return error;
1042 
1043 	/*
1044 	 * If we zeroed right up to EOF and EOF straddles a page boundary we
1045 	 * must make sure that the post-EOF area is also zeroed because the
1046 	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
1047 	 * Writeback of the eof page will do this, albeit clumsily.
1048 	 */
1049 	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
1050 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1051 				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
1052 	}
1053 
1054 	return error;
1055 }
1056 
1057 static int
1058 xfs_prepare_shift(
1059 	struct xfs_inode	*ip,
1060 	loff_t			offset)
1061 {
1062 	unsigned int		rounding;
1063 	int			error;
1064 
1065 	/*
1066 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1067 	 * into the accessible region of the file.
1068 	 */
1069 	if (xfs_can_free_eofblocks(ip)) {
1070 		error = xfs_free_eofblocks(ip);
1071 		if (error)
1072 			return error;
1073 	}
1074 
1075 	/*
1076 	 * Shift operations must stabilize the start block offset boundary along
1077 	 * with the full range of the operation. If we don't, a COW writeback
1078 	 * completion could race with an insert, front merge with the start
1079 	 * extent (after split) during the shift and corrupt the file. Start
1080 	 * with the allocation unit just prior to the start to stabilize the
1081 	 * boundary.
1082 	 */
1083 	rounding = xfs_inode_alloc_unitsize(ip);
1084 	offset = rounddown_64(offset, rounding);
1085 	if (offset)
1086 		offset -= rounding;
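	/*
	 * Illustration with assumed 4k allocation units: a shift starting at
	 * offset 20480 rounds down to 20480 and then steps back one unit to
	 * 16384, so the flush below also stabilises the allocation unit just
	 * ahead of the shift boundary against racing writeback completion.
	 */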
1087 
1088 	/*
1089 	 * Writeback and invalidate cache for the remainder of the file as we're
1090 	 * about to shift down every extent from offset to EOF.
1091 	 */
1092 	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1093 	if (error)
1094 		return error;
1095 
1096 	/*
1097 	 * Clean out anything hanging around in the cow fork now that
1098 	 * we've flushed all the dirty data out to disk to avoid having
1099 	 * CoW extents at the wrong offsets.
1100 	 */
1101 	if (xfs_inode_has_cow_data(ip)) {
1102 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1103 				true);
1104 		if (error)
1105 			return error;
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 /*
1112  * xfs_collapse_file_space()
1113  *	This routine frees disk space and shifts extents for the given file.
1114  *	The first thing we do is free data blocks in the specified range
1115  *	by calling xfs_free_file_space(), which also syncs dirty data
1116  *	and invalidates the page cache over the region the collapse range
1117  *	operates on. Extent records are then shifted left to cover the hole.
1118  * RETURNS:
1119  *	0 on success
1120  *	errno on error
1121  *
1122  */
1123 int
1124 xfs_collapse_file_space(
1125 	struct xfs_inode	*ip,
1126 	xfs_off_t		offset,
1127 	xfs_off_t		len)
1128 {
1129 	struct xfs_mount	*mp = ip->i_mount;
1130 	struct xfs_trans	*tp;
1131 	int			error;
1132 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1133 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1134 	bool			done = false;
1135 
1136 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1137 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1138 
1139 	trace_xfs_collapse_file_space(ip);
1140 
1141 	error = xfs_free_file_space(ip, offset, len);
1142 	if (error)
1143 		return error;
1144 
1145 	error = xfs_prepare_shift(ip, offset);
1146 	if (error)
1147 		return error;
1148 
1149 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1150 	if (error)
1151 		return error;
1152 
1153 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1154 	xfs_trans_ijoin(tp, ip, 0);
1155 
1156 	while (!done) {
1157 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1158 				&done);
1159 		if (error)
1160 			goto out_trans_cancel;
1161 		if (done)
1162 			break;
1163 
1164 		/* finish any deferred frees and roll the transaction */
1165 		error = xfs_defer_finish(&tp);
1166 		if (error)
1167 			goto out_trans_cancel;
1168 	}
1169 
1170 	error = xfs_trans_commit(tp);
1171 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1172 	return error;
1173 
1174 out_trans_cancel:
1175 	xfs_trans_cancel(tp);
1176 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1177 	return error;
1178 }
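/*
 * Illustrative effect, not a code path in this file: collapsing len = 1MiB at
 * offset = 4MiB in an 8MiB file frees bytes 4MiB..5MiB and shifts the former
 * 5MiB..8MiB contents down to 4MiB..7MiB; the caller (typically the fallocate
 * handler) is expected to update i_size and to hold the locks asserted above.
 */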
1179 
1180 /*
1181  * xfs_insert_file_space()
1182  *	This routine creates hole space by shifting extents for the given file.
1183  *	The first thing we do is sync dirty data and invalidate the page cache
1184  *	over the region the insert range operates on. We then split the extent
1185  *	at the given offset into two by calling xfs_bmap_split_extent, and
1186  *	shift all extent records lying between [offset,
1187  *	last allocated extent] to the right to make room for the hole.
1188  * RETURNS:
1189  *	0 on success
1190  *	errno on error
1191  */
1192 int
1193 xfs_insert_file_space(
1194 	struct xfs_inode	*ip,
1195 	loff_t			offset,
1196 	loff_t			len)
1197 {
1198 	struct xfs_mount	*mp = ip->i_mount;
1199 	struct xfs_trans	*tp;
1200 	int			error;
1201 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1202 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1203 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1204 	bool			done = false;
1205 
1206 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1207 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1208 
1209 	trace_xfs_insert_file_space(ip);
1210 
1211 	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1212 	if (error)
1213 		return error;
1214 
1215 	error = xfs_prepare_shift(ip, offset);
1216 	if (error)
1217 		return error;
1218 
1219 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1220 			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1221 	if (error)
1222 		return error;
1223 
1224 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1225 	xfs_trans_ijoin(tp, ip, 0);
1226 
1227 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
1228 			XFS_IEXT_PUNCH_HOLE_CNT);
1229 	if (error == -EFBIG)
1230 		error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
1231 	if (error)
1232 		goto out_trans_cancel;
1233 
1234 	/*
1235 	 * The extent shifting code works on extent granularity. So, if stop_fsb
1236 	 * is not the starting block of extent, we need to split the extent at
1237 	 * stop_fsb.
1238 	 */
1239 	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1240 	if (error)
1241 		goto out_trans_cancel;
1242 
1243 	do {
1244 		error = xfs_defer_finish(&tp);
1245 		if (error)
1246 			goto out_trans_cancel;
1247 
1248 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1249 				&done, stop_fsb);
1250 		if (error)
1251 			goto out_trans_cancel;
1252 	} while (!done);
1253 
1254 	error = xfs_trans_commit(tp);
1255 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1256 	return error;
1257 
1258 out_trans_cancel:
1259 	xfs_trans_cancel(tp);
1260 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1261 	return error;
1262 }
1263 
1264 /*
1265  * We need to check that the format of the data fork in the temporary inode is
1266  * valid for the target inode before doing the swap. This is not a problem with
1267  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1268  * data fork depending on the space the attribute fork is taking so we can get
1269  * invalid formats on the target inode.
1270  *
1271  * E.g. target has space for 7 extents in extent format, temp inode only has
1272  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1273  * btree, but when swapped it needs to be in extent format. Hence we can't just
1274  * blindly swap data forks on attr2 filesystems.
1275  *
1276  * Note that we check the swap in both directions so that we don't end up with
1277  * a corrupt temporary inode, either.
1278  *
1279  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1280  * inode will prevent this situation from occurring, so all we do here is
1281  * reject and log the attempt. basically we are putting the responsibility on
1281  * reject and log the attempt. Basically we are putting the responsibility on
1283  */
1284 static int
1285 xfs_swap_extents_check_format(
1286 	struct xfs_inode	*ip,	/* target inode */
1287 	struct xfs_inode	*tip)	/* tmp inode */
1288 {
1289 	struct xfs_ifork	*ifp = &ip->i_df;
1290 	struct xfs_ifork	*tifp = &tip->i_df;
1291 
1292 	/* User/group/project quota ids must match if quotas are enforced. */
1293 	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1294 	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1295 	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1296 	     ip->i_projid != tip->i_projid))
1297 		return -EINVAL;
1298 
1299 	/* Should never get a local format */
1300 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1301 	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
1302 		return -EINVAL;
1303 
1304 	/*
1305 	 * If the target inode has fewer extents than the temporary inode then
1306 	 * why did userspace call us?
1307 	 */
1308 	if (ifp->if_nextents < tifp->if_nextents)
1309 		return -EINVAL;
1310 
1311 	/*
1312 	 * If we have to use the (expensive) rmap swap method, we can
1313 	 * handle any number of extents and any format.
1314 	 */
1315 	if (xfs_has_rmapbt(ip->i_mount))
1316 		return 0;
1317 
1318 	/*
1319 	 * If the target inode is in extent form and the temp inode is in btree
1320 	 * form then we will end up with the target inode in the wrong format
1321 	 * as we already know there are fewer extents in the temp inode.
1322 	 */
1323 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1324 	    tifp->if_format == XFS_DINODE_FMT_BTREE)
1325 		return -EINVAL;
1326 
1327 	/* Check temp in extent form to max in target */
1328 	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1329 	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1330 		return -EINVAL;
1331 
1332 	/* Check target in extent form to max in temp */
1333 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1334 	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1335 		return -EINVAL;
1336 
1337 	/*
1338 	 * If we are in a btree format, check that the temp root block will fit
1339 	 * in the target and that it has enough extents to be in btree format
1340 	 * in the target.
1341 	 *
1342 	 * Note that we have to be careful to allow btree->extent conversions
1343 	 * (a common defrag case) which will occur when the temp inode is in
1344 	 * extent format...
1345 	 */
1346 	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1347 		if (xfs_inode_has_attr_fork(ip) &&
1348 		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > xfs_inode_fork_boff(ip))
1349 			return -EINVAL;
1350 		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1351 			return -EINVAL;
1352 	}
1353 
1354 	/* Reciprocal target->temp btree format checks */
1355 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1356 		if (xfs_inode_has_attr_fork(tip) &&
1357 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
1358 			return -EINVAL;
1359 		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1360 			return -EINVAL;
1361 	}
1362 
1363 	return 0;
1364 }
1365 
1366 static int
1367 xfs_swap_extent_flush(
1368 	struct xfs_inode	*ip)
1369 {
1370 	int	error;
1371 
1372 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1373 	if (error)
1374 		return error;
1375 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1376 
1377 	/* Verify O_DIRECT for ftmp */
1378 	if (VFS_I(ip)->i_mapping->nrpages)
1379 		return -EINVAL;
1380 	return 0;
1381 }
1382 
1383 /*
1384  * Move extents from one file to another, when rmap is enabled.
1385  */
1386 STATIC int
1387 xfs_swap_extent_rmap(
1388 	struct xfs_trans		**tpp,
1389 	struct xfs_inode		*ip,
1390 	struct xfs_inode		*tip)
1391 {
1392 	struct xfs_trans		*tp = *tpp;
1393 	struct xfs_bmbt_irec		irec;
1394 	struct xfs_bmbt_irec		uirec;
1395 	struct xfs_bmbt_irec		tirec;
1396 	xfs_fileoff_t			offset_fsb;
1397 	xfs_fileoff_t			end_fsb;
1398 	xfs_filblks_t			count_fsb;
1399 	int				error;
1400 	xfs_filblks_t			ilen;
1401 	xfs_filblks_t			rlen;
1402 	int				nimaps;
1403 	uint64_t			tip_flags2;
1404 
1405 	/*
1406 	 * If the source file has shared blocks, we must flag the donor
1407 	 * file as having shared blocks so that we get the shared-block
1408 	 * rmap functions when we go to fix up the rmaps.  The flags
1409 	 * will be switched for real later.
1410 	 */
1411 	tip_flags2 = tip->i_diflags2;
1412 	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1413 		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1414 
1415 	offset_fsb = 0;
1416 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1417 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1418 
1419 	while (count_fsb) {
1420 		/* Read extent from the donor file */
1421 		nimaps = 1;
1422 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1423 				&nimaps, 0);
1424 		if (error)
1425 			goto out;
1426 		ASSERT(nimaps == 1);
1427 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1428 
1429 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1430 		ilen = tirec.br_blockcount;
1431 
1432 		/* Unmap the old blocks in the source file. */
1433 		while (tirec.br_blockcount) {
1434 			ASSERT(tp->t_highest_agno == NULLAGNUMBER);
1435 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1436 
1437 			/* Read extent from the source file */
1438 			nimaps = 1;
1439 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1440 					tirec.br_blockcount, &irec,
1441 					&nimaps, 0);
1442 			if (error)
1443 				goto out;
1444 			ASSERT(nimaps == 1);
1445 			ASSERT(tirec.br_startoff == irec.br_startoff);
1446 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1447 
1448 			/* Trim the extent. */
1449 			uirec = tirec;
1450 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1451 					tirec.br_blockcount,
1452 					irec.br_blockcount);
1453 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1454 
1455 			if (xfs_bmap_is_real_extent(&uirec)) {
1456 				error = xfs_iext_count_may_overflow(ip,
1457 						XFS_DATA_FORK,
1458 						XFS_IEXT_SWAP_RMAP_CNT);
1459 				if (error == -EFBIG)
1460 					error = xfs_iext_count_upgrade(tp, ip,
1461 							XFS_IEXT_SWAP_RMAP_CNT);
1462 				if (error)
1463 					goto out;
1464 			}
1465 
1466 			if (xfs_bmap_is_real_extent(&irec)) {
1467 				error = xfs_iext_count_may_overflow(tip,
1468 						XFS_DATA_FORK,
1469 						XFS_IEXT_SWAP_RMAP_CNT);
1470 				if (error == -EFBIG)
1471 					error = xfs_iext_count_upgrade(tp, ip,
1472 							XFS_IEXT_SWAP_RMAP_CNT);
1473 				if (error)
1474 					goto out;
1475 			}
1476 
1477 			/* Remove the mapping from the donor file. */
1478 			xfs_bmap_unmap_extent(tp, tip, &uirec);
1479 
1480 			/* Remove the mapping from the source file. */
1481 			xfs_bmap_unmap_extent(tp, ip, &irec);
1482 
1483 			/* Map the donor file's blocks into the source file. */
1484 			xfs_bmap_map_extent(tp, ip, &uirec);
1485 
1486 			/* Map the source file's blocks into the donor file. */
1487 			xfs_bmap_map_extent(tp, tip, &irec);
1488 
1489 			error = xfs_defer_finish(tpp);
1490 			tp = *tpp;
1491 			if (error)
1492 				goto out;
1493 
1494 			tirec.br_startoff += rlen;
1495 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1496 			    tirec.br_startblock != DELAYSTARTBLOCK)
1497 				tirec.br_startblock += rlen;
1498 			tirec.br_blockcount -= rlen;
1499 		}
1500 
1501 		/* Roll on... */
1502 		count_fsb -= ilen;
1503 		offset_fsb += ilen;
1504 	}
1505 
1506 	tip->i_diflags2 = tip_flags2;
1507 	return 0;
1508 
1509 out:
1510 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1511 	tip->i_diflags2 = tip_flags2;
1512 	return error;
1513 }
1514 
1515 /* Swap the extents of two files by swapping data forks. */
1516 STATIC int
1517 xfs_swap_extent_forks(
1518 	struct xfs_trans	*tp,
1519 	struct xfs_inode	*ip,
1520 	struct xfs_inode	*tip,
1521 	int			*src_log_flags,
1522 	int			*target_log_flags)
1523 {
1524 	xfs_filblks_t		aforkblks = 0;
1525 	xfs_filblks_t		taforkblks = 0;
1526 	xfs_extnum_t		junk;
1527 	uint64_t		tmp;
1528 	int			error;
1529 
1530 	/*
1531 	 * Count the number of extended attribute blocks
1532 	 */
1533 	if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
1534 	    ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1535 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1536 				&aforkblks);
1537 		if (error)
1538 			return error;
1539 	}
1540 	if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
1541 	    tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1542 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1543 				&taforkblks);
1544 		if (error)
1545 			return error;
1546 	}
1547 
1548 	/*
1549 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1550 	 * block headers. We can't start changing the bmbt blocks until the
1551 	 * inode owner change is logged so recovery does the right thing in the
1552 	 * event of a crash. Set the owner change log flags now and leave the
1553 	 * bmbt scan as the last step.
1554 	 */
1555 	if (xfs_has_v3inodes(ip->i_mount)) {
1556 		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1557 			(*target_log_flags) |= XFS_ILOG_DOWNER;
1558 		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1559 			(*src_log_flags) |= XFS_ILOG_DOWNER;
1560 	}
1561 
1562 	/*
1563 	 * Swap the data forks of the inodes
1564 	 */
1565 	swap(ip->i_df, tip->i_df);
1566 
1567 	/*
1568 	 * Fix the on-disk inode values
1569 	 */
1570 	tmp = (uint64_t)ip->i_nblocks;
1571 	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1572 	tip->i_nblocks = tmp + taforkblks - aforkblks;
1573 
1574 	/*
1575 	 * The extents in the source inode could still contain speculative
1576 	 * preallocation beyond EOF (e.g. the file is open but not modified
1577 	 * while defrag is in progress). In that case, we need to copy over the
1578 	 * number of delalloc blocks the data fork in the source inode is
1579 	 * tracking beyond EOF so that when the fork is truncated away when the
1580 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1581 	 * counter on that inode.
1582 	 */
1583 	ASSERT(tip->i_delayed_blks == 0);
1584 	tip->i_delayed_blks = ip->i_delayed_blks;
1585 	ip->i_delayed_blks = 0;
1586 
1587 	switch (ip->i_df.if_format) {
1588 	case XFS_DINODE_FMT_EXTENTS:
1589 		(*src_log_flags) |= XFS_ILOG_DEXT;
1590 		break;
1591 	case XFS_DINODE_FMT_BTREE:
1592 		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1593 		       (*src_log_flags & XFS_ILOG_DOWNER));
1594 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1595 		break;
1596 	}
1597 
1598 	switch (tip->i_df.if_format) {
1599 	case XFS_DINODE_FMT_EXTENTS:
1600 		(*target_log_flags) |= XFS_ILOG_DEXT;
1601 		break;
1602 	case XFS_DINODE_FMT_BTREE:
1603 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1604 		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1605 		       (*target_log_flags & XFS_ILOG_DOWNER));
1606 		break;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 /*
1613  * Fix up the owners of the bmbt blocks to refer to the current inode. The
1614  * change owner scan attempts to order all modified buffers in the current
1615  * transaction. In the event of ordered buffer failure, the offending buffer is
1616  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1617  * the transaction in this case to replenish the fallback log reservation and
1618  * restart the scan. This process repeats until the scan completes.
1619  */
1620 static int
1621 xfs_swap_change_owner(
1622 	struct xfs_trans	**tpp,
1623 	struct xfs_inode	*ip,
1624 	struct xfs_inode	*tmpip)
1625 {
1626 	int			error;
1627 	struct xfs_trans	*tp = *tpp;
1628 
1629 	do {
1630 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1631 					      NULL);
1632 		/* success or fatal error */
1633 		if (error != -EAGAIN)
1634 			break;
1635 
1636 		error = xfs_trans_roll(tpp);
1637 		if (error)
1638 			break;
1639 		tp = *tpp;
1640 
1641 		/*
1642 		 * Redirty both inodes so they can relog and keep the log tail
1643 		 * moving forward.
1644 		 */
1645 		xfs_trans_ijoin(tp, ip, 0);
1646 		xfs_trans_ijoin(tp, tmpip, 0);
1647 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1648 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1649 	} while (true);
1650 
1651 	return error;
1652 }
1653 
1654 int
1655 xfs_swap_extents(
1656 	struct xfs_inode	*ip,	/* target inode */
1657 	struct xfs_inode	*tip,	/* tmp inode */
1658 	struct xfs_swapext	*sxp)
1659 {
1660 	struct xfs_mount	*mp = ip->i_mount;
1661 	struct xfs_trans	*tp;
1662 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1663 	int			src_log_flags, target_log_flags;
1664 	int			error = 0;
1665 	uint64_t		f;
1666 	int			resblks = 0;
1667 	unsigned int		flags = 0;
1668 	struct timespec64	ctime;
1669 
1670 	/*
1671 	 * Lock the inodes against other IO, page faults and truncate to
1672 	 * begin with.  Then we can ensure the inodes are flushed and have no
1673 	 * page cache safely. Once we have done this we can take the ilocks and
1674 	 * do the rest of the checks.
1675 	 */
1676 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1677 	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
1678 				    VFS_I(tip)->i_mapping);
1679 
1680 	/* Verify that both files have the same format */
1681 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1682 		error = -EINVAL;
1683 		goto out_unlock;
1684 	}
1685 
1686 	/* Verify both files are either real-time or non-realtime */
1687 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1688 		error = -EINVAL;
1689 		goto out_unlock;
1690 	}
1691 
1692 	error = xfs_qm_dqattach(ip);
1693 	if (error)
1694 		goto out_unlock;
1695 
1696 	error = xfs_qm_dqattach(tip);
1697 	if (error)
1698 		goto out_unlock;
1699 
1700 	error = xfs_swap_extent_flush(ip);
1701 	if (error)
1702 		goto out_unlock;
1703 	error = xfs_swap_extent_flush(tip);
1704 	if (error)
1705 		goto out_unlock;
1706 
1707 	if (xfs_inode_has_cow_data(tip)) {
1708 		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1709 		if (error)
1710 			goto out_unlock;
1711 	}
1712 
1713 	/*
1714 	 * Extent "swapping" with rmap requires a permanent reservation and
1715 	 * a block reservation because it's really just a remap operation
1716 	 * performed with log redo items!
1717 	 */
1718 	if (xfs_has_rmapbt(mp)) {
1719 		int		w = XFS_DATA_FORK;
1720 		uint32_t	ipnext = ip->i_df.if_nextents;
1721 		uint32_t	tipnext	= tip->i_df.if_nextents;
1722 
1723 		/*
1724 		 * Conceptually this shouldn't affect the shape of either bmbt,
1725 		 * but since we atomically move extents one by one, we reserve
1726 		 * enough space to rebuild both trees.
1727 		 */
1728 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1729 		resblks +=  XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1730 
1731 		/*
1732 		 * If either inode straddles a bmapbt block allocation boundary,
1733 		 * the rmapbt algorithm triggers repeated allocs and frees as
1734 		 * extents are remapped. This can exhaust the block reservation
1735 		 * prematurely and cause shutdown. Return freed blocks to the
1736 		 * transaction reservation to counter this behavior.
1737 		 */
1738 		flags |= XFS_TRANS_RES_FDBLKS;
1739 	}
1740 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1741 				&tp);
1742 	if (error)
1743 		goto out_unlock;
1744 
1745 	/*
1746 	 * Lock and join the inodes to the transaction so that transaction commit
1747 	 * or cancel will unlock the inodes from this point onwards.
1748 	 */
1749 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1750 	xfs_trans_ijoin(tp, ip, 0);
1751 	xfs_trans_ijoin(tp, tip, 0);
1752 
1753 
1754 	/* Verify all data are being swapped */
1755 	if (sxp->sx_offset != 0 ||
1756 	    sxp->sx_length != ip->i_disk_size ||
1757 	    sxp->sx_length != tip->i_disk_size) {
1758 		error = -EFAULT;
1759 		goto out_trans_cancel;
1760 	}
1761 
1762 	trace_xfs_swap_extent_before(ip, 0);
1763 	trace_xfs_swap_extent_before(tip, 1);
1764 
1765 	/* check inode formats now that data is flushed */
1766 	error = xfs_swap_extents_check_format(ip, tip);
1767 	if (error) {
1768 		xfs_notice(mp,
1769 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1770 				__func__, ip->i_ino);
1771 		goto out_trans_cancel;
1772 	}
1773 
1774 	/*
1775 	 * Compare the current change & modify times with that
1776 	 * passed in.  If they differ, we abort this swap.
1777 	 * This is the mechanism used to ensure the calling
1778 	 * process that the file was not changed out from
1779 	 * under it.
1780 	 */
1781 	ctime = inode_get_ctime(VFS_I(ip));
1782 	if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
1783 	    (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
1784 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1785 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1786 		error = -EBUSY;
1787 		goto out_trans_cancel;
1788 	}
1789 
1790 	/*
1791 	 * Note the trickiness in setting the log flags - we set the owner log
1792 	 * flag on the opposite inode (i.e. the inode we are setting the new
1793 	 * owner to be) because once we swap the forks and log that, log
1794 	 * recovery is going to see the fork as owned by the swapped inode,
1795 	 * not the pre-swapped inodes.
1796 	 */
1797 	src_log_flags = XFS_ILOG_CORE;
1798 	target_log_flags = XFS_ILOG_CORE;
1799 
1800 	if (xfs_has_rmapbt(mp))
1801 		error = xfs_swap_extent_rmap(&tp, ip, tip);
1802 	else
1803 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1804 				&target_log_flags);
1805 	if (error)
1806 		goto out_trans_cancel;
1807 
1808 	/* Do we have to swap reflink flags? */
1809 	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1810 	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1811 		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1812 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1813 		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1814 		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1815 		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1816 	}
1817 
1818 	/* Swap the cow forks. */
1819 	if (xfs_has_reflink(mp)) {
1820 		ASSERT(!ip->i_cowfp ||
1821 		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1822 		ASSERT(!tip->i_cowfp ||
1823 		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1824 
1825 		swap(ip->i_cowfp, tip->i_cowfp);
1826 
1827 		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1828 			xfs_inode_set_cowblocks_tag(ip);
1829 		else
1830 			xfs_inode_clear_cowblocks_tag(ip);
1831 		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1832 			xfs_inode_set_cowblocks_tag(tip);
1833 		else
1834 			xfs_inode_clear_cowblocks_tag(tip);
1835 	}
1836 
1837 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1838 	xfs_trans_log_inode(tp, tip, target_log_flags);
1839 
1840 	/*
1841 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1842 	 * have inode number owner values in the bmbt blocks that still refer to
1843 	 * the old inode. Scan each bmbt to fix up the owner values with the
1844 	 * inode number of the current inode.
1845 	 */
1846 	if (src_log_flags & XFS_ILOG_DOWNER) {
1847 		error = xfs_swap_change_owner(&tp, ip, tip);
1848 		if (error)
1849 			goto out_trans_cancel;
1850 	}
1851 	if (target_log_flags & XFS_ILOG_DOWNER) {
1852 		error = xfs_swap_change_owner(&tp, tip, ip);
1853 		if (error)
1854 			goto out_trans_cancel;
1855 	}
1856 
1857 	/*
1858 	 * If this is a synchronous mount, make sure that the
1859 	 * transaction goes to disk before returning to the user.
1860 	 */
1861 	if (xfs_has_wsync(mp))
1862 		xfs_trans_set_sync(tp);
1863 
1864 	error = xfs_trans_commit(tp);
1865 
1866 	trace_xfs_swap_extent_after(ip, 0);
1867 	trace_xfs_swap_extent_after(tip, 1);
1868 
1869 out_unlock_ilock:
1870 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1871 	xfs_iunlock(tip, XFS_ILOCK_EXCL);
1872 out_unlock:
1873 	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
1874 				      VFS_I(tip)->i_mapping);
1875 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1876 	return error;
1877 
1878 out_trans_cancel:
1879 	xfs_trans_cancel(tp);
1880 	goto out_unlock_ilock;
1881 }
1882