/* xref: /openbmc/linux/fs/xfs/xfs_bmap_util.c (revision 15e3ae36) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
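
/*
 * For illustration, assuming a 4096-byte block size (sb_blocklog = 12):
 * a realtime fsb is already a linear block number on the rt device, so
 * fsb 100 simply becomes daddr 100 << (12 - 9) = 800.  A data device
 * fsb instead encodes (AG number, AG block), so XFS_FSB_TO_DADDR has to
 * decode the AG component before it can produce a linear 512-byte
 * sector address.
 */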

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
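
/*
 * The shifts above convert filesystem blocks to the 512-byte units that
 * blkdev_issue_zeroout() expects.  For illustration, with 4096-byte
 * blocks s_blocksize_bits is 12, the shift is 3, and zeroing 2 fs
 * blocks starting at fs block 5 issues a zeroout of 16 sectors starting
 * at sector 40.  GFP_NOFS keeps the allocation from recursing back into
 * filesystem reclaim while we hold filesystem locks.
 */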

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	mod = 0;	/* extent offset remainder */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}
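
/*
 * Worked example for the alignment logic above, with made-up numbers:
 * assume sb_rextsize = 4 fsbs and an extent size hint of align = 16
 * fsbs, so prod = 4 realtime extents.  A request for 64 fsbs at an
 * offset that is a multiple of align keeps prod = 4 and asks the
 * allocator for ralen = 16 rtextents; any misalignment of the offset
 * or length drops prod back to 1.  With MAXEXTLEN = (1 << 21) - 1
 * blocks, ralen is clamped to MAXEXTLEN / 4 = 524287 rtextents so the
 * result still fits in the on-disk extent length field.
 */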
#endif /* CONFIG_XFS_RT */

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;

		/* fall through */
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}
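
/*
 * For illustration: a data fork in btree format whose root lives in the
 * inode with, say, three leaf blocks on disk makes
 * xfs_btree_count_blocks() return btblocks = 4.  Only the three leaves
 * are real disk blocks, hence the "btblocks - 1" above; the blocks the
 * leaves map are then added by xfs_bmap_count_leaves().
 */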

static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time.  These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}
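
/*
 * For illustration: suppose a single bmbt record covers file offsets
 * [100, 110) at startblock 500 and xfs_reflink_trim_around_shared()
 * trims it to the shared piece [100, 104).  After that piece has been
 * reported, xfs_getbmap_next_rec() advances the record to startoff 104,
 * startblock 504, blockcount 110 - 104 = 6, so the unshared remainder
 * gets its own getbmapx entry on the next iteration.
 */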

/*
 * Get inode's extents as described in bmv, and format for output in the
 * out array.  Extents are reported until they are all mapped or until
 * the passed-in bmv->bmv_count slots have been filled.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
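
/*
 * For illustration: punching start_fsb = 5, length = 3 against a single
 * delalloc extent covering [3, 10) trims the deletion to [5, 8);
 * xfs_bmap_del_extent_delay() removes just that piece, leaving delalloc
 * extents over [3, 5) and [8, 10).  Real (non-delalloc) extents in the
 * range are skipped by stepping the cursor backwards instead.
 */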

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, 0, imapp,
					&nimaps);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* unlock inode, unreserve quota blocks, cancel trans */
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
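
/*
 * Worked example for the extent size hint rounding above, with made-up
 * numbers: with extsz = 8 and a request of allocatesize_fsb = 20 blocks
 * starting at startoffset_fsb = 10, s = rounddown(10, 8) = 8 and e is
 * pushed up to 32, so the transaction reserves space as if the full,
 * hint-aligned range [8, 32) - 24 blocks - might be allocated.
 */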

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
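
/*
 * For illustration: with 1024-byte filesystem blocks and 4096-byte
 * pages, rounding = 4096.  A call with offset = 6000, len = 3000 writes
 * back and truncates the page cache over bytes [4096, 12287], i.e.
 * whole pages, so no partially-invalidated page can later expose stale
 * data over the range being unmapped.
 */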

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all the full blocks we'll have to zero out
	 * any partial block at the beginning and/or end.  iomap_zero_range is
	 * smart enough to skip any holes, including those we just created, but
	 * we must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
			&xfs_buffered_write_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}
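
/*
 * For illustration, with 4096-byte blocks: freeing offset = 1000,
 * len = 10000 gives startoffset_fsb = 1 (rounded up) and
 * endoffset_fsb = 2 (rounded down), so only block 1 (bytes 4096-8191)
 * is unmapped.  iomap_zero_range() then zeroes the partial head
 * [1000, 4095] and tail [8192, 10999], skipping the hole in between.
 */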

static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Shift operations must stabilize the start block offset boundary along
	 * with the full range of the operation. If we don't, a COW writeback
	 * completion could race with an insert, front merge with the start
	 * extent (after split) during the shift and corrupt the file. Start
	 * with the block just prior to the start to stabilize the boundary.
	 */
	offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
	if (offset)
		offset -= (1 << mp->m_sb.sb_blocklog);

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}
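
/*
 * For illustration, with 4096-byte blocks: a shift starting at
 * offset = 9000 rounds down to 8192 and then steps back one more block
 * to 4096, so the flush and invalidation cover the block just before
 * the shift boundary as well.  That keeps a racing COW writeback
 * completion from merging with the boundary extent mid-shift.
 */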

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space().  The operation also syncs dirty
 *	data and invalidates the page cache over the region on which the
 *	collapse range is working, and then shifts the extent records to
 *	the left to cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;

		/* finish any deferred frees and roll the transaction */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is sync dirty data and invalidate the page cache
 *	over the region on which the insert range is working.  We then split
 *	an extent in two at the given offset by calling xfs_bmap_split_extent,
 *	and shift all the extent records lying between [offset, last allocated
 *	extent] to the right to make room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of an extent, we need to split the extent
	 * at stop_fsb.
	 */
	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
	if (error)
		goto out_trans_cancel;

	do {
		error = xfs_trans_roll_inode(&tp, ip);
		if (error)
			goto out_trans_cancel;

		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;
	} while (!done);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt.  Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format,
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans		**tpp,
	struct xfs_inode		*ip,
	struct xfs_inode		*tip)
{
	struct xfs_trans		*tp = *tpp;
	struct xfs_bmbt_irec		irec;
	struct xfs_bmbt_irec		uirec;
	struct xfs_bmbt_irec		tirec;
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	xfs_filblks_t			count_fsb;
	int				error;
	xfs_filblks_t			ilen;
	xfs_filblks_t			rlen;
	int				nimaps;
	uint64_t			tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps.  The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_d.di_flags2;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_firstblock == NULLFSBLOCK);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			xfs_bmap_unmap_extent(tp, tip, &uirec);

			/* Remove the mapping from the source file. */
			xfs_bmap_unmap_extent(tp, ip, &irec);

			/* Map the donor file's blocks into the source file. */
			xfs_bmap_map_extent(tp, ip, &uirec);

			/* Map the source file's blocks into the donor file. */
			xfs_bmap_map_extent(tp, tip, &irec);

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}
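
/*
 * For illustration: if the donor file's next mapping covers 10 blocks
 * but the source file's mapping at the same offset only covers 4, the
 * loop above swaps the overlapping 4-block piece (two unmaps plus two
 * crossed remaps, all queued as deferred ops), finishes the deferrals,
 * then advances tirec by 4 blocks and repeats until the donor mapping
 * is consumed.
 */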

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
			(*target_log_flags) |= XFS_ILOG_DOWNER;
		if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
			(*src_log_flags) |= XFS_ILOG_DOWNER;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
	swap(ip->i_d.di_format, tip->i_d.di_format);

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so that we can relog them and keep the
		 * log tail moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	int			lock_flags;
	uint64_t		f;
	int			resblks = 0;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with.  Then we can safely ensure the inodes are flushed and
	 * have no page cache.  Once we have done this we can take the ilocks
	 * and do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	lock_flags = XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out_unlock;

	error = xfs_qm_dqattach(tip);
	if (error)
		goto out_unlock;

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	if (xfs_inode_has_cow_data(tip)) {
		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
		if (error)
			goto out_unlock;
	}

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		int		w	= XFS_DATA_FORK;
		uint32_t	ipnext	= XFS_IFORK_NEXTENTS(ip, w);
		uint32_t	tipnext	= XFS_IFORK_NEXTENTS(tip, w);

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks +=  XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * Handle the corner case where either inode might straddle the
		 * btree format boundary. If so, the inode could bounce between
		 * btree <-> extent format on unmap -> remap cycles, freeing and
		 * allocating a bmapbt block each time.
		 */
		if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(ip, w);
		if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(tip, w);
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with those passed in.
	 * If they differ, we abort this swap.  This is the mechanism used to
	 * assure the calling process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cnextents, tip->i_cnextents);
		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip,  src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}