xref: /openbmc/linux/fs/xfs/xfs_file.c (revision 6bfb56e9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trans.h"
15 #include "xfs_inode_item.h"
16 #include "xfs_bmap.h"
17 #include "xfs_bmap_util.h"
18 #include "xfs_dir2.h"
19 #include "xfs_dir2_priv.h"
20 #include "xfs_ioctl.h"
21 #include "xfs_trace.h"
22 #include "xfs_log.h"
23 #include "xfs_icache.h"
24 #include "xfs_pnfs.h"
25 #include "xfs_iomap.h"
26 #include "xfs_reflink.h"
27 
28 #include <linux/falloc.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mman.h>
31 #include <linux/fadvise.h>
32 #include <linux/mount.h>
33 
34 static const struct vm_operations_struct xfs_file_vm_ops;
35 
36 /*
37  * Decide if the given file range is aligned to the size of the fundamental
38  * allocation unit for the file.
39  */
40 static bool
41 xfs_is_falloc_aligned(
42 	struct xfs_inode	*ip,
43 	loff_t			pos,
44 	long long int		len)
45 {
46 	struct xfs_mount	*mp = ip->i_mount;
47 	uint64_t		mask;
48 
49 	if (XFS_IS_REALTIME_INODE(ip)) {
50 		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
51 			u64	rextbytes;
52 			u32	mod;
53 
54 			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
55 			div_u64_rem(pos, rextbytes, &mod);
56 			if (mod)
57 				return false;
58 			div_u64_rem(len, rextbytes, &mod);
59 			return mod == 0;
60 		}
61 		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
62 	} else {
63 		mask = mp->m_sb.sb_blocksize - 1;
64 	}
65 
66 	return !((pos | len) & mask);
67 }
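/*
 * Worked example (illustrative values): on a filesystem with 4096-byte
 * blocks and no realtime device, mask is 0xfff, so pos = 8192, len = 4096
 * passes the check above while pos = 8192, len = 1000 does not.
 */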
68 
69 /*
70  * Fsync operations on directories are much simpler than on regular files,
71  * as there is no file data to flush, and thus also no need for explicit
72  * cache flush operations, and there are no non-transaction metadata updates
73  * on directories either.
74  */
75 STATIC int
76 xfs_dir_fsync(
77 	struct file		*file,
78 	loff_t			start,
79 	loff_t			end,
80 	int			datasync)
81 {
82 	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
83 
84 	trace_xfs_dir_fsync(ip);
85 	return xfs_log_force_inode(ip);
86 }
87 
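/*
 * Work out the log commit sequence that fsync needs to force the log to for
 * this inode.  Returns 0 if there is nothing to wait for: the inode is not
 * pinned, or a datasync call finds nothing but timestamp updates in the log.
 */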
88 static xfs_csn_t
89 xfs_fsync_seq(
90 	struct xfs_inode	*ip,
91 	bool			datasync)
92 {
93 	if (!xfs_ipincount(ip))
94 		return 0;
95 	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
96 		return 0;
97 	return ip->i_itemp->ili_commit_seq;
98 }
99 
100 /*
101  * All metadata updates are logged, which means that we just have to flush the
102  * log up to the latest LSN that touched the inode.
103  *
104  * If we have concurrent fsync/fdatasync() calls, we need them to all block on
105  * the log force before we clear the ili_fsync_fields field. This ensures that
106  * we don't get a racing sync operation that does not wait for the metadata to
107  * hit the journal before returning.  If we race with clearing ili_fsync_fields,
108  * then all that will happen is the log force will do nothing as the lsn will
109  * already be on disk.  We can't race with setting ili_fsync_fields because that
110  * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
111  * shared until after the ili_fsync_fields is cleared.
112  */
113 static int
114 xfs_fsync_flush_log(
115 	struct xfs_inode	*ip,
116 	bool			datasync,
117 	int			*log_flushed)
118 {
119 	int			error = 0;
120 	xfs_csn_t		seq;
121 
122 	xfs_ilock(ip, XFS_ILOCK_SHARED);
123 	seq = xfs_fsync_seq(ip, datasync);
124 	if (seq) {
125 		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
126 					  log_flushed);
127 
128 		spin_lock(&ip->i_itemp->ili_lock);
129 		ip->i_itemp->ili_fsync_fields = 0;
130 		spin_unlock(&ip->i_itemp->ili_lock);
131 	}
132 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
133 	return error;
134 }
135 
136 STATIC int
137 xfs_file_fsync(
138 	struct file		*file,
139 	loff_t			start,
140 	loff_t			end,
141 	int			datasync)
142 {
143 	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
144 	struct xfs_mount	*mp = ip->i_mount;
145 	int			error = 0;
146 	int			log_flushed = 0;
147 
148 	trace_xfs_file_fsync(ip);
149 
150 	error = file_write_and_wait_range(file, start, end);
151 	if (error)
152 		return error;
153 
154 	if (xfs_is_shutdown(mp))
155 		return -EIO;
156 
157 	xfs_iflags_clear(ip, XFS_ITRUNCATED);
158 
159 	/*
160 	 * If we have an RT and/or log subvolume we need to make sure to flush
161 	 * the write cache of the device used for file data first.  This is to
162 	 * ensure newly written file data makes it to disk before logging the new
163 	 * inode size in case of an extending write.
164 	 */
165 	if (XFS_IS_REALTIME_INODE(ip))
166 		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
167 	else if (mp->m_logdev_targp != mp->m_ddev_targp)
168 		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
169 
170 	/*
171 	 * Any inode that has dirty modifications in the log is pinned.  The
172 	 * racy check here for a pinned inode will not catch modifications
173 	 * that happen concurrently with the fsync call, but fsync semantics
174 	 * only require us to sync previously completed I/O.
175 	 */
176 	if (xfs_ipincount(ip))
177 		error = xfs_fsync_flush_log(ip, datasync, &log_flushed);
178 
179 	/*
180 	 * If we only have a single device, and the log force above was
181 	 * a no-op, we might have to flush the data device cache here.
182 	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
183 	 * an already allocated file and thus do not have any metadata to
184 	 * commit.
185 	 */
186 	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
187 	    mp->m_logdev_targp == mp->m_ddev_targp)
188 		blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
189 
190 	return error;
191 }
192 
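/*
 * Take the inode lock in the requested mode, honouring IOCB_NOWAIT by
 * attempting a non-blocking lock and returning -EAGAIN if it cannot be
 * acquired immediately.
 */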
193 static int
194 xfs_ilock_iocb(
195 	struct kiocb		*iocb,
196 	unsigned int		lock_mode)
197 {
198 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
199 
200 	if (iocb->ki_flags & IOCB_NOWAIT) {
201 		if (!xfs_ilock_nowait(ip, lock_mode))
202 			return -EAGAIN;
203 	} else {
204 		xfs_ilock(ip, lock_mode);
205 	}
206 
207 	return 0;
208 }
209 
210 STATIC ssize_t
211 xfs_file_dio_read(
212 	struct kiocb		*iocb,
213 	struct iov_iter		*to)
214 {
215 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
216 	ssize_t			ret;
217 
218 	trace_xfs_file_direct_read(iocb, to);
219 
220 	if (!iov_iter_count(to))
221 		return 0; /* skip atime */
222 
223 	file_accessed(iocb->ki_filp);
224 
225 	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
226 	if (ret)
227 		return ret;
228 	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
229 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
230 
231 	return ret;
232 }
233 
234 static noinline ssize_t
235 xfs_file_dax_read(
236 	struct kiocb		*iocb,
237 	struct iov_iter		*to)
238 {
239 	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
240 	ssize_t			ret = 0;
241 
242 	trace_xfs_file_dax_read(iocb, to);
243 
244 	if (!iov_iter_count(to))
245 		return 0; /* skip atime */
246 
247 	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
248 	if (ret)
249 		return ret;
250 	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
251 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
252 
253 	file_accessed(iocb->ki_filp);
254 	return ret;
255 }
256 
257 STATIC ssize_t
258 xfs_file_buffered_read(
259 	struct kiocb		*iocb,
260 	struct iov_iter		*to)
261 {
262 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
263 	ssize_t			ret;
264 
265 	trace_xfs_file_buffered_read(iocb, to);
266 
267 	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
268 	if (ret)
269 		return ret;
270 	ret = generic_file_read_iter(iocb, to);
271 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
272 
273 	return ret;
274 }
275 
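/*
 * Dispatch a read to the DAX, direct or buffered I/O path and account the
 * bytes read in the filesystem statistics.
 */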
276 STATIC ssize_t
277 xfs_file_read_iter(
278 	struct kiocb		*iocb,
279 	struct iov_iter		*to)
280 {
281 	struct inode		*inode = file_inode(iocb->ki_filp);
282 	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
283 	ssize_t			ret = 0;
284 
285 	XFS_STATS_INC(mp, xs_read_calls);
286 
287 	if (xfs_is_shutdown(mp))
288 		return -EIO;
289 
290 	if (IS_DAX(inode))
291 		ret = xfs_file_dax_read(iocb, to);
292 	else if (iocb->ki_flags & IOCB_DIRECT)
293 		ret = xfs_file_dio_read(iocb, to);
294 	else
295 		ret = xfs_file_buffered_read(iocb, to);
296 
297 	if (ret > 0)
298 		XFS_STATS_ADD(mp, xs_read_bytes, ret);
299 	return ret;
300 }
301 
302 /*
303  * Common pre-write limit and setup checks.
304  *
305  * Called with the iolock held either shared or exclusive according to
306  * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
307  * if called for a direct write beyond i_size.
308  */
309 STATIC ssize_t
310 xfs_file_write_checks(
311 	struct kiocb		*iocb,
312 	struct iov_iter		*from,
313 	unsigned int		*iolock)
314 {
315 	struct file		*file = iocb->ki_filp;
316 	struct inode		*inode = file->f_mapping->host;
317 	struct xfs_inode	*ip = XFS_I(inode);
318 	ssize_t			error = 0;
319 	size_t			count = iov_iter_count(from);
320 	bool			drained_dio = false;
321 	loff_t			isize;
322 
323 restart:
324 	error = generic_write_checks(iocb, from);
325 	if (error <= 0)
326 		return error;
327 
328 	if (iocb->ki_flags & IOCB_NOWAIT) {
329 		error = break_layout(inode, false);
330 		if (error == -EWOULDBLOCK)
331 			error = -EAGAIN;
332 	} else {
333 		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
334 	}
335 
336 	if (error)
337 		return error;
338 
339 	/*
340 	 * For changing security info in file_remove_privs() we need i_rwsem
341 	 * exclusively.
342 	 */
343 	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
344 		xfs_iunlock(ip, *iolock);
345 		*iolock = XFS_IOLOCK_EXCL;
346 		error = xfs_ilock_iocb(iocb, *iolock);
347 		if (error) {
348 			*iolock = 0;
349 			return error;
350 		}
351 		goto restart;
352 	}
353 
354 	/*
355 	 * If the offset is beyond the size of the file, we need to zero any
356 	 * blocks that fall between the existing EOF and the start of this
357 	 * write.  If zeroing is needed and we are currently holding the iolock
358 	 * shared, we need to update it to exclusive which implies having to
359 	 * redo all checks before.
360 	 *
361 	 * We need to serialise against EOF updates that occur in IO completions
362 	 * here. We want to make sure that nobody is changing the size while we
363 	 * do this check until we have placed an IO barrier (i.e.  hold the
364 	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.  The
365 	 * spinlock effectively forms a memory barrier once we have the
366 	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
367 	 * hence be able to correctly determine if we need to run zeroing.
368 	 *
369 	 * We can do an unlocked check here safely as IO completion can only
370 	 * extend EOF. Truncate is locked out at this point, so the EOF can
371 	 * not move backwards, only forwards. Hence we only need to take the
372 	 * slow path and spin locks when we are at or beyond the current EOF.
373 	 */
374 	if (iocb->ki_pos <= i_size_read(inode))
375 		goto out;
376 
377 	spin_lock(&ip->i_flags_lock);
378 	isize = i_size_read(inode);
379 	if (iocb->ki_pos > isize) {
380 		spin_unlock(&ip->i_flags_lock);
381 
382 		if (iocb->ki_flags & IOCB_NOWAIT)
383 			return -EAGAIN;
384 
385 		if (!drained_dio) {
386 			if (*iolock == XFS_IOLOCK_SHARED) {
387 				xfs_iunlock(ip, *iolock);
388 				*iolock = XFS_IOLOCK_EXCL;
389 				xfs_ilock(ip, *iolock);
390 				iov_iter_reexpand(from, count);
391 			}
392 			/*
393 			 * We now have an IO submission barrier in place, but
394 			 * AIO can do EOF updates during IO completion and hence
395 			 * we now need to wait for all of them to drain. Non-AIO
396 			 * DIO will have drained before we are given the
397 			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
398 			 * no-op.
399 			 */
400 			inode_dio_wait(inode);
401 			drained_dio = true;
402 			goto restart;
403 		}
404 
405 		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
406 		error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
407 		if (error)
408 			return error;
409 	} else
410 		spin_unlock(&ip->i_flags_lock);
411 
412 out:
413 	return file_modified(file);
414 }
415 
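/*
 * Direct I/O write completion handler: end any copy-on-write remapping,
 * convert unwritten extents covered by the write, and extend the in-core
 * and on-disk inode size if the write went beyond the old EOF.
 */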
416 static int
417 xfs_dio_write_end_io(
418 	struct kiocb		*iocb,
419 	ssize_t			size,
420 	int			error,
421 	unsigned		flags)
422 {
423 	struct inode		*inode = file_inode(iocb->ki_filp);
424 	struct xfs_inode	*ip = XFS_I(inode);
425 	loff_t			offset = iocb->ki_pos;
426 	unsigned int		nofs_flag;
427 
428 	trace_xfs_end_io_direct_write(ip, offset, size);
429 
430 	if (xfs_is_shutdown(ip->i_mount))
431 		return -EIO;
432 
433 	if (error)
434 		return error;
435 	if (!size)
436 		return 0;
437 
438 	/*
439 	 * Capture amount written on completion as we can't reliably account
440 	 * for it on submission.
441 	 */
442 	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);
443 
444 	/*
445 	 * We can allocate memory here while doing writeback on behalf of
446 	 * memory reclaim.  To avoid memory allocation deadlocks set the
447 	 * task-wide nofs context for the following operations.
448 	 */
449 	nofs_flag = memalloc_nofs_save();
450 
451 	if (flags & IOMAP_DIO_COW) {
452 		error = xfs_reflink_end_cow(ip, offset, size);
453 		if (error)
454 			goto out;
455 	}
456 
457 	/*
458 	 * Unwritten conversion updates the in-core isize after extent
459 	 * conversion but before updating the on-disk size. Updating isize any
460 	 * earlier allows a racing dio read to find unwritten extents before
461 	 * they are converted.
462 	 */
463 	if (flags & IOMAP_DIO_UNWRITTEN) {
464 		error = xfs_iomap_write_unwritten(ip, offset, size, true);
465 		goto out;
466 	}
467 
468 	/*
469 	 * We need to update the in-core inode size here so that we don't end up
470 	 * with the on-disk inode size being outside the in-core inode size. We
471 	 * have no other method of updating EOF for AIO, so always do it here
472 	 * if necessary.
473 	 *
474 	 * We need to lock the test/set EOF update as we can be racing with
475 	 * other IO completions here to update the EOF. Failing to serialise
476 	 * here can result in EOF moving backwards and Bad Things Happen when
477 	 * that occurs.
478 	 *
479 	 * As IO completion only ever extends EOF, we can do an unlocked check
480 	 * here to avoid taking the spinlock. If we land within the current EOF,
481 	 * then we do not need to do an extending update at all, and we don't
482 	 * need to take the lock to check this. If we race with an update moving
483 	 * EOF, then we'll either still be beyond EOF and need to take the lock,
484 	 * or we'll be within EOF and we don't need to take it at all.
485 	 */
486 	if (offset + size <= i_size_read(inode))
487 		goto out;
488 
489 	spin_lock(&ip->i_flags_lock);
490 	if (offset + size > i_size_read(inode)) {
491 		i_size_write(inode, offset + size);
492 		spin_unlock(&ip->i_flags_lock);
493 		error = xfs_setfilesize(ip, offset, size);
494 	} else {
495 		spin_unlock(&ip->i_flags_lock);
496 	}
497 
498 out:
499 	memalloc_nofs_restore(nofs_flag);
500 	return error;
501 }
502 
503 static const struct iomap_dio_ops xfs_dio_write_ops = {
504 	.end_io		= xfs_dio_write_end_io,
505 };
506 
507 /*
508  * Handle block aligned direct I/O writes
509  */
510 static noinline ssize_t
511 xfs_file_dio_write_aligned(
512 	struct xfs_inode	*ip,
513 	struct kiocb		*iocb,
514 	struct iov_iter		*from)
515 {
516 	unsigned int		iolock = XFS_IOLOCK_SHARED;
517 	ssize_t			ret;
518 
519 	ret = xfs_ilock_iocb(iocb, iolock);
520 	if (ret)
521 		return ret;
522 	ret = xfs_file_write_checks(iocb, from, &iolock);
523 	if (ret)
524 		goto out_unlock;
525 
526 	/*
527 	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
528 	 * the iolock back to shared if we had to take the exclusive lock in
529 	 * xfs_file_write_checks() for other reasons.
530 	 */
531 	if (iolock == XFS_IOLOCK_EXCL) {
532 		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
533 		iolock = XFS_IOLOCK_SHARED;
534 	}
535 	trace_xfs_file_direct_write(iocb, from);
536 	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
537 			   &xfs_dio_write_ops, 0, NULL, 0);
538 out_unlock:
539 	if (iolock)
540 		xfs_iunlock(ip, iolock);
541 	return ret;
542 }
543 
544 /*
545  * Handle block unaligned direct I/O writes
546  *
547  * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
548  * them to be done in parallel with reads and other direct I/O writes.  However,
549  * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
550  * to do sub-block zeroing and that requires serialisation against other direct
551  * I/O to the same block.  In this case we need to serialise the submission of
552  * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
553  * In the case where sub-block zeroing is not required, we can do concurrent
554  * sub-block dios to the same block successfully.
555  *
556  * Optimistically submit the I/O using the shared lock first, but use the
557  * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
558  * if block allocation or partial block zeroing would be required.  In that case
559  * we try again with the exclusive lock.
560  */
561 static noinline ssize_t
562 xfs_file_dio_write_unaligned(
563 	struct xfs_inode	*ip,
564 	struct kiocb		*iocb,
565 	struct iov_iter		*from)
566 {
567 	size_t			isize = i_size_read(VFS_I(ip));
568 	size_t			count = iov_iter_count(from);
569 	unsigned int		iolock = XFS_IOLOCK_SHARED;
570 	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
571 	ssize_t			ret;
572 
573 	/*
574 	 * Extending writes need exclusivity because of the sub-block zeroing
575 	 * that the DIO code always does for partial tail blocks beyond EOF, so
576 	 * don't even bother trying the fast path in this case.
577 	 */
578 	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
579 		if (iocb->ki_flags & IOCB_NOWAIT)
580 			return -EAGAIN;
581 retry_exclusive:
582 		iolock = XFS_IOLOCK_EXCL;
583 		flags = IOMAP_DIO_FORCE_WAIT;
584 	}
585 
586 	ret = xfs_ilock_iocb(iocb, iolock);
587 	if (ret)
588 		return ret;
589 
590 	/*
591 	 * We can't properly handle unaligned direct I/O to reflink files yet,
592 	 * as we can't unshare a partial block.
593 	 */
594 	if (xfs_is_cow_inode(ip)) {
595 		trace_xfs_reflink_bounce_dio_write(iocb, from);
596 		ret = -ENOTBLK;
597 		goto out_unlock;
598 	}
599 
600 	ret = xfs_file_write_checks(iocb, from, &iolock);
601 	if (ret)
602 		goto out_unlock;
603 
604 	/*
605 	 * If we are doing exclusive unaligned I/O, this must be the only I/O
606 	 * in-flight.  Otherwise we risk data corruption due to unwritten extent
607 	 * conversions from the AIO end_io handler.  Wait for all other I/O to
608 	 * drain first.
609 	 */
610 	if (flags & IOMAP_DIO_FORCE_WAIT)
611 		inode_dio_wait(VFS_I(ip));
612 
613 	trace_xfs_file_direct_write(iocb, from);
614 	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
615 			   &xfs_dio_write_ops, flags, NULL, 0);
616 
617 	/*
618 	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
619 	 * layer rejected it for mapping or locking reasons. If we are doing
620 	 * nonblocking user I/O, propagate the error.
621 	 */
622 	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
623 		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
624 		xfs_iunlock(ip, iolock);
625 		goto retry_exclusive;
626 	}
627 
628 out_unlock:
629 	if (iolock)
630 		xfs_iunlock(ip, iolock);
631 	return ret;
632 }
633 
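/*
 * Direct I/O write dispatcher: reject I/O that is not aligned to the device
 * logical sector size, then take the unaligned path if the range is not
 * aligned to the filesystem block size, otherwise the simpler aligned path.
 */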
634 static ssize_t
635 xfs_file_dio_write(
636 	struct kiocb		*iocb,
637 	struct iov_iter		*from)
638 {
639 	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
640 	struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
641 	size_t			count = iov_iter_count(from);
642 
643 	/* direct I/O must be aligned to device logical sector size */
644 	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
645 		return -EINVAL;
646 	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
647 		return xfs_file_dio_write_unaligned(ip, iocb, from);
648 	return xfs_file_dio_write_aligned(ip, iocb, from);
649 }
650 
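/*
 * DAX writes go through dax_iomap_rw() under the exclusive iolock; if the
 * write extended the file, update the in-core and on-disk inode size
 * afterwards.
 */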
651 static noinline ssize_t
652 xfs_file_dax_write(
653 	struct kiocb		*iocb,
654 	struct iov_iter		*from)
655 {
656 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
657 	struct xfs_inode	*ip = XFS_I(inode);
658 	unsigned int		iolock = XFS_IOLOCK_EXCL;
659 	ssize_t			ret, error = 0;
660 	loff_t			pos;
661 
662 	ret = xfs_ilock_iocb(iocb, iolock);
663 	if (ret)
664 		return ret;
665 	ret = xfs_file_write_checks(iocb, from, &iolock);
666 	if (ret)
667 		goto out;
668 
669 	pos = iocb->ki_pos;
670 
671 	trace_xfs_file_dax_write(iocb, from);
672 	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
673 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
674 		i_size_write(inode, iocb->ki_pos);
675 		error = xfs_setfilesize(ip, pos, ret);
676 	}
677 out:
678 	if (iolock)
679 		xfs_iunlock(ip, iolock);
680 	if (error)
681 		return error;
682 
683 	if (ret > 0) {
684 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
685 
686 		/* Handle various SYNC-type writes */
687 		ret = generic_write_sync(iocb, ret);
688 	}
689 	return ret;
690 }
691 
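/*
 * Buffered writes go through iomap_file_buffered_write() under the exclusive
 * iolock.  On -EDQUOT or -ENOSPC we retry once after attempting to reclaim
 * speculative preallocations, as described below.
 */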
692 STATIC ssize_t
693 xfs_file_buffered_write(
694 	struct kiocb		*iocb,
695 	struct iov_iter		*from)
696 {
697 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
698 	struct xfs_inode	*ip = XFS_I(inode);
699 	ssize_t			ret;
700 	bool			cleared_space = false;
701 	unsigned int		iolock;
702 
703 	if (iocb->ki_flags & IOCB_NOWAIT)
704 		return -EOPNOTSUPP;
705 
706 write_retry:
707 	iolock = XFS_IOLOCK_EXCL;
708 	xfs_ilock(ip, iolock);
709 
710 	ret = xfs_file_write_checks(iocb, from, &iolock);
711 	if (ret)
712 		goto out;
713 
714 	/* We can write back this queue in page reclaim */
715 	current->backing_dev_info = inode_to_bdi(inode);
716 
717 	trace_xfs_file_buffered_write(iocb, from);
718 	ret = iomap_file_buffered_write(iocb, from,
719 			&xfs_buffered_write_iomap_ops);
720 	if (likely(ret >= 0))
721 		iocb->ki_pos += ret;
722 
723 	/*
724 	 * If we hit a space limit, try to free up some lingering preallocated
725 	 * space before returning an error. In the case of ENOSPC, first try to
726 	 * write back all dirty inodes to free up some of the excess reserved
727 	 * metadata space. This reduces the chances that the eofblocks scan
728 	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
729 	 * also behaves as a filter to prevent too many eofblocks scans from
730 	 * running at the same time.  Use a synchronous scan to increase the
731 	 * effectiveness of the scan.
732 	 */
733 	if (ret == -EDQUOT && !cleared_space) {
734 		xfs_iunlock(ip, iolock);
735 		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
736 		cleared_space = true;
737 		goto write_retry;
738 	} else if (ret == -ENOSPC && !cleared_space) {
739 		struct xfs_icwalk	icw = {0};
740 
741 		cleared_space = true;
742 		xfs_flush_inodes(ip->i_mount);
743 
744 		xfs_iunlock(ip, iolock);
745 		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
746 		xfs_blockgc_free_space(ip->i_mount, &icw);
747 		goto write_retry;
748 	}
749 
750 	current->backing_dev_info = NULL;
751 out:
752 	if (iolock)
753 		xfs_iunlock(ip, iolock);
754 
755 	if (ret > 0) {
756 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
757 		/* Handle various SYNC-type writes */
758 		ret = generic_write_sync(iocb, ret);
759 	}
760 	return ret;
761 }
762 
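/*
 * Top-level write dispatcher: route the write to the DAX, direct or buffered
 * path, falling back from direct to buffered only for the reflink CoW case
 * noted below.
 */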
763 STATIC ssize_t
764 xfs_file_write_iter(
765 	struct kiocb		*iocb,
766 	struct iov_iter		*from)
767 {
768 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
769 	struct xfs_inode	*ip = XFS_I(inode);
770 	ssize_t			ret;
771 	size_t			ocount = iov_iter_count(from);
772 
773 	XFS_STATS_INC(ip->i_mount, xs_write_calls);
774 
775 	if (ocount == 0)
776 		return 0;
777 
778 	if (xfs_is_shutdown(ip->i_mount))
779 		return -EIO;
780 
781 	if (IS_DAX(inode))
782 		return xfs_file_dax_write(iocb, from);
783 
784 	if (iocb->ki_flags & IOCB_DIRECT) {
785 		/*
786 		 * Allow a directio write to fall back to a buffered
787 		 * write *only* in the case that we're doing a reflink
788 		 * CoW.  In all other directio scenarios we do not
789 		 * allow an operation to fall back to buffered mode.
790 		 */
791 		ret = xfs_file_dio_write(iocb, from);
792 		if (ret != -ENOTBLK)
793 			return ret;
794 	}
795 
796 	return xfs_file_buffered_write(iocb, from);
797 }
798 
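/*
 * Sleep helper for xfs_break_dax_layouts(): drop the MMAPLOCK while we wait
 * for a busy DAX page to be released, then retake it before retrying.
 */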
799 static void
800 xfs_wait_dax_page(
801 	struct inode		*inode)
802 {
803 	struct xfs_inode        *ip = XFS_I(inode);
804 
805 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
806 	schedule();
807 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
808 }
809 
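/*
 * If the DAX layout-busy scan finds a page with an elevated reference count,
 * mark the operation for retry and wait for that reference to drop before
 * the layout-changing operation can proceed.
 */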
810 static int
811 xfs_break_dax_layouts(
812 	struct inode		*inode,
813 	bool			*retry)
814 {
815 	struct page		*page;
816 
817 	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
818 
819 	page = dax_layout_busy_page(inode->i_mapping);
820 	if (!page)
821 		return 0;
822 
823 	*retry = true;
824 	return ___wait_var_event(&page->_refcount,
825 			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
826 			0, 0, xfs_wait_dax_page(inode));
827 }
828 
829 int
830 xfs_break_layouts(
831 	struct inode		*inode,
832 	uint			*iolock,
833 	enum layout_break_reason reason)
834 {
835 	bool			retry;
836 	int			error;
837 
838 	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
839 
840 	do {
841 		retry = false;
842 		switch (reason) {
843 		case BREAK_UNMAP:
844 			error = xfs_break_dax_layouts(inode, &retry);
845 			if (error || retry)
846 				break;
847 			fallthrough;
848 		case BREAK_WRITE:
849 			error = xfs_break_leased_layouts(inode, iolock, &retry);
850 			break;
851 		default:
852 			WARN_ON_ONCE(1);
853 			error = -EINVAL;
854 		}
855 	} while (error == 0 && retry);
856 
857 	return error;
858 }
859 
860 /* Does this file, inode, or mount want synchronous writes? */
861 static inline bool xfs_file_sync_writes(struct file *filp)
862 {
863 	struct xfs_inode	*ip = XFS_I(file_inode(filp));
864 
865 	if (xfs_has_wsync(ip->i_mount))
866 		return true;
867 	if (filp->f_flags & (__O_SYNC | O_DSYNC))
868 		return true;
869 	if (IS_SYNC(file_inode(filp)))
870 		return true;
871 
872 	return false;
873 }
874 
875 #define	XFS_FALLOC_FL_SUPPORTED						\
876 		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
877 		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
878 		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)
879 
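/*
 * fallocate() implementation: punch, collapse, insert, zero, unshare or
 * preallocate the requested range under XFS_IOLOCK_EXCL and
 * XFS_MMAPLOCK_EXCL, updating the file size where the mode requires it.
 */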
880 STATIC long
881 xfs_file_fallocate(
882 	struct file		*file,
883 	int			mode,
884 	loff_t			offset,
885 	loff_t			len)
886 {
887 	struct inode		*inode = file_inode(file);
888 	struct xfs_inode	*ip = XFS_I(inode);
889 	long			error;
890 	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
891 	loff_t			new_size = 0;
892 	bool			do_file_insert = false;
893 
894 	if (!S_ISREG(inode->i_mode))
895 		return -EINVAL;
896 	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
897 		return -EOPNOTSUPP;
898 
899 	xfs_ilock(ip, iolock);
900 	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
901 	if (error)
902 		goto out_unlock;
903 
904 	/*
905 	 * Must wait for all AIO to complete before we continue as AIO can
906 	 * change the file size on completion without holding any locks we
907 	 * currently hold. We must do this first because AIO can update both
908 	 * the on-disk and in-memory inode sizes, and the operations that follow
909 	 * require the in-memory size to be fully up-to-date.
910 	 */
911 	inode_dio_wait(inode);
912 
913 	/*
914 	 * Now that AIO and DIO have drained, we flush and (if necessary) invalidate
915 	 * the cached range over the first operation we are about to run.
916 	 *
917 	 * We care about zero and collapse here because they both run a hole
918 	 * punch over the range first. Because that can zero data, and the range
919 	 * of invalidation for the shift operations is much larger, we still do
920 	 * the required flush for collapse in xfs_prepare_shift().
921 	 *
922 	 * Insert has the same range requirements as collapse, and we extend the
923 	 * file first which can zero data. Hence insert has the same
924 	 * flush/invalidate requirements as collapse and so they are both
925 	 * handled at the right time by xfs_prepare_shift().
926 	 */
927 	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
928 		    FALLOC_FL_COLLAPSE_RANGE)) {
929 		error = xfs_flush_unmap_range(ip, offset, len);
930 		if (error)
931 			goto out_unlock;
932 	}
933 
934 	error = file_modified(file);
935 	if (error)
936 		goto out_unlock;
937 
938 	if (mode & FALLOC_FL_PUNCH_HOLE) {
939 		error = xfs_free_file_space(ip, offset, len);
940 		if (error)
941 			goto out_unlock;
942 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
943 		if (!xfs_is_falloc_aligned(ip, offset, len)) {
944 			error = -EINVAL;
945 			goto out_unlock;
946 		}
947 
948 		/*
949 		 * There is no need for the collapse range to overlap EOF;
950 		 * in that case it is effectively a truncate operation.
951 		 */
952 		if (offset + len >= i_size_read(inode)) {
953 			error = -EINVAL;
954 			goto out_unlock;
955 		}
956 
957 		new_size = i_size_read(inode) - len;
958 
959 		error = xfs_collapse_file_space(ip, offset, len);
960 		if (error)
961 			goto out_unlock;
962 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
963 		loff_t		isize = i_size_read(inode);
964 
965 		if (!xfs_is_falloc_aligned(ip, offset, len)) {
966 			error = -EINVAL;
967 			goto out_unlock;
968 		}
969 
970 		/*
971 		 * New inode size must not exceed ->s_maxbytes, accounting for
972 		 * possible signed overflow.
973 		 */
974 		if (inode->i_sb->s_maxbytes - isize < len) {
975 			error = -EFBIG;
976 			goto out_unlock;
977 		}
978 		new_size = isize + len;
979 
980 		/* Offset should be less than i_size */
981 		if (offset >= isize) {
982 			error = -EINVAL;
983 			goto out_unlock;
984 		}
985 		do_file_insert = true;
986 	} else {
987 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
988 		    offset + len > i_size_read(inode)) {
989 			new_size = offset + len;
990 			error = inode_newsize_ok(inode, new_size);
991 			if (error)
992 				goto out_unlock;
993 		}
994 
995 		if (mode & FALLOC_FL_ZERO_RANGE) {
996 			/*
997 			 * Punch a hole and prealloc the range.  We use a hole
998 			 * punch rather than unwritten extent conversion for two
999 			 * reasons:
1000 			 *
1001 			 *   1.) Hole punch handles partial block zeroing for us.
1002 			 *   2.) If prealloc returns ENOSPC, the file range is
1003 			 *       still zero-valued by virtue of the hole punch.
1004 			 */
1005 			unsigned int blksize = i_blocksize(inode);
1006 
1007 			trace_xfs_zero_file_space(ip);
1008 
1009 			error = xfs_free_file_space(ip, offset, len);
1010 			if (error)
1011 				goto out_unlock;
1012 
1013 			len = round_up(offset + len, blksize) -
1014 			      round_down(offset, blksize);
1015 			offset = round_down(offset, blksize);
1016 		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
1017 			error = xfs_reflink_unshare(ip, offset, len);
1018 			if (error)
1019 				goto out_unlock;
1020 		} else {
1021 			/*
1022 			 * In always_cow mode we can't use preallocations and
1023 			 * thus should not create them.
1024 			 */
1025 			if (xfs_is_always_cow_inode(ip)) {
1026 				error = -EOPNOTSUPP;
1027 				goto out_unlock;
1028 			}
1029 		}
1030 
1031 		if (!xfs_is_always_cow_inode(ip)) {
1032 			error = xfs_alloc_file_space(ip, offset, len);
1033 			if (error)
1034 				goto out_unlock;
1035 		}
1036 	}
1037 
1038 	/* Change file size if needed */
1039 	if (new_size) {
1040 		struct iattr iattr;
1041 
1042 		iattr.ia_valid = ATTR_SIZE;
1043 		iattr.ia_size = new_size;
1044 		error = xfs_vn_setattr_size(file_mnt_user_ns(file),
1045 					    file_dentry(file), &iattr);
1046 		if (error)
1047 			goto out_unlock;
1048 	}
1049 
1050 	/*
1051 	 * Perform hole insertion now that the file size has been
1052 	 * updated so that if we crash during the operation we don't
1053 	 * leave shifted extents past EOF and hence lose access to
1054 	 * the data that is contained within them.
1055 	 */
1056 	if (do_file_insert) {
1057 		error = xfs_insert_file_space(ip, offset, len);
1058 		if (error)
1059 			goto out_unlock;
1060 	}
1061 
1062 	if (xfs_file_sync_writes(file))
1063 		error = xfs_log_force_inode(ip);
1064 
1065 out_unlock:
1066 	xfs_iunlock(ip, iolock);
1067 	return error;
1068 }
1069 
1070 STATIC int
1071 xfs_file_fadvise(
1072 	struct file	*file,
1073 	loff_t		start,
1074 	loff_t		end,
1075 	int		advice)
1076 {
1077 	struct xfs_inode *ip = XFS_I(file_inode(file));
1078 	int ret;
1079 	int lockflags = 0;
1080 
1081 	/*
1082 	 * Operations creating pages in page cache need protection from hole
1083 	 * punching and similar ops
1084 	 */
1085 	if (advice == POSIX_FADV_WILLNEED) {
1086 		lockflags = XFS_IOLOCK_SHARED;
1087 		xfs_ilock(ip, lockflags);
1088 	}
1089 	ret = generic_fadvise(file, start, end, advice);
1090 	if (lockflags)
1091 		xfs_iunlock(ip, lockflags);
1092 	return ret;
1093 }
1094 
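/*
 * Implements remap_file_range() (clone and dedupe): prepare both files,
 * remap shared blocks from source to destination, carry the CoW extent size
 * hint across whole-file clones, and force the log if synchronous write
 * semantics are required.
 */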
1095 STATIC loff_t
1096 xfs_file_remap_range(
1097 	struct file		*file_in,
1098 	loff_t			pos_in,
1099 	struct file		*file_out,
1100 	loff_t			pos_out,
1101 	loff_t			len,
1102 	unsigned int		remap_flags)
1103 {
1104 	struct inode		*inode_in = file_inode(file_in);
1105 	struct xfs_inode	*src = XFS_I(inode_in);
1106 	struct inode		*inode_out = file_inode(file_out);
1107 	struct xfs_inode	*dest = XFS_I(inode_out);
1108 	struct xfs_mount	*mp = src->i_mount;
1109 	loff_t			remapped = 0;
1110 	xfs_extlen_t		cowextsize;
1111 	int			ret;
1112 
1113 	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1114 		return -EINVAL;
1115 
1116 	if (!xfs_has_reflink(mp))
1117 		return -EOPNOTSUPP;
1118 
1119 	if (xfs_is_shutdown(mp))
1120 		return -EIO;
1121 
1122 	/* Prepare and then clone file data. */
1123 	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
1124 			&len, remap_flags);
1125 	if (ret || len == 0)
1126 		return ret;
1127 
1128 	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1129 
1130 	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
1131 			&remapped);
1132 	if (ret)
1133 		goto out_unlock;
1134 
1135 	/*
1136 	 * Carry the cowextsize hint from src to dest if we're sharing the
1137 	 * entire source file to the entire destination file, the source file
1138 	 * has a cowextsize hint, and the destination file does not.
1139 	 */
1140 	cowextsize = 0;
1141 	if (pos_in == 0 && len == i_size_read(inode_in) &&
1142 	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1143 	    pos_out == 0 && len >= i_size_read(inode_out) &&
1144 	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
1145 		cowextsize = src->i_cowextsize;
1146 
1147 	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
1148 			remap_flags);
1149 	if (ret)
1150 		goto out_unlock;
1151 
1152 	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
1153 		xfs_log_force_inode(dest);
1154 out_unlock:
1155 	xfs_iunlock2_io_mmap(src, dest);
1156 	if (ret)
1157 		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1158 	return remapped > 0 ? remapped : ret;
1159 }
1160 
1161 STATIC int
1162 xfs_file_open(
1163 	struct inode	*inode,
1164 	struct file	*file)
1165 {
1166 	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
1167 		return -EIO;
1168 	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
1169 	return generic_file_open(inode, file);
1170 }
1171 
1172 STATIC int
1173 xfs_dir_open(
1174 	struct inode	*inode,
1175 	struct file	*file)
1176 {
1177 	struct xfs_inode *ip = XFS_I(inode);
1178 	unsigned int	mode;
1179 	int		error;
1180 
1181 	error = xfs_file_open(inode, file);
1182 	if (error)
1183 		return error;
1184 
1185 	/*
1186 	 * If there are any blocks, read-ahead block 0 as we're almost
1187 	 * certain to have the next operation be a read there.
1188 	 */
1189 	mode = xfs_ilock_data_map_shared(ip);
1190 	if (ip->i_df.if_nextents > 0)
1191 		error = xfs_dir3_data_readahead(ip, 0, 0);
1192 	xfs_iunlock(ip, mode);
1193 	return error;
1194 }
1195 
1196 STATIC int
1197 xfs_file_release(
1198 	struct inode	*inode,
1199 	struct file	*filp)
1200 {
1201 	return xfs_release(XFS_I(inode));
1202 }
1203 
1204 STATIC int
1205 xfs_file_readdir(
1206 	struct file	*file,
1207 	struct dir_context *ctx)
1208 {
1209 	struct inode	*inode = file_inode(file);
1210 	xfs_inode_t	*ip = XFS_I(inode);
1211 	size_t		bufsize;
1212 
1213 	/*
1214 	 * The Linux API doesn't pass the total size of the buffer we
1215 	 * read into down to the filesystem.  With the filldir concept
1216 	 * it's not needed for correctness, but the XFS dir2 leaf
1217 	 * code wants an estimate of the buffer size to calculate its
1218 	 * readahead window and size the buffers used for mapping to
1219 	 * physical blocks.
1220 	 *
1221 	 * Try to give it an estimate that's good enough, maybe at some
1222 	 * point we can change the ->readdir prototype to include the
1223 	 * buffer size.  For now we use the current glibc buffer size.
1224 	 */
1225 	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
1226 
1227 	return xfs_readdir(NULL, ip, ctx, bufsize);
1228 }
1229 
1230 STATIC loff_t
1231 xfs_file_llseek(
1232 	struct file	*file,
1233 	loff_t		offset,
1234 	int		whence)
1235 {
1236 	struct inode		*inode = file->f_mapping->host;
1237 
1238 	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
1239 		return -EIO;
1240 
1241 	switch (whence) {
1242 	default:
1243 		return generic_file_llseek(file, offset, whence);
1244 	case SEEK_HOLE:
1245 		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1246 		break;
1247 	case SEEK_DATA:
1248 		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1249 		break;
1250 	}
1251 
1252 	if (offset < 0)
1253 		return offset;
1254 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1255 }
1256 
1257 /*
1258  * Locking for serialisation of IO during page faults. This results in a lock
1259  * ordering of:
1260  *
1261  * mmap_lock (MM)
1262  *   sb_start_pagefault(vfs, freeze)
1263  *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1264  *       page_lock (MM)
1265  *         i_lock (XFS - extent map serialisation)
1266  */
1267 static vm_fault_t
1268 __xfs_filemap_fault(
1269 	struct vm_fault		*vmf,
1270 	enum page_entry_size	pe_size,
1271 	bool			write_fault)
1272 {
1273 	struct inode		*inode = file_inode(vmf->vma->vm_file);
1274 	struct xfs_inode	*ip = XFS_I(inode);
1275 	vm_fault_t		ret;
1276 
1277 	trace_xfs_filemap_fault(ip, pe_size, write_fault);
1278 
1279 	if (write_fault) {
1280 		sb_start_pagefault(inode->i_sb);
1281 		file_update_time(vmf->vma->vm_file);
1282 	}
1283 
1284 	if (IS_DAX(inode)) {
1285 		pfn_t pfn;
1286 
1287 		xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1288 		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
1289 				(write_fault && !vmf->cow_page) ?
1290 				 &xfs_direct_write_iomap_ops :
1291 				 &xfs_read_iomap_ops);
1292 		if (ret & VM_FAULT_NEEDDSYNC)
1293 			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
1294 		xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1295 	} else {
1296 		if (write_fault) {
1297 			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1298 			ret = iomap_page_mkwrite(vmf,
1299 					&xfs_buffered_write_iomap_ops);
1300 			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1301 		} else {
1302 			ret = filemap_fault(vmf);
1303 		}
1304 	}
1305 
1306 	if (write_fault)
1307 		sb_end_pagefault(inode->i_sb);
1308 	return ret;
1309 }
1310 
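/*
 * Only a write to a shared (MAP_SHARED) mapping counts as a write fault
 * here; writes to private mappings are copied-on-write and never reach the
 * file.
 */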
1311 static inline bool
1312 xfs_is_write_fault(
1313 	struct vm_fault		*vmf)
1314 {
1315 	return (vmf->flags & FAULT_FLAG_WRITE) &&
1316 	       (vmf->vma->vm_flags & VM_SHARED);
1317 }
1318 
1319 static vm_fault_t
1320 xfs_filemap_fault(
1321 	struct vm_fault		*vmf)
1322 {
1323 	/* DAX can shortcut the normal fault path on write faults! */
1324 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
1325 			IS_DAX(file_inode(vmf->vma->vm_file)) &&
1326 			xfs_is_write_fault(vmf));
1327 }
1328 
1329 static vm_fault_t
1330 xfs_filemap_huge_fault(
1331 	struct vm_fault		*vmf,
1332 	enum page_entry_size	pe_size)
1333 {
1334 	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1335 		return VM_FAULT_FALLBACK;
1336 
1337 	/* DAX can shortcut the normal fault path on write faults! */
1338 	return __xfs_filemap_fault(vmf, pe_size,
1339 			xfs_is_write_fault(vmf));
1340 }
1341 
1342 static vm_fault_t
1343 xfs_filemap_page_mkwrite(
1344 	struct vm_fault		*vmf)
1345 {
1346 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1347 }
1348 
1349 /*
1350  * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1351  * on write faults. In reality, it needs to serialise against truncate and
1352  * prepare memory for writing, so handle it as a standard write fault.
1353  */
1354 static vm_fault_t
1355 xfs_filemap_pfn_mkwrite(
1356 	struct vm_fault		*vmf)
1357 {
1358 
1359 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1360 }
1361 
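/*
 * Map already-cached pages into the page tables, holding the MMAPLOCK shared
 * so the mapping stays stable against truncate (see the locking order
 * comment above __xfs_filemap_fault()).
 */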
1362 static vm_fault_t
1363 xfs_filemap_map_pages(
1364 	struct vm_fault		*vmf,
1365 	pgoff_t			start_pgoff,
1366 	pgoff_t			end_pgoff)
1367 {
1368 	struct inode		*inode = file_inode(vmf->vma->vm_file);
1369 	vm_fault_t ret;
1370 
1371 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1372 	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
1373 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1374 	return ret;
1375 }
1376 
1377 static const struct vm_operations_struct xfs_file_vm_ops = {
1378 	.fault		= xfs_filemap_fault,
1379 	.huge_fault	= xfs_filemap_huge_fault,
1380 	.map_pages	= xfs_filemap_map_pages,
1381 	.page_mkwrite	= xfs_filemap_page_mkwrite,
1382 	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1383 };
1384 
1385 STATIC int
1386 xfs_file_mmap(
1387 	struct file		*file,
1388 	struct vm_area_struct	*vma)
1389 {
1390 	struct inode		*inode = file_inode(file);
1391 	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));
1392 
1393 	/*
1394 	 * We don't support synchronous mappings for non-DAX files and
1395 	 * for DAX files if the underlying dax_device is not synchronous.
1396 	 */
1397 	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
1398 		return -EOPNOTSUPP;
1399 
1400 	file_accessed(file);
1401 	vma->vm_ops = &xfs_file_vm_ops;
1402 	if (IS_DAX(inode))
1403 		vma->vm_flags |= VM_HUGEPAGE;
1404 	return 0;
1405 }
1406 
1407 const struct file_operations xfs_file_operations = {
1408 	.llseek		= xfs_file_llseek,
1409 	.read_iter	= xfs_file_read_iter,
1410 	.write_iter	= xfs_file_write_iter,
1411 	.splice_read	= generic_file_splice_read,
1412 	.splice_write	= iter_file_splice_write,
1413 	.iopoll		= iocb_bio_iopoll,
1414 	.unlocked_ioctl	= xfs_file_ioctl,
1415 #ifdef CONFIG_COMPAT
1416 	.compat_ioctl	= xfs_file_compat_ioctl,
1417 #endif
1418 	.mmap		= xfs_file_mmap,
1419 	.mmap_supported_flags = MAP_SYNC,
1420 	.open		= xfs_file_open,
1421 	.release	= xfs_file_release,
1422 	.fsync		= xfs_file_fsync,
1423 	.get_unmapped_area = thp_get_unmapped_area,
1424 	.fallocate	= xfs_file_fallocate,
1425 	.fadvise	= xfs_file_fadvise,
1426 	.remap_file_range = xfs_file_remap_range,
1427 };
1428 
1429 const struct file_operations xfs_dir_file_operations = {
1430 	.open		= xfs_dir_open,
1431 	.read		= generic_read_dir,
1432 	.iterate_shared	= xfs_file_readdir,
1433 	.llseek		= generic_file_llseek,
1434 	.unlocked_ioctl	= xfs_file_ioctl,
1435 #ifdef CONFIG_COMPAT
1436 	.compat_ioctl	= xfs_file_compat_ioctl,
1437 #endif
1438 	.fsync		= xfs_dir_fsync,
1439 };
1440