/* xref: /openbmc/linux/fs/xfs/xfs_file.c (revision 3b23dc52) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush and thus no need for explicit cache
 * flush operations; nor are there any non-transaction metadata updates
 * on directories.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}
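
/*
 * Illustrative userspace sketch (not part of this file, not built): the
 * directory fsync above is what makes a new directory entry durable.
 * Creating a file crash-safely requires fsyncing both the file and its
 * parent directory. The paths used below are hypothetical.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int dfd = open("/mnt/xfs/dir", O_RDONLY | O_DIRECTORY);
	int fd = openat(dfd, "newfile", O_CREAT | O_WRONLY, 0644);

	if (dfd < 0 || fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "data", 4) != 4 || fsync(fd) != 0) {
		perror("write/fsync");
		return 1;
	}
	/* Persist the directory entry itself; ends up in xfs_dir_fsync(). */
	if (fsync(dfd) != 0) {
		perror("fsync(dir)");
		return 1;
	}
	close(fd);
	return close(dfd);
}
#endif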

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
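
/*
 * Illustrative userspace sketch (not part of this file, not built): an
 * fdatasync() after overwriting already-allocated blocks is the case the
 * comment above describes, where no metadata needs logging and at most a
 * device cache flush is issued. The path is hypothetical.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/file", O_WRONLY);
	char buf[4096] = { 0 };

	if (fd < 0)
		return 1;
	/* Overwrite in place: no size change, no new extents. */
	if (pwrite(fd, buf, sizeof(buf), 0) != sizeof(buf))
		return 1;
	/* Data-only sync; timestamp updates alone do not force the log. */
	if (fdatasync(fd) != 0)
		return 1;
	return close(fd);
}
#endif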

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}
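
/*
 * Illustrative userspace sketch (not part of this file, not built):
 * FMODE_NOWAIT, set in xfs_file_open() below, lets preadv2() with
 * RWF_NOWAIT fail with EAGAIN instead of blocking, which is what the
 * xfs_ilock_nowait() branches above implement. Requires a kernel and
 * glibc with preadv2()/RWF_NOWAIT support.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t try_read(int fd, void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	ssize_t ret = preadv2(fd, &iov, 1, off, RWF_NOWAIT);

	if (ret < 0 && errno == EAGAIN)		/* would block: retry, blocking */
		ret = preadv2(fd, &iov, 1, off, 0);
	return ret;
}
#endif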

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to upgrade it to exclusive, which implies
	 * having to redo all of the checks above.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}
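
/*
 * Illustrative userspace sketch (not part of this file, not built): the
 * zeroing that xfs_file_aio_write_checks() performs between the old EOF
 * and a write starting beyond it is what guarantees the gap reads back
 * as zeroes. The path is hypothetical.
 */
#if 0	/* example only */
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char c = 0x55, back = 0x7f;
	int fd = open("/mnt/xfs/sparse", O_CREAT | O_RDWR | O_TRUNC, 0644);

	/* Extending write: the file grows from 0 to 4097 bytes. */
	pwrite(fd, &c, 1, 4096);
	/* Everything between the old EOF (0) and the write offset is zero. */
	pread(fd, &back, 1, 100);
	assert(back == 0);
	return close(fd);
}
#endif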

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io) {
		/* If we are going to wait for other DIO to finish, bail */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (atomic_read(&inode->i_dio_count)) {
				/* don't leak the iolock on the early return */
				ret = -EAGAIN;
				goto out;
			}
		} else {
			inode_dio_wait(inode);
		}
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
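
/*
 * Illustrative userspace sketch (not part of this file, not built): direct
 * IO must be aligned to the device's logical sector size or the write above
 * returns -EINVAL; filesystem-block-aligned IO additionally keeps the
 * shared-iolock fast path. The 4096-byte alignment below is an assumption
 * about the device, as is the path.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/xfs/file", O_WRONLY | O_DIRECT | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* Buffer address, length and file offset all sector-aligned. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	if (pwrite(fd, buf, 4096, 0) != 4096)
		return 1;
	free(buf);
	return close(fd);
}
#endif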

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

static void
xfs_wait_dax_page(
	struct inode		*inode,
	bool			*did_unlock)
{
	struct xfs_inode	*ip = XFS_I(inode);

	*did_unlock = true;
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	uint			iolock,
	bool			*did_unlock)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode, did_unlock));
}

int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, *iolock, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
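
/*
 * Illustrative userspace sketch (not part of this file, not built):
 * preallocating space and punching a hole through the fallocate() modes
 * handled above. Hole punching requires FALLOC_FL_KEEP_SIZE; collapse and
 * insert range additionally require block-aligned offset and length, as
 * checked in xfs_file_fallocate(). The path is hypothetical.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/file", O_RDWR);

	if (fd < 0)
		return 1;
	/* Preallocate 1MiB without changing the file size. */
	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
	/* Punch a hole over the first 64KiB. */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 16);
	return close(fd);
}
#endif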

STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	struct inode	*srci = file_inode(src_file);
	u64		max_dedupe;
	int		error;

	/*
	 * Since we have to read all these pages in to compare them, cut
	 * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
	 * That means we won't do more than MAX_RW_COUNT IO per request.
	 */
	max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
	if (len > max_dedupe)
		len = max_dedupe;
	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}
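
/*
 * Illustrative userspace sketch (not part of this file, not built): the
 * clone and dedupe methods above back the FICLONE/FICLONERANGE and
 * FIDEDUPERANGE ioctls. FICLONE reflinks all extents of src into dst.
 * The paths are hypothetical; the filesystem must support reflink.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int src = open("/mnt/xfs/a", O_RDONLY);
	int dst = open("/mnt/xfs/b", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (src < 0 || dst < 0)
		return 1;
	if (ioctl(dst, FICLONE, src) != 0)
		return 1;	/* e.g. EOPNOTSUPP without reflink support */
	close(src);
	return close(dst);
}
#endif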

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
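
/*
 * Illustrative userspace sketch (not part of this file, not built): walking
 * the data extents of a sparse file with the SEEK_DATA/SEEK_HOLE support
 * above. lseek() fails with ENXIO once there is no more data past the
 * offset, which terminates the loop. The path is hypothetical.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/sparse", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		return 1;
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		/* There is always an implicit hole at EOF, so this succeeds. */
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
	return close(fd);
}
#endif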

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{

	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files, at least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
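
/*
 * Illustrative userspace sketch (not part of this file, not built): MAP_SYNC
 * is only accepted on DAX files, per the VM_SYNC check above, and must be
 * used with MAP_SHARED_VALIDATE so unknown flags are rejected rather than
 * silently ignored. The DAX-mounted path is an assumption.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/pmem/file", O_RDWR);	/* DAX mount assumed */
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED)
		return 1;	/* EOPNOTSUPP on non-DAX files */
	/* Stores plus CPU cache flushes are durable without an fsync(). */
	munmap(p, 4096);
	return close(fd);
}
#endif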

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};