/* xref: /openbmc/linux/fs/xfs/xfs_file.c (revision 8b036556) */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/aio.h>
#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}
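
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the helpers above keep the i_mutex -> i_iolock ordering stable
 * across the upgrade/demote dance that the direct IO read path below
 * performs when it has to invalidate the page cache:
 *
 *	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 *	...decide the page cache must be invalidated...
 *	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 *	xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);		(also takes i_mutex)
 *	...flush and invalidate the page cache...
 *	xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);	(drops i_mutex)
 *	...do the IO under XFS_IOLOCK_SHARED...
 *	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 */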

/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of the buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return status;
}
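
/*
 * Worked example for the loop above (editorial addition): with
 * PAGE_CACHE_SIZE = 4096, pos = 5000 and count = 8000, the first
 * iteration zeroes 3192 bytes starting at offset 904 within the page,
 * the second zeroes a full 4096-byte page, and the third zeroes the
 * remaining 712 bytes.
 */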

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		ip->i_d.di_mode &= ~S_ISUID;
		if (ip->i_d.di_mode & S_IXGRP)
			ip->i_d.di_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}
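
/*
 * Illustrative caller (editorial addition): the fallocate path later in
 * this file does, in effect,
 *
 *	flags |= XFS_PREALLOC_SET;
 *	if (file->f_flags & O_DSYNC)
 *		flags |= XFS_PREALLOC_SYNC;
 *	error = xfs_update_prealloc_flags(ip, flags);
 *
 * so a preallocation request both tags the inode with XFS_DIFLAG_PREALLOC
 * and, for O_DSYNC files, commits the transaction synchronously.
 */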

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush; thus there is also no need for
 * explicit cache flush operations, and there are no non-transaction
 * metadata updates on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
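
/*
 * Summary of the cache flush decisions above (editorial addition):
 *
 *	realtime inode:			flush m_rtdev_targp before the
 *					log force
 *	separate log device:		flush m_ddev_targp before the
 *					log force
 *	single device, log force was
 *	a no-op (!log_flushed):		flush m_ddev_targp afterwards
 *
 * All of this is skipped when the filesystem is mounted without barriers.
 */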

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = iov_iter_count(to);
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	loff_t			pos = iocb->ki_pos;

	XFS_STATS_INC(xs_read_calls);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= XFS_IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (unlikely(ioflags & XFS_IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -EINVAL;
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared. We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take the lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = filemap_write_and_wait_range(
							VFS_I(ip)->i_mapping,
							pos, pos + size - 1);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + size - 1) >> PAGE_CACHE_SHIFT);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
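
/*
 * Example of the direct IO alignment check above (editorial addition):
 * with a 512-byte logical sector size, bt_logical_sectormask is 511, so
 * a read at pos = 1024 for size = 4096 passes, while pos = 1000 or
 * size = 100 fails with -EINVAL, unless pos sits exactly at EOF, in
 * which case the read simply returns 0.
 */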

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (negative) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize,
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	*did_zeroing = true;
	return xfs_iozero(ip, isize, zero_len);
}
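
/*
 * Worked example (editorial addition): with 4096-byte blocks, isize = 6000
 * and an extending write at offset = 9000, zero_offset is 1904, so the
 * remaining 2192 bytes of the block holding byte 5999 are zeroed.  If the
 * write started at offset = 7000 instead, zero_len would be capped to
 * offset - isize = 1000 bytes.
 */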

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (negative) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		*did_zeroing = true;
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}
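
/*
 * Range diagram for the zeroing above (editorial addition):
 *
 *	isize                                                offset
 *	  |<-- rest of the isize block -->|<-- whole blocks -->|
 *	    zeroed by                       zeroed by the loop;
 *	    xfs_zero_last_block()           holes and unwritten
 *	                                    extents are skipped
 */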

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 */
	if (*pos > i_size_read(inode)) {
		bool	zero = false;

		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			pos = iocb->ki_pos;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidating after we
	 * got the iolock to protect against other threads adding new pages
	 * while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;
	iov_iter_truncate(from, count);

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						    pos, pos + count - 1);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if
		 * we fail to invalidate a page, but this should never
		 * happen on XFS. Warn if it does fail.
		 */
		ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + count - 1) >> PAGE_CACHE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, from, pos);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
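
/*
 * Example of the lock-mode choice above (editorial addition): with
 * 4096-byte filesystem blocks, m_blockmask is 4095, so a write at
 * pos = 8192 for count = 4096 is block-aligned and can run under
 * XFS_IOLOCK_SHARED (given no cached pages), while the same pos with
 * count = 512 is unaligned, takes XFS_IOLOCK_EXCL and drains all other
 * direct IO via inode_dio_wait().
 */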

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	loff_t			pos = iocb->ki_pos;
	size_t			count = iov_iter_count(from);

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	iov_iter_truncate(from, count);
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_perform_write(file, from, pos);
	if (likely(ret >= 0))
		iocb->ki_pos = pos + ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}
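
/*
 * Retry sequence sketch (editorial addition): a buffered write that fails
 * with -ENOSPC is retried once, after flushing all dirty inodes and
 * running a synchronous eofblocks scan to trim lingering preallocations;
 * one that fails with -EDQUOT is retried only if
 * xfs_inode_free_quota_eofblocks() actually freed something.  At most one
 * retry is performed before the error is returned.
 */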

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * A collapse range must not reach or cross EOF; collapsing
		 * up to or past EOF would effectively be a truncate
		 * operation, so reject it.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
	}

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * look up pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * holes and this is the first time through the loop, it
		 * means the given offset landed in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers to
		 * find holes but they all contained data, the last offset
		 * was updated to point past the end of the last mapped page.
		 * If it has not reached the endpoint of the search, there
		 * must be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of
			 * range.  If the current offset has not reached the
			 * end of the specified search range, there should be
			 * a hole between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping ==
			 * NULL).  We can freely skip it and proceed to check
			 * the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * point to search if this is the first time to
				 * come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data and found none,
			 * or searching for a hole and found a data buffer.
			 * In either case, the next page probably contains
			 * what we want, so update the last offset accordingly.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * The number of returned pages is less than we asked for, so
		 * the search is done.  In this case nothing was found when
		 * searching for data, but we found a hole behind the last
		 * offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = -ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * a page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}
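
/*
 * Userspace-visible behaviour (editorial addition): for a sparse file
 * with data only in its first block, lseek(fd, 0, SEEK_DATA) returns 0,
 * lseek(fd, 0, SEEK_HOLE) returns the offset where the hole after that
 * data begins, and SEEK_DATA at an offset inside the trailing hole fails
 * with ENXIO, matching the implicit hole at EOF handled above.
 */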

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};