// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

/*
 * Returns %true if the given request should be attempted with DIO, or
 * %false if it should fall back to buffered I/O.
 *
 * DIO isn't well specified; when it's unsupported (either due to the request
 * being misaligned, or due to the file not supporting DIO at all), filesystems
 * either fall back to buffered I/O or return EINVAL.  For files that don't use
 * any special features like encryption or verity, ext4 has traditionally
 * returned EINVAL for misaligned DIO.  iomap_dio_rw() uses this convention too.
 * In this case, we should attempt the DIO, *not* fall back to buffered I/O.
 *
 * In contrast, in cases where DIO is unsupported due to ext4 features, ext4
 * traditionally falls back to buffered I/O.
 *
 * This function implements the traditional ext4 behavior in all these cases.
 */
static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	u32 dio_align = ext4_dio_alignment(inode);

	if (dio_align == 0)
		return false;

	if (dio_align == 1)
		return true;

	return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
}
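
/*
 * Worked example (illustrative numbers, not taken from this file): if
 * ext4_dio_alignment() returns 4096 for an inode, then an 8192-byte write at
 * ki_pos == 4096 from a 4096-byte-aligned buffer passes the IS_ALIGNED()
 * check above (4096 | 4096 | 8192 has no bits set below 2^12), while the
 * same write at ki_pos == 100 fails it and the callers below fall back to
 * buffered I/O.  When this helper sees dio_align == 1, alignment policing is
 * left entirely to iomap_dio_rw(), which returns -EINVAL for requests the
 * underlying block device cannot handle, matching the traditional ext4
 * behavior described in the comment above.
 */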

static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_should_use_dio(iocb, to)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed
		 * on the inode is not supported by direct I/O. The
		 * IOCB_DIRECT flag needs to be cleared here to ensure that
		 * the direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under the inode lock - at this point we are sure the DAX
	 * state cannot change anymore.
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fall back to buffered I/O if we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

static ssize_t ext4_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe,
				     size_t len, unsigned int flags)
{
	struct inode *inode = file_inode(in);

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;
	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the I/O in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct I/O, and
 * they are converted to written only after the I/O is complete.  Until they
 * are mapped, these blocks appear as holes, so dio_zero_block() will assume
 * that it needs to zero out portions of the start and/or end block.  If two
 * AIO threads are at work on the same unwritten block, they must be
 * synchronized or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}
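
/*
 * E.g. with a 4096-byte block size, blockmask is 0xfff: a 2048-byte write at
 * pos 1024 from a page-aligned buffer ORs to 0xc00, so (0xc00 & 0xfff) != 0
 * and the I/O is unaligned (it covers only part of block 0), whereas an
 * 8192-byte write at pos 4096 from a page-aligned buffer ORs to 0x3000 and
 * (0x3000 & 0xfff) == 0, i.e. block-aligned.  Note that iov_iter_alignment()
 * folds in both the base addresses and the lengths of the iovec segments.
 */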

static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is the I/O overwriting allocated or initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode,
			      loff_t pos, loff_t len, bool *unwritten)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	if (err != blklen)
		return false;
	/*
	 * 'err == blklen' means that all of the blocks have been
	 * preallocated, regardless of whether they have been initialized or
	 * not. We need to check m_flags to distinguish the unwritten extents.
	 */
	*unwritten = !(map.m_flags & EXT4_MAP_MAPPED);
	return true;
}
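
/*
 * E.g. with 4096-byte blocks (illustrative numbers), pos == 6144 and
 * len == 4096 cover logical blocks 1 and 2, so EXT4_MAX_BLOCKS() yields
 * m_len == 2 and the single lookup above must map both blocks for the I/O to
 * count as an overwrite.  The distinction reported via *unwritten matters to
 * the DIO write path: overwrites of unwritten extents may still need
 * extent-tree modifications (splits) at I/O completion time.
 */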

static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which applies to extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = generic_perform_write(iocb, from);

out:
	inode_unlock(inode);
	if (unlikely(ret <= 0))
		return ret;
	return generic_write_sync(iocb, ret);
}

static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t count)
{
	handle_t *handle;

	lockdep_assert_held_write(&inode->i_rwsem);
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (ext4_update_inode_size(inode, offset + count)) {
		int ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			ext4_journal_stop(handle);
			return ret;
		}
	}

	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	return count;
}
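
/*
 * Note on the orphan-list protocol used by the extending write paths below:
 * before issuing an extending DIO/DAX write, the caller adds the inode to
 * the orphan list so that a crash mid-write cannot leave blocks allocated
 * beyond the on-disk i_size. Once the write completes and i_disksize has
 * been pushed out by ext4_handle_inode_extension(), the inode is taken off
 * the list again (above, or in ext4_inode_extension_cleanup() below).
 */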

/*
 * Clean up the inode after a DIO or DAX extending write has completed and the
 * inode size has been updated using ext4_handle_inode_extension().
 */
static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count)
{
	lockdep_assert_held_write(&inode->i_rwsem);
	if (count < 0) {
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try
		 * to remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
		return;
	}
	/*
	 * If i_disksize got extended while the DIO was running, either due to
	 * writeback of delalloc blocks or due to an extending truncate, we
	 * could fail to clean up the orphan list in
	 * ext4_handle_inode_extension(). Do it now.
	 */
	if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
		handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

		if (IS_ERR(handle)) {
			/*
			 * The write has successfully completed. Not much to
			 * do with the error here so just clean up the orphan
			 * list and hope for the best.
			 */
			ext4_orphan_del(NULL, inode);
			return;
		}
		ext4_orphan_del(handle, inode);
		ext4_journal_stop(handle);
	}
}

static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (!error && size && flags & IOMAP_DIO_UNWRITTEN)
		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
	if (error)
		return error;
	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended. Also we can race with truncate or write
	 * expanding the file so we have to be a bit careful here.
	 */
	if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) &&
	    pos + size <= i_size_read(inode))
		return size;
	return ext4_handle_inode_extension(inode, pos, size);
}
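
/*
 * ->end_io runs in the completion path of iomap_dio_rw(). For extending
 * writes, ext4 holds i_rwsem exclusively and forces synchronous completion
 * (see IOMAP_DIO_FORCE_WAIT in ext4_dio_write_checks() below), so the
 * lockdep_assert_held_write() in ext4_handle_inode_extension() is satisfied
 * when it is reached from here.
 */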

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with the shared lock acquired, then see if
 * any condition requires an exclusive inode lock. If so, we restart the
 * whole operation by releasing the shared lock and acquiring the exclusive
 * lock.
 *
 * - For unaligned I/O we never take the shared lock, as that may cause data
 *   corruption when two unaligned I/Os try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock, since they require
 *   updating the inode's i_disksize and/or orphan handling under the
 *   exclusive lock.
 *
 * - Shared locking is used mostly for overwrites, of both initialized and
 *   unwritten blocks. For overwrites of unwritten blocks, extent splitting
 *   is protected by i_data_sem in ext4_inode_info, so the exclusive i_rwsem
 *   is not needed.
 *
 * - Otherwise we switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend,
				     bool *unwritten, int *dio_flags)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;
	bool overwrite, unaligned_io;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;

	unaligned_io = ext4_unaligned_io(inode, from, offset);
	*extend = ext4_extending_io(inode, offset, count);
	overwrite = ext4_overwrite_io(inode, offset, count, unwritten);

	/*
	 * Determine whether we need to upgrade to an exclusive lock. This is
	 * required to change security info in file_modified(), for extending
	 * I/O, any form of non-overwrite I/O, and unaligned I/O to unwritten
	 * extents (as partial block zeroing may be required).
	 *
	 * Note that unaligned writes are allowed under shared lock so long as
	 * they are pure overwrites. Otherwise, concurrent unaligned writes risk
	 * data corruption due to partial block zeroing in the dio layer, and so
	 * the I/O must occur exclusively.
	 */
	if (*ilock_shared &&
	    (!IS_NOSEC(inode) || *extend || !overwrite ||
	     (unaligned_io && *unwritten))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	/*
	 * Now that locking is settled, determine dio flags and exclusivity
	 * requirements. We don't use DIO_OVERWRITE_ONLY because we already
	 * enforce the equivalent behavior via the locking above. The inode
	 * lock is already held exclusive if the write is non-overwrite or
	 * extending, so drain all outstanding dio and set the force wait dio
	 * flag.
	 */
	if (!*ilock_shared && (unaligned_io || *extend)) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		if (unaligned_io && (!overwrite || *unwritten))
			inode_dio_wait(inode);
		*dio_flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}
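
/*
 * To summarize the lock selection above (assuming IS_NOSEC() holds so that
 * file_modified() does not need to change security info): pure overwrites of
 * already-written blocks proceed under the shared lock; overwrites of
 * unwritten blocks keep the shared lock only when the I/O is block-aligned;
 * extending, non-overwrite, or unaligned-to-unwritten writes take the
 * exclusive lock and run with IOMAP_DIO_FORCE_WAIT, draining in-flight DIO
 * first where partial-block zeroing could race.
 */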

static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unwritten = false;
	bool ilock_shared = true;
	int dio_flags = 0;

	/*
	 * Quick check here without any i_rwsem lock to see if it is an
	 * extending I/O. A more reliable check is done in
	 * ext4_dio_write_checks() with proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}
	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_should_use_dio(iocb, from)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	/*
	 * Prevent inline data from being created, since we are going to
	 * allocate blocks for DIO. We know the inode does not currently have
	 * inline data because ext4_should_use_dio() checked for it, but we
	 * have to clear the state flag before the write checks, because the
	 * lock cycle there (dropping the shared lock and re-acquiring it
	 * exclusively) could otherwise let other writers race with us.
	 */
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend,
				    &unwritten, &dio_flags);
	if (ret <= 0)
		return ret;

	offset = iocb->ki_pos;
	count = ret;

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared && !unwritten)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   dio_flags, NULL, 0);
	if (ret == -ENOTBLK)
		ret = 0;
	if (extend) {
		/*
		 * We always perform extending DIO writes synchronously, so by
		 * now the I/O is completed and ext4_handle_inode_extension()
		 * was called. Clean up the inode in case of error or race
		 * with writeback of delalloc blocks.
		 */
		WARN_ON_ONCE(ret == -EIOCBQUEUED);
		ext4_inode_extension_cleanup(inode, ret);
	}

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is an attempt to preserve the expected
		 * direct I/O semantics in the case we fall back to buffered
		 * I/O to complete the rest of the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend) {
		ret = ext4_handle_inode_extension(inode, offset, ret);
		ext4_inode_extension_cleanup(inode, ret);
	}
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);
	else
		return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in
	 * a COW page; COW writes should *not* poke the journal (the file will
	 * not be changed). Doing so would cause unintended failures when
	 * mounted read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page, since the latter
	 * is only set for order-0 faults (i.e. in do_cow_fault); for larger
	 * orders, dax_iomap_fault() will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		filemap_invalidate_lock_shared(mapping);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			filemap_invalidate_unlock_shared(mapping);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		filemap_invalidate_lock_shared(mapping);
	}
	result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, order, pfn);
		filemap_invalidate_unlock_shared(mapping);
		sb_end_pagefault(sb);
	} else {
		filemap_invalidate_unlock_shared(mapping);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files whose underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vm_flags_set(vma, VM_HUGEPAGE);
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
					    EXT4_JTR_NONE);
	if (err)
		goto out_journal;
	lock_buffer(sbi->s_sbh);
	strncpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);
	ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC |
			FMODE_DIO_PARALLEL_WRITE;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= ext4_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_inode_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
	.fileattr_get	= ext4_fileattr_get,
	.fileattr_set	= ext4_fileattr_set,
};