/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock_shared(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
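
/*
 * Worked example (illustrative; the block size and offsets are assumptions,
 * not taken from a specific workload): with a 4096-byte block size,
 * blockmask is 0xfff.  A direct AIO write at pos 2048 for 512 bytes has
 * (pos | iov_iter_alignment(from)) & 0xfff != 0, so it is flagged as
 * unaligned and serialized via ext4_unwritten_wait() in
 * ext4_file_write_iter() below.  A write at pos 8192 for 4096 bytes is
 * block-aligned and returns 0, provided the user buffer is also
 * block-aligned in memory, since iov_iter_alignment() folds in the iovec
 * base addresses and lengths.  Writes starting at or beyond i_size return 0
 * as well, because there is no existing block whose partial zeroing could
 * race.
 */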

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}
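
/*
 * Worked example (illustrative, assuming a 4096-byte block size, i.e.
 * blkbits == 12, and pos + len within i_size): for pos == 5000 and
 * len == 3000 the IO spans bytes 5000..7999, all inside logical block 1,
 * so m_lblk == 1 and EXT4_MAX_BLOCKS() yields m_len == 1.  If
 * ext4_map_blocks() then returns 1 with EXT4_MAP_MAPPED set, block 1 is
 * already allocated and written, so the direct IO can proceed as a pure
 * overwrite without any block allocation.
 */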

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIOs must be serialized against each other, as
	 * zeroing of partial blocks by two competing unaligned AIOs can
	 * result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && !unaligned_aio) {
		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
			if (ext4_should_dioread_nolock(inode))
				overwrite = 1;
		} else if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}
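
/*
 * Illustrative userspace sketch (not part of this file; the path, buffer
 * size and alignment are hypothetical) of how the IOCB_NOWAIT cases above
 * are reached.  RWF_NOWAIT on pwritev2() maps to IOCB_NOWAIT: buffered
 * writes get -EOPNOTSUPP here, while a direct write that is not a pure
 * overwrite, or that cannot take the inode lock immediately, gets -EAGAIN
 * instead of blocking.
 *
 *	int fd = open("/mnt/scratch/file", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *	ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
 *
 * If n is -1 and errno is EAGAIN, the write could not be completed without
 * blocking (lock contention or a non-overwriting write); the caller may
 * retry with a plain blocking pwritev2() call.
 */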

#ifdef CONFIG_FS_DAX
static int ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	if (!IS_ERR(handle))
		result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
	else
		result = VM_FAULT_SIGBUS;
	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !sb_rdonly(sb))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-mapped
 * file rather than ext4_ext_walk_space(), because it lets us handle
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  Once the extent status tree is fully implemented, it will track
 * all extent status for a file and we can use it directly to retrieve the
 * offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff]: if the range covers an unwritten extent, that extent
 * is reported as data or as a hole depending on whether the page cache
 * holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = (endoff - 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i;
		unsigned long nr_pages;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
					&index, end);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is smaller than the page
			 * offset, there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (lastoff + bh->b_size <= startoff)
						goto next;
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
next:
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		pagevec_release(&pvec);
	} while (index <= end);

	/* There are no pages up to endoff - that range must be a hole. */
	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * reported as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * reported as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
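
/*
 * Illustrative userspace sketch (not part of this file; fd and the offsets
 * are hypothetical) of how the SEEK_DATA/SEEK_HOLE cases above are
 * exercised:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);      (first data at/after 0)
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   (end of that data run)
 *
 * Both calls fail with ENXIO when the requested offset is at or beyond the
 * end of the file, matching the -ENXIO returns in ext4_seek_data() and
 * ext4_seek_hole().
 */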

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};