/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}
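
/*
 * Illustrative note (editorial sketch, not from the original file): the
 * waiter above pairs with the direct IO completion path, roughly:
 *
 *	atomic_inc(&EXT4_I(inode)->i_unwritten);   // DIO into a hole starts
 *	...unwritten extent converted at end_io...
 *	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
 *		wake_up_all(ext4_ioend_wq(inode));
 *
 * so ext4_unwritten_wait() returns only once every in-flight unwritten
 * extent conversion for the inode has completed.
 */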
/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif
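
/*
 * Worked example for the two helpers above (editorial, illustrative only):
 * with a 4096-byte block size, blockmask == 0x0fff. An AIO DIO write at
 * pos == 8192 whose iovecs are also 4096-byte aligned gives
 * (pos | iov_iter_alignment(from)) & blockmask == 0, i.e. aligned; the
 * same write at pos == 8704 (8192 + 512) sets bit 9, so
 * ext4_unaligned_aio() returns 1 and the caller serializes it. Likewise,
 * ext4_overwrite_io() reports an overwrite only if ext4_map_blocks() maps
 * the whole range (err == blklen) and the mapping carries EXT4_MAP_MAPPED
 * rather than an unwritten extent.
 */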
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO writes must be serialized against each other,
	 * as zeroing of partial blocks by two competing unaligned AIOs can
	 * result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
		overwrite = 1;

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	if (!IS_ERR(handle))
		result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
	else
		result = VM_FAULT_SIGBUS;
	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}
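
/*
 * Editorial summary of the ordering used by the fault handlers above
 * (illustrative; it mirrors the code rather than adding to it):
 *
 *	sb_start_pagefault(sb);                 // freeze protection, writes only
 *	down_read(&EXT4_I(inode)->i_mmap_sem);  // stabilize vs. truncate
 *	handle = ext4_journal_start_sb(...);    // writes may allocate blocks
 *	dax_iomap_fault(...);
 *
 * with teardown strictly in reverse. Read faults skip the freeze and
 * journal steps, leaving handle == NULL, which !IS_ERR() treats as
 * success.
 */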
/*
 * Handle write faults for VM_MIXEDMAP mappings. Similarly to the
 * ext4_dax_fault() handler, we check for races against truncate. Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has finished by now, and so if it included part
 * of the file we are working on, our pte will get unmapped and the check for
 * pte_same() in wp_pfn_shared() fails. Thus the fault gets retried and things
 * work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vmf->vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}
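
/*
 * Illustrative userspace view of the mmap dispatch above (editorial
 * sketch; the fd and path are hypothetical):
 *
 *	int fd = open("/mnt/ext4/data", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	// first store triggers a write fault
 *
 * On a DAX inode the write fault is handled by ext4_dax_fault()
 * (installed as .page_mkwrite above); otherwise it goes through
 * ext4_page_mkwrite().
 */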
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}
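
/*
 * Example of the last-mount sampling above (editorial; the mount point is
 * hypothetical): the first open for write after mounting at /mnt/scratch
 * stores "/mnt/scratch" into s_last_mounted (truncated to the 64-byte
 * field by strlcpy), which tools such as dumpe2fs then report as
 * "Last mounted on:" when inspecting the device or image.
 */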
/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because it lets us handle
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function. Once the extent status tree has been fully implemented, it will
 * track the complete extent status of a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we treat the extent as data or as a hole depending on whether the page
 * cache has data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first time through the loop and the
			 * offset is not beyond the end offset, there is a
			 * hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first time through the loop and the offset
		 * is smaller than the first page offset, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were found than we asked for; the remainder
		 * of the range must be a hole.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}
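
/*
 * Worked example (editorial, illustrative): consider a 4k-block file
 * whose blocks 0-3 are written, 4-7 are a hole, and 8-11 are unwritten
 * with dirty data in the page cache. lseek(fd, 4 << 12, SEEK_DATA) walks
 * ext4_get_next_extent() past the hole to the extent at block 8; since
 * that extent is unwritten, ext4_find_unwritten_pgoff() checks the page
 * cache, finds uptodate buffers and reports block 8's offset as data.
 */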
/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};
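
/*
 * Usage sketch for the llseek dispatch above (editorial; userspace code
 * with a hypothetical fd):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// -> ext4_seek_data()
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// -> ext4_seek_hole()
 *
 * SEEK_SET/CUR/END are clamped against the per-format maxbytes via
 * generic_file_llseek_size(), and seeking for data at or past EOF fails
 * with -ENXIO, matching the checks in ext4_seek_data().
 */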