/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_dinode.h"

#include <linux/aio.h>
#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}
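/*
 * To illustrate the ordering the helpers above enforce (a restatement of
 * the code, not a separate mechanism): an exclusive IO path brackets the
 * XFS locks with the VFS mutex, while a shared path never touches it.
 *
 *	xfs_rw_ilock(ip, XFS_IOLOCK_EXCL)	takes i_mutex, then i_iolock
 *	...
 *	xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL)	drops i_iolock, then i_mutex
 *
 * This is why only the XFS_IOLOCK_EXCL case wraps i_mutex around the
 * xfs_ilock()/xfs_iunlock() calls.
 */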
/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the supplied buffer,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes it
		 * to disk before logging the new inode size in case of an
		 * extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}
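/*
 * Summary of the device cache flushing in xfs_file_fsync() above (a
 * restatement of the code, not additional behaviour):
 *
 *	realtime inode:		flush the rt device before the log force
 *	separate log device:	flush the data device before the log force,
 *				so file data is stable before the inode
 *				size is logged
 *	single device:		the log force also flushes the device cache,
 *				so an explicit flush is only needed if the
 *				log force turned out to be a no-op
 */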
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
	if (ret < 0)
		return ret;

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here.  If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared.  We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take the lock exclusively if the page cache needs
	 * invalidation.  This allows the normal direct IO case of no page
	 * cache pages to proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -filemap_write_and_wait_range(
							VFS_I(ip)->i_mapping,
							pos, -1);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
			truncate_pagecache_range(VFS_I(ip), pos, -1);
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize)		/* current inode size */
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED,
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	int			iolock;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((pos | count) & target->bt_logical_sectormask)
		return -XFS_ERROR(EINVAL);

	/* "unaligned" here means not aligned to a filesystem block */
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed.  We don't need
	 * to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidating after we
	 * got the iolock, to protect against other threads adding new pages
	 * while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						   pos, -1);
		if (ret)
			goto out;
		truncate_pagecache_range(VFS_I(ip), pos, -1);
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, count, ocount);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
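/*
 * For reference, the iolock states used by xfs_file_dio_aio_write() above
 * (again a restatement of the code, not additional behaviour):
 *
 *	aligned IO, no cached pages:	IOLOCK_SHARED for the whole IO
 *	cached pages present:		IOLOCK_EXCL to flush and invalidate
 *					the page cache, then demoted back to
 *					IOLOCK_SHARED for the IO itself
 *	unaligned IO:			IOLOCK_EXCL for the whole IO, plus
 *					inode_dio_wait() to serialise against
 *					sub-block zeroing by other direct IOs
 */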
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			count)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	struct iov_iter		from;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	iov_iter_init(&from, iovp, nr_segs, count, 0);
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_perform_write(file, &from, pos);
	if (likely(ret >= 0))
		iocb->ki_pos = pos + ret;
	/*
	 * If we just got an ENOSPC, try to write back all dirty inodes to
	 * convert delalloc space to free up some of the excess reserved
	 * metadata space.
	 */
	if (ret == -ENOSPC && !enospc) {
		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}
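/*
 * Note that the ENOSPC retry above runs at most once: @enospc is set on
 * the first failure, so if flushing dirty inodes does not convert enough
 * delalloc space the second ENOSPC is returned to the caller rather than
 * looping indefinitely.
 */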
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ret = -EIO;
		goto out;
	}

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  ocount);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}

out:
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_trans	*tp;
	long			error;
	loff_t			new_size = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			/*
			 * Positive error here: this function uses positive
			 * internal errors throughout and negates them at the
			 * final return.
			 */
			error = EINVAL;
			goto out_unlock;
		}

		ASSERT(offset + len < i_size_read(inode));
		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else {
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = -inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_unlock;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	ip->i_d.di_mode &= ~S_ISUID;
	if (ip->i_d.di_mode & S_IXGRP)
		ip->i_d.di_mode &= ~S_ISGID;

	if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)))
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (file->f_flags & O_DSYNC)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return -error;
}


STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(NULL, ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, ctx, bufsize);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

/*
 * This type is used to indicate which kind of offset we would like to
 * search for in the page cache for either xfs_seek_data() or
 * xfs_seek_hole().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have the BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_data() or xfs_seek_hole().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * for holes and this is the first time through the loop, the
		 * given offset landed in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers
		 * looking for holes and they all contained data, then lastoff
		 * points at the end of the last mapped page; if that has not
		 * reached the end of the search range, there must be a hole
		 * between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping.  However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * The search is done if the page index is out of
			 * range.  If the current offset has not reached the
			 * end of the specified search range, there should be
			 * a hole in between.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated
			 * (page->mapping == NULL).  We can freely skip it
			 * and proceed to check the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the
				 * start point of the search if this is the
				 * first time through here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data and found none,
			 * or searching for a hole and found a data buffer.
			 * In either case, the next page probably contains
			 * the offset we are looking for, so update the last
			 * offset to point at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the search
		 * is done.  In the data case nothing was found; in the hole
		 * case there is a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

STATIC loff_t
xfs_seek_data(
	struct file		*file,
	loff_t			start)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);
	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in a data extent */
			if (map[i].br_startblock == DELAYSTARTBLOCK ||
			    (map[i].br_state == XFS_EXT_NORM &&
			     !isnullstartblock(map[i].br_startblock)))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for data in the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
							DATA_OFF, &offset))
					goto out;
			}
		}

		/*
		 * map[0] is a hole or an unwritten extent without data in
		 * the page cache.  If there is nothing in map[1], we are
		 * probably reading after EOF.
		 */
		if (nmap == 1) {
			error = ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the reading offset is not beyond or at EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			error = ENXIO;
			goto out_unlock;
		}
	}

out:
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return -error;
	return offset;
}

STATIC loff_t
xfs_seek_hole(
	struct file		*file,
	loff_t			start)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = ENXIO;
		goto out_unlock;
	}

	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in a hole */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for a hole in the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
							HOLE_OFF, &offset))
					goto out;
			}
		}

		/*
		 * map[0] contains data, or is unwritten but has data in the
		 * page cache; that probably means we are reading after EOF.
		 * We should fix the offset to point to the end of the file
		 * (i.e., there is an implicit hole at the end of any file).
		 */
		if (nmap == 1) {
			offset = isize;
			break;
		}

		ASSERT(i > 1);

		/*
		 * Both mappings contain data, proceed to the next round of
		 * search if the current reading offset is not beyond or at
		 * EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			offset = isize;
			break;
		}
	}

out:
	/*
	 * At this point, we must have found a hole.  However, the returned
	 * offset may be bigger than the file size, as it may be aligned to
	 * a page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return -error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		origin)
{
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, origin);
	case SEEK_DATA:
		return xfs_seek_data(file, offset);
	case SEEK_HOLE:
		return xfs_seek_hole(file, offset);
	default:
		return -EINVAL;
	}
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};
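/*
 * Userspace reaches the SEEK_DATA/SEEK_HOLE support above through lseek(2),
 * e.g. (illustrative only):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  next hole after data
 *
 * lseek() fails with ENXIO when the requested offset is at or beyond EOF,
 * matching the ENXIO returns in xfs_seek_data() and xfs_seek_hole().
 */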