/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/aio.h>
#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the supplied buffer,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

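/*
 * Update the inode's preallocation state in a small transaction: set or clear
 * XFS_DIFLAG_PREALLOC as requested and, unless the caller asked for an
 * "invisible" update, clear the setuid bit (and the setgid bit if group
 * execute is set) and bump the timestamps.  The commit is made synchronous
 * if XFS_PREALLOC_SYNC is set.
 */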
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		ip->i_d.di_mode &= ~S_ISUID;
		if (ip->i_d.di_mode & S_IXGRP)
			ip->i_d.di_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = iov_iter_count(to);
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	loff_t			pos = iocb->ki_pos;

	XFS_STATS_INC(xs_read_calls);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= XFS_IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (unlikely(ioflags & XFS_IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -EINVAL;
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here.  If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared.  We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take the lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = filemap_write_and_wait_range(
							VFS_I(ip)->i_mapping,
							pos, pos + size - 1);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + size - 1) >> PAGE_CACHE_SHIFT);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize)		/* current inode size */
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		error = xfs_zero_eof(ip, *pos, i_size_read(inode));
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky-to-
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED,
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			pos = iocb->ki_pos;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed.  We don't need
	 * to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidating after we
	 * got the iolock to protect against other threads adding new pages
	 * while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;
	iov_iter_truncate(from, count);

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						    pos, pos + count - 1);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if
		 * we fail to invalidate a page, but this should never
		 * happen on XFS. Warn if it does fail.
		 */
		ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + count - 1) >> PAGE_CACHE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain;
	 * otherwise demote the lock if we had to flush cached pages.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, from, pos);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	loff_t			pos = iocb->ki_pos;
	size_t			count = iov_iter_count(from);

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	iov_iter_truncate(from, count);
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_perform_write(file, from, pos);
	if (likely(ret >= 0))
		iocb->ki_pos = pos + ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error.  In the case of ENOSPC, first try
	 * to write back all dirty inodes to free up some of the excess
	 * reserved metadata space.  This reduces the chances that the
	 * eofblocks scan waits on dirty mappings.  Since xfs_flush_inodes()
	 * is serialized, this also behaves as a filter to prevent too many
	 * eofblocks scans from running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		ssize_t		err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
	}

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

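/*
 * Refuse to open large files when the caller did not specify O_LARGEFILE,
 * and refuse any open on a filesystem that has been shut down.
 */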
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()'d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have the BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page is mapped into the given range.  If we are
		 * searching for holes and this is the first pass through
		 * the loop, the given offset lands in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers to
		 * find holes but they all contained data, the last offset
		 * has been updated to point at the end of the last mapped
		 * page.  If it has not yet reached the end of the search
		 * range, there should be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping.  However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * The search is done if the page index is out of range.
			 * If the current offset has not yet reached the end
			 * of the specified search range, there should be a
			 * hole between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * of the search range if this is the first
				 * time we come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data but found nothing,
			 * or searching for a hole but found a data buffer.  In
			 * either case, the next page probably contains what we
			 * are looking for, so update the last offset to point
			 * at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the search
		 * is done.  In this case nothing was found when searching for
		 * data, but when searching for holes we found a hole behind
		 * the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = -ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for a hole or data from the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
				      &offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested.  This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * a page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};