/*
 * linux/fs/nfs/file.c
 *
 * Copyright (C) 1992 Rick Sladkey
 *
 * Changes Copyright (C) 1994 by Florian La Roche
 *   - Do not copy data too often around in the kernel.
 *   - In nfs_file_read the return value of kmalloc wasn't checked.
 *   - Put in a better version of read look-ahead buffering. Original idea
 *     and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 * Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 * Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 *  nfs regular file handling functions
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/swap.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_FILE

static const struct vm_operations_struct nfs_file_vm_ops;

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode)	(0)
#endif

int nfs_check_flags(int flags)
{
	if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nfs_check_flags);

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
	int res;

	dprintk("NFS: open file(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
	res = nfs_check_flags(filp->f_flags);
	if (res)
		return res;

	res = nfs_open(inode, filp);
	return res;
}

int
nfs_file_release(struct inode *inode, struct file *filp)
{
	dprintk("NFS: release(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
	return nfs_release(inode, filp);
}
EXPORT_SYMBOL_GPL(nfs_file_release);

/**
 * nfs_revalidate_file_size - Revalidate the file size
 * @inode - pointer to inode struct
 * @filp - pointer to struct file
 *
 * Revalidates the file length. This is basically a wrapper around
 * nfs_revalidate_inode() that takes into account the fact that we may
 * have cached writes (in which case we don't care about the server's
 * idea of what the file length is), or O_DIRECT (in which case we
 * shouldn't trust the cache).
 */
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out_noreval;

	if (filp->f_flags & O_DIRECT)
		goto force_reval;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		goto force_reval;
	if (nfs_attribute_timeout(inode))
		goto force_reval;
out_noreval:
	return 0;
force_reval:
	return __nfs_revalidate_inode(server, inode);
}

loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
	dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
			filp, offset, whence);

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		struct inode *inode = filp->f_mapping->host;

		int retval = nfs_revalidate_file_size(inode, filp);
		if (retval < 0)
			return (loff_t)retval;
	}

	return generic_file_llseek(filp, offset, whence);
}
EXPORT_SYMBOL_GPL(nfs_file_llseek);

/*
 * Flush all dirty pages, and check for write errors.
 */
int
nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/*
	 * If we're holding a write delegation, then just start the i/o
	 * but don't wait for completion (or send a commit).
	 */
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return filemap_fdatawrite(file->f_mapping);

	/* Flush writes to the server and return any errors */
	return vfs_fsync(file, 0);
}
EXPORT_SYMBOL_GPL(nfs_file_flush);

ssize_t
nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t result;

	if (iocb->ki_flags & IOCB_DIRECT)
		return nfs_file_direct_read(iocb, to, iocb->ki_pos);

	dprintk("NFS: read(%pD2, %zu@%lu)\n",
		iocb->ki_filp,
		iov_iter_count(to), (unsigned long) iocb->ki_pos);

	result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping);
	if (!result) {
		result = generic_file_read_iter(iocb, to);
		if (result > 0)
			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
	}
	return result;
}
EXPORT_SYMBOL_GPL(nfs_file_read);

ssize_t
nfs_file_splice_read(struct file *filp, loff_t *ppos,
		     struct pipe_inode_info *pipe, size_t count,
		     unsigned int flags)
{
	struct inode *inode = file_inode(filp);
	ssize_t res;

	dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
		filp, (unsigned long) count, (unsigned long long) *ppos);

	res = nfs_revalidate_mapping_protected(inode, filp->f_mapping);
	if (!res) {
		res = generic_file_splice_read(filp, ppos, pipe, count, flags);
		if (res > 0)
			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
	}
	return res;
}
EXPORT_SYMBOL_GPL(nfs_file_splice_read);

int
nfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int status;

	dprintk("NFS: mmap(%pD2)\n", file);

	/* Note: generic_file_mmap() returns ENOSYS on nommu systems
	 *       so we call that before revalidating the mapping
	 */
	status = generic_file_mmap(file, vma);
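	/* On success, install NFS's vm_ops (so ->page_mkwrite is called
	 * on writable faults) and revalidate the cached mapping.
	 */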
	if (!status) {
		vma->vm_ops = &nfs_file_vm_ops;
		status = nfs_revalidate_mapping(inode, file->f_mapping);
	}
	return status;
}
EXPORT_SYMBOL_GPL(nfs_file_mmap);

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 *
 * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
 * disk, but it retrieves and clears ctx->error after synching, despite
 * the two being set at the same time in nfs_context_set_write_error().
 * This is because the former is used to notify the _next_ call to
 * nfs_file_write() that a write error occurred, and hence cause it to
 * fall back to doing a synchronous write.
 */
int
nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode *inode = file_inode(file);
	int have_error, do_resend, status;
	int ret = 0;

	dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);

	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
	do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
	have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
	status = nfs_commit_inode(inode, FLUSH_SYNC);
	have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
	if (have_error) {
		ret = xchg(&ctx->error, 0);
		if (ret)
			goto out;
	}
	if (status < 0) {
		ret = status;
		goto out;
	}
	do_resend |= test_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
	if (do_resend)
		ret = -EAGAIN;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_file_fsync_commit);

static int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int ret;
	struct inode *inode = file_inode(file);

	trace_nfs_fsync_enter(inode);

	do {
		ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
		if (ret != 0)
			break;
		mutex_lock(&inode->i_mutex);
		ret = nfs_file_fsync_commit(file, start, end, datasync);
		mutex_unlock(&inode->i_mutex);
		/*
		 * If nfs_file_fsync_commit detected a server reboot, then
		 * resend all dirty pages that might have been covered by
		 * the NFS_CONTEXT_RESEND_WRITES flag
		 */
		start = 0;
		end = LLONG_MAX;
	} while (ret == -EAGAIN);

	trace_nfs_fsync_exit(inode, ret);
	return ret;
}

/*
 * Decide whether a read/modify/write cycle may be more efficient
 * than a modify/write/read cycle when writing to a page in the
 * page cache.
 *
 * The modify/write/read cycle may occur if a page is read before
 * being completely filled by the writer.  In this situation, the
 * page must be completely written to stable storage on the server
 * before it can be refilled by reading in the page from the server.
 * This can lead to expensive, small, FILE_SYNC mode writes being
 * done.
 *
 * It may be more efficient to read the page first if the file is
 * open for reading in addition to writing, the page is not marked
 * as Uptodate, it is not dirty or waiting to be committed,
 * indicating that it was previously allocated and then modified,
 * that there were valid bytes of data in that range of the file,
 * and that the new data won't completely replace the old data in
 * that range of the file.
 */
static int nfs_want_read_modify_write(struct file *file, struct page *page,
			loff_t pos, unsigned len)
{
	unsigned int pglen = nfs_page_length(page);
	unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int end = offset + len;

	if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
		if (!PageUptodate(page))
			return 1;
		return 0;
	}

	if ((file->f_mode & FMODE_READ) &&	/* open for read? */
	    !PageUptodate(page) &&		/* Uptodate? */
	    !PagePrivate(page) &&		/* i/o request already? */
	    pglen &&				/* valid bytes of file? */
	    (end < pglen || offset))		/* replace all valid bytes? */
		return 1;
	return 0;
}

/*
 * This does the "real" work of the write. We must allocate and lock the
 * page to be sent back to the generic routine, which then copies the
 * data from user space.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	int once_thru = 0;

	dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
		file, mapping->host->i_ino, len, (long long) pos);

start:
	/*
	 * Prevent starvation issues if someone is doing a consistency
	 * sync-to-disk
	 */
	ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
				 nfs_wait_bit_killable, TASK_KILLABLE);
	if (ret)
		return ret;
	/*
	 * Wait for O_DIRECT to complete
	 */
	nfs_inode_dio_wait(mapping->host);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ret = nfs_flush_incompatible(file, page);
	if (ret) {
		unlock_page(page);
		page_cache_release(page);
	} else if (!once_thru &&
		   nfs_want_read_modify_write(file, page, pos, len)) {
		once_thru = 1;
		ret = nfs_readpage(file, page);
		page_cache_release(page);
		if (!ret)
			goto start;
	}
	return ret;
}

static int nfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	int status;

	dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
		file, mapping->host->i_ino, len, (long long) pos);

	/*
	 * Zero any uninitialised parts of the page, and then mark the page
	 * as up to date if it turns out that we're extending the file.
	 */
	if (!PageUptodate(page)) {
		unsigned pglen = nfs_page_length(page);
		unsigned end = offset + len;

		if (pglen == 0) {
			zero_user_segments(page, 0, offset,
					end, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
		} else if (end >= pglen) {
			zero_user_segment(page, end, PAGE_CACHE_SIZE);
			if (offset == 0)
				SetPageUptodate(page);
		} else
			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
	}

	status = nfs_updatepage(file, page, offset, copied);

	unlock_page(page);
	page_cache_release(page);

	if (status < 0)
		return status;
	NFS_I(mapping->host)->write_io += copied;

	if (nfs_ctx_key_to_expire(ctx)) {
		status = nfs_wb_all(mapping->host);
		if (status < 0)
			return status;
	}

	return copied;
}

/*
 * Partially or wholly invalidate a page
 * - Release the private state associated with a page if undergoing complete
 *   page invalidation
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 */
static void nfs_invalidate_page(struct page *page, unsigned int offset,
				unsigned int length)
{
	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
		 page, offset, length);

	if (offset != 0 || length < PAGE_CACHE_SIZE)
		return;
	/* Cancel any unstarted writes on this page */
	nfs_wb_page_cancel(page_file_mapping(page)->host, page);

	nfs_fscache_invalidate_page(page, page->mapping->host);
}

/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	struct address_space *mapping = page->mapping;

	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* Always try to initiate a 'commit' if relevant, but only
	 * wait for it if __GFP_WAIT is set.  Even then, only wait 1
	 * second and only if the 'bdi' is not congested.
	 * Waiting indefinitely can cause deadlocks when the NFS
	 * server is on this machine, when a new TCP connection is
	 * needed and in other rare cases.  There is no particular
	 * need to wait extensively here.  A short wait has the
	 * benefit that someone else can worry about the freezer.
	 */
	if (mapping) {
		struct nfs_server *nfss = NFS_SERVER(mapping->host);
		nfs_commit_inode(mapping->host, 0);
		if ((gfp & __GFP_WAIT) &&
		    !bdi_write_congested(&nfss->backing_dev_info)) {
			wait_on_page_bit_killable_timeout(page, PG_private,
							  HZ);
			if (PagePrivate(page))
				set_bdi_congested(&nfss->backing_dev_info,
						  BLK_RW_ASYNC);
		}
	}
	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}

static void nfs_check_dirty_writeback(struct page *page,
				bool *dirty, bool *writeback)
{
	struct nfs_inode *nfsi;
	struct address_space *mapping = page_file_mapping(page);

	if (!mapping || PageSwapCache(page))
		return;

	/*
	 * Check if an unstable page is currently being committed and
	 * if so, have the VM treat it as if the page is under writeback
	 * so it will not block due to pages that will shortly be freeable.
	 */
	nfsi = NFS_I(mapping->host);
	if (test_bit(NFS_INO_COMMIT, &nfsi->flags)) {
		*writeback = true;
		return;
	}

	/*
	 * If PagePrivate() is set, then the page is not freeable and as the
	 * inode is not being committed, it's not going to be cleaned in the
	 * near future so treat it as dirty
	 */
	if (PagePrivate(page))
		*dirty = true;
}

/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
		inode->i_ino, (long long)page_offset(page));

	nfs_fscache_wait_on_page_write(nfsi, page);
	return nfs_wb_page(inode, page);
}

#ifdef CONFIG_NFS_SWAP
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
						sector_t *span)
{
	int ret;
	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

	*span = sis->pages;

	rcu_read_lock();
	ret = xs_swapper(rcu_dereference(clnt->cl_xprt), 1);
	rcu_read_unlock();

	return ret;
}

static void nfs_swap_deactivate(struct file *file)
{
	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

	rcu_read_lock();
	xs_swapper(rcu_dereference(clnt->cl_xprt), 0);
	rcu_read_unlock();
}
#endif

const struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.write_begin = nfs_write_begin,
	.write_end = nfs_write_end,
	.invalidatepage = nfs_invalidate_page,
	.releasepage = nfs_release_page,
	.direct_IO = nfs_direct_IO,
	.migratepage = nfs_migrate_page,
	.launder_page = nfs_launder_page,
	.is_dirty_writeback = nfs_check_dirty_writeback,
	.error_remove_page = generic_error_remove_page,
#ifdef CONFIG_NFS_SWAP
	.swap_activate = nfs_swap_activate,
	.swap_deactivate = nfs_swap_deactivate,
#endif
};

/*
 * Notification that a PTE pointing to an NFS page is about to be made
 * writable, implying that someone is about to modify the page through a
 * shared-writable mapping
 */
static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *filp = vma->vm_file;
	struct inode *inode = file_inode(filp);
	unsigned pagelen;
	int ret = VM_FAULT_NOPAGE;
	struct address_space *mapping;

	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
		filp, filp->f_mapping->host->i_ino,
		(long long)page_offset(page));

	/* make sure the cache has finished storing the page */
	nfs_fscache_wait_on_page_write(NFS_I(inode), page);

	wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
			nfs_wait_bit_killable, TASK_KILLABLE);

	lock_page(page);
	mapping = page_file_mapping(page);
	if (mapping != inode->i_mapping)
		goto out_unlock;

	wait_on_page_writeback(page);

	pagelen = nfs_page_length(page);
	if (pagelen == 0)
		goto out_unlock;

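	/* The page covers valid file data: set up an NFS write request for
	 * the whole page; if that fails, answer the fault with SIGBUS.
	 */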
	ret = VM_FAULT_LOCKED;
	if (nfs_flush_incompatible(filp, page) == 0 &&
	    nfs_updatepage(filp, page, 0, pagelen) == 0)
		goto out;

	ret = VM_FAULT_SIGBUS;
out_unlock:
	unlock_page(page);
out:
	return ret;
}

static const struct vm_operations_struct nfs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = nfs_vm_page_mkwrite,
};

static int nfs_need_sync_write(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx;

	if (IS_SYNC(inode) || (filp->f_flags & O_DSYNC))
		return 1;
	ctx = nfs_file_open_context(filp);
	if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags) ||
	    nfs_ctx_key_to_expire(ctx))
		return 1;
	return 0;
}

ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	unsigned long written = 0;
	ssize_t result;
	size_t count = iov_iter_count(from);

	result = nfs_key_timeout_notify(file, inode);
	if (result)
		return result;

	if (iocb->ki_flags & IOCB_DIRECT) {
		result = generic_write_checks(iocb, from);
		if (result <= 0)
			return result;
		return nfs_file_direct_write(iocb, from);
	}

	dprintk("NFS: write(%pD2, %zu@%Ld)\n",
		file, count, (long long) iocb->ki_pos);

	result = -EBUSY;
	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_flags & IOCB_APPEND) {
		result = nfs_revalidate_file_size(inode, file);
		if (result)
			goto out;
	}

	result = count;
	if (!count)
		goto out;

	result = generic_file_write_iter(iocb, from);
	if (result > 0)
		written = result;

	/* Return error values for O_DSYNC and IS_SYNC() */
	if (result >= 0 && nfs_need_sync_write(file, inode)) {
		int err = vfs_fsync(file, 0);
		if (err < 0)
			result = err;
	}
	if (result > 0)
		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	goto out;
}
EXPORT_SYMBOL_GPL(nfs_file_write);

static int
do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status = 0;
	unsigned int saved_type = fl->fl_type;

	/* Try local locking first */
	posix_test_lock(filp, fl);
	if (fl->fl_type != F_UNLCK) {
		/* found a conflict */
		goto out;
	}
	fl->fl_type = saved_type;

	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
		goto out_noconflict;

	if (is_local)
		goto out_noconflict;

	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
	return status;
out_noconflict:
	fl->fl_type = F_UNLCK;
	goto out;
}

static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
	case FL_POSIX:
		res = posix_lock_file_wait(file, fl);
		break;
	case FL_FLOCK:
		res = flock_lock_file_wait(file, fl);
		break;
	default:
		BUG();
	}
	return res;
}

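/*
 * Release a lock: flush any dirty pages and wait for outstanding I/O on
 * this lock context before dropping the lock, so the server sees all
 * writes made while the lock was held.
 */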
static int
do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	struct nfs_lock_context *l_ctx;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	nfs_sync_mapping(filp->f_mapping);

	l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
	if (!IS_ERR(l_ctx)) {
		status = nfs_iocounter_wait(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		if (status < 0)
			return status;
	}

	/* NOTE: special case
	 *	If we're signalled while cleaning up locks on process exit, we
	 *	still need to complete the unlock.
	 */
	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	return status;
}

static int
is_time_granular(struct timespec *ts) {
	return ((ts->tv_sec == 0) && (ts->tv_nsec <= 1000));
}

static int
do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = nfs_sync_mapping(filp->f_mapping);
	if (status != 0)
		goto out;

	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	if (status < 0)
		goto out;

	/*
	 * Revalidate the cache if the server has time stamps granular
	 * enough to detect subsecond changes.  Otherwise, clear the
	 * cache to prevent missing any changes.
	 *
	 * This makes locking act as a cache coherency point.
	 */
	nfs_sync_mapping(filp->f_mapping);
	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
		if (is_time_granular(&NFS_SERVER(inode)->time_delta))
			__nfs_revalidate_inode(NFS_SERVER(inode), inode);
		else
			nfs_zap_caches(inode);
	}
out:
	return status;
}

/*
 * Lock a (portion of) a file
 */
int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int ret = -ENOLCK;
	int is_local = 0;

	dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
			filp, fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);

	nfs_inc_stats(inode, NFSIOS_VFSLOCK);

	/* No mandatory locks over NFS */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
		is_local = 1;

	if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
		ret = NFS_PROTO(inode)->lock_check_bounds(fl);
		if (ret < 0)
			goto out_err;
	}

	if (IS_GETLK(cmd))
		ret = do_getlk(filp, cmd, fl, is_local);
	else if (fl->fl_type == F_UNLCK)
		ret = do_unlk(filp, cmd, fl, is_local);
	else
		ret = do_setlk(filp, cmd, fl, is_local);
out_err:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_lock);

/*
 * Lock a (portion of) a file
 */
int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int is_local = 0;

	dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
			filp, fl->fl_type, fl->fl_flags);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/*
	 * The NFSv4 protocol doesn't support LOCK_MAND, which is not part of
	 * any standard.
	 * In principle we might be able to support LOCK_MAND on NFSv2/3
	 * since NLMv3/4 support DOS share modes, but for now the NFS code
	 * is not set up for it.
	 */
	if (fl->fl_type & LOCK_MAND)
		return -EINVAL;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
		is_local = 1;

	/* We're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl, is_local);
	return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);

const struct file_operations nfs_file_operations = {
	.llseek = nfs_file_llseek,
	.read_iter = nfs_file_read,
	.write_iter = nfs_file_write,
	.mmap = nfs_file_mmap,
	.open = nfs_file_open,
	.flush = nfs_file_flush,
	.release = nfs_file_release,
	.fsync = nfs_file_fsync,
	.lock = nfs_lock,
	.flock = nfs_flock,
	.splice_read = nfs_file_splice_read,
	.splice_write = iter_file_splice_write,
	.check_flags = nfs_check_flags,
	.setlease = simple_nosetlease,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);