11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/fs/nfs/file.c 31da177e4SLinus Torvalds * 41da177e4SLinus Torvalds * Copyright (C) 1992 Rick Sladkey 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Changes Copyright (C) 1994 by Florian La Roche 71da177e4SLinus Torvalds * - Do not copy data too often around in the kernel. 81da177e4SLinus Torvalds * - In nfs_file_read the return value of kmalloc wasn't checked. 91da177e4SLinus Torvalds * - Put in a better version of read look-ahead buffering. Original idea 101da177e4SLinus Torvalds * and implementation by Wai S Kok elekokws@ee.nus.sg. 111da177e4SLinus Torvalds * 121da177e4SLinus Torvalds * Expire cache on write to a file by Wai S Kok (Oct 1994). 131da177e4SLinus Torvalds * 141da177e4SLinus Torvalds * Total rewrite of read side for new NFS buffer cache.. Linus. 151da177e4SLinus Torvalds * 161da177e4SLinus Torvalds * nfs regular file handling functions 171da177e4SLinus Torvalds */ 181da177e4SLinus Torvalds 19ddda8e0aSBryan Schumaker #include <linux/module.h> 201da177e4SLinus Torvalds #include <linux/time.h> 211da177e4SLinus Torvalds #include <linux/kernel.h> 221da177e4SLinus Torvalds #include <linux/errno.h> 231da177e4SLinus Torvalds #include <linux/fcntl.h> 241da177e4SLinus Torvalds #include <linux/stat.h> 251da177e4SLinus Torvalds #include <linux/nfs_fs.h> 261da177e4SLinus Torvalds #include <linux/nfs_mount.h> 271da177e4SLinus Torvalds #include <linux/mm.h> 281da177e4SLinus Torvalds #include <linux/pagemap.h> 29e8edc6e0SAlexey Dobriyan #include <linux/aio.h> 305a0e3ad6STejun Heo #include <linux/gfp.h> 31b608b283STrond Myklebust #include <linux/swap.h> 321da177e4SLinus Torvalds 331da177e4SLinus Torvalds #include <asm/uaccess.h> 341da177e4SLinus Torvalds 351da177e4SLinus Torvalds #include "delegation.h" 3694387fb1STrond Myklebust #include "internal.h" 3791d5b470SChuck Lever #include "iostat.h" 38545db45fSDavid Howells #include "fscache.h" 39612aa983SChristoph Hellwig #include "pnfs.h" 401da177e4SLinus Torvalds 41f4ce1299STrond Myklebust #include "nfstrace.h" 42f4ce1299STrond Myklebust 431da177e4SLinus Torvalds #define NFSDBG_FACILITY NFSDBG_FILE 441da177e4SLinus Torvalds 45f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct nfs_file_vm_ops; 4694387fb1STrond Myklebust 471da177e4SLinus Torvalds /* Hack for future NFS swap support */ 481da177e4SLinus Torvalds #ifndef IS_SWAPFILE 491da177e4SLinus Torvalds # define IS_SWAPFILE(inode) (0) 501da177e4SLinus Torvalds #endif 511da177e4SLinus Torvalds 52ce4ef7c0SBryan Schumaker int nfs_check_flags(int flags) 531da177e4SLinus Torvalds { 541da177e4SLinus Torvalds if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT)) 551da177e4SLinus Torvalds return -EINVAL; 561da177e4SLinus Torvalds 571da177e4SLinus Torvalds return 0; 581da177e4SLinus Torvalds } 5989d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_check_flags); 601da177e4SLinus Torvalds 611da177e4SLinus Torvalds /* 621da177e4SLinus Torvalds * Open file 631da177e4SLinus Torvalds */ 641da177e4SLinus Torvalds static int 651da177e4SLinus Torvalds nfs_file_open(struct inode *inode, struct file *filp) 661da177e4SLinus Torvalds { 671da177e4SLinus Torvalds int res; 681da177e4SLinus Torvalds 696de1472fSAl Viro dprintk("NFS: open file(%pD2)\n", filp); 70cc0dd2d1SChuck Lever 71c2459dc4SChuck Lever nfs_inc_stats(inode, NFSIOS_VFSOPEN); 721da177e4SLinus Torvalds res = nfs_check_flags(filp->f_flags); 731da177e4SLinus Torvalds if (res) 741da177e4SLinus Torvalds return res; 751da177e4SLinus Torvalds 7646cb650cSTrond Myklebust res = 
nfs_open(inode, filp);
771da177e4SLinus Torvalds return res;
781da177e4SLinus Torvalds }
791da177e4SLinus Torvalds
80ce4ef7c0SBryan Schumaker int
811da177e4SLinus Torvalds nfs_file_release(struct inode *inode, struct file *filp)
821da177e4SLinus Torvalds {
836de1472fSAl Viro dprintk("NFS: release(%pD2)\n", filp);
846da24bc9SChuck Lever
8591d5b470SChuck Lever nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
8646cb650cSTrond Myklebust return nfs_release(inode, filp);
871da177e4SLinus Torvalds }
8889d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_release);
891da177e4SLinus Torvalds
90980802e3STrond Myklebust /**
91980802e3STrond Myklebust * nfs_revalidate_file_size - Revalidate the file size
92980802e3STrond Myklebust * @inode: pointer to inode struct
93980802e3STrond Myklebust * @filp: pointer to struct file
94980802e3STrond Myklebust *
95980802e3STrond Myklebust * Revalidates the file length. This is basically a wrapper around
96980802e3STrond Myklebust * nfs_revalidate_inode() that takes into account the fact that we may
97980802e3STrond Myklebust * have cached writes (in which case we don't care about the server's
98980802e3STrond Myklebust * idea of what the file length is), or O_DIRECT (in which case we
99980802e3STrond Myklebust * shouldn't trust the cache).
100980802e3STrond Myklebust */
101980802e3STrond Myklebust static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
102980802e3STrond Myklebust {
103980802e3STrond Myklebust struct nfs_server *server = NFS_SERVER(inode);
104980802e3STrond Myklebust struct nfs_inode *nfsi = NFS_I(inode);
105980802e3STrond Myklebust
106d7cf8dd0STrond Myklebust if (nfs_have_delegated_attributes(inode))
107d7cf8dd0STrond Myklebust goto out_noreval;
108d7cf8dd0STrond Myklebust
109980802e3STrond Myklebust if (filp->f_flags & O_DIRECT)
110980802e3STrond Myklebust goto force_reval;
111d7cf8dd0STrond Myklebust if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
112d7cf8dd0STrond Myklebust goto force_reval;
113d7cf8dd0STrond Myklebust if (nfs_attribute_timeout(inode))
114d7cf8dd0STrond Myklebust goto force_reval;
115d7cf8dd0STrond Myklebust out_noreval:
116fe51beecSTrond Myklebust return 0;
117980802e3STrond Myklebust force_reval:
118980802e3STrond Myklebust return __nfs_revalidate_inode(server, inode);
119980802e3STrond Myklebust }
120980802e3STrond Myklebust
121965c8e59SAndrew Morton loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
122980802e3STrond Myklebust {
1236de1472fSAl Viro dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
1246de1472fSAl Viro filp, offset, whence);
125b84e06c5SChuck Lever
12606222e49SJosef Bacik /*
127965c8e59SAndrew Morton * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
12806222e49SJosef Bacik * the cached file length
12906222e49SJosef Bacik */
130965c8e59SAndrew Morton if (whence != SEEK_SET && whence != SEEK_CUR) {
131980802e3STrond Myklebust struct inode *inode = filp->f_mapping->host;
132d5e66348STrond Myklebust
133980802e3STrond Myklebust int retval = nfs_revalidate_file_size(inode, filp);
134980802e3STrond Myklebust if (retval < 0)
135980802e3STrond Myklebust return (loff_t)retval;
13679835a71SAndi Kleen }
137d5e66348STrond Myklebust
138965c8e59SAndrew Morton return generic_file_llseek(filp, offset, whence);
139980802e3STrond Myklebust }
14089d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_llseek);
141980802e3STrond Myklebust
1421da177e4SLinus Torvalds /*
1431da177e4SLinus Torvalds * Flush all dirty pages, and check for write errors.
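 *
 * For illustration only: a minimal userspace sketch (the mount point
 * /mnt/nfs and the file name are assumptions, not anything this file
 * defines). Because this ->flush() method runs at close() time and,
 * absent a write delegation, ends in vfs_fsync(), NFS write errors that
 * a buffered write() deferred may only be reported by fsync() or
 * close(), so both return values are worth checking:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/nfs/data.tmp",
 *			      O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (write(fd, "hello\n", 6) != 6)
 *			perror("write");  // may succeed even if the server write fails later
 *		if (fsync(fd) < 0)
 *			perror("fsync");  // deferred write errors are reported here ...
 *		if (close(fd) < 0)
 *			perror("close");  // ... or here, via ->flush()
 *		return 0;
 *	}
 *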
1441da177e4SLinus Torvalds */ 145ce4ef7c0SBryan Schumaker int 14675e1fcc0SMiklos Szeredi nfs_file_flush(struct file *file, fl_owner_t id) 1471da177e4SLinus Torvalds { 1486de1472fSAl Viro struct inode *inode = file_inode(file); 1491da177e4SLinus Torvalds 1506de1472fSAl Viro dprintk("NFS: flush(%pD2)\n", file); 1511da177e4SLinus Torvalds 152c2459dc4SChuck Lever nfs_inc_stats(inode, NFSIOS_VFSFLUSH); 1531da177e4SLinus Torvalds if ((file->f_mode & FMODE_WRITE) == 0) 1541da177e4SLinus Torvalds return 0; 1557b159fc1STrond Myklebust 15614546c33STrond Myklebust /* 15714546c33STrond Myklebust * If we're holding a write delegation, then just start the i/o 15814546c33STrond Myklebust * but don't wait for completion (or send a commit). 15914546c33STrond Myklebust */ 160011e2a7fSBryan Schumaker if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) 16114546c33STrond Myklebust return filemap_fdatawrite(file->f_mapping); 16214546c33STrond Myklebust 1637fe5c398STrond Myklebust /* Flush writes to the server and return any errors */ 164af7fa165STrond Myklebust return vfs_fsync(file, 0); 1651da177e4SLinus Torvalds } 16689d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_flush); 1671da177e4SLinus Torvalds 168ce4ef7c0SBryan Schumaker ssize_t 1693aa2d199SAl Viro nfs_file_read(struct kiocb *iocb, struct iov_iter *to) 1701da177e4SLinus Torvalds { 1716de1472fSAl Viro struct inode *inode = file_inode(iocb->ki_filp); 1721da177e4SLinus Torvalds ssize_t result; 1731da177e4SLinus Torvalds 1741da177e4SLinus Torvalds if (iocb->ki_filp->f_flags & O_DIRECT) 175e19a8a0aSMartin K. Petersen return nfs_file_direct_read(iocb, to, iocb->ki_pos); 1761da177e4SLinus Torvalds 177619d30b4SAl Viro dprintk("NFS: read(%pD2, %zu@%lu)\n", 1786de1472fSAl Viro iocb->ki_filp, 1793aa2d199SAl Viro iov_iter_count(to), (unsigned long) iocb->ki_pos); 1801da177e4SLinus Torvalds 181874f9463STrond Myklebust result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping); 1824184dcf2SChuck Lever if (!result) { 1833aa2d199SAl Viro result = generic_file_read_iter(iocb, to); 1844184dcf2SChuck Lever if (result > 0) 1854184dcf2SChuck Lever nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result); 1864184dcf2SChuck Lever } 1871da177e4SLinus Torvalds return result; 1881da177e4SLinus Torvalds } 18989d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_read); 1901da177e4SLinus Torvalds 191ce4ef7c0SBryan Schumaker ssize_t 192f0930fffSJens Axboe nfs_file_splice_read(struct file *filp, loff_t *ppos, 193f0930fffSJens Axboe struct pipe_inode_info *pipe, size_t count, 194f0930fffSJens Axboe unsigned int flags) 1951da177e4SLinus Torvalds { 1966de1472fSAl Viro struct inode *inode = file_inode(filp); 1971da177e4SLinus Torvalds ssize_t res; 1981da177e4SLinus Torvalds 1996de1472fSAl Viro dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n", 2006de1472fSAl Viro filp, (unsigned long) count, (unsigned long long) *ppos); 2011da177e4SLinus Torvalds 202874f9463STrond Myklebust res = nfs_revalidate_mapping_protected(inode, filp->f_mapping); 203aa2f1ef1SChuck Lever if (!res) { 204f0930fffSJens Axboe res = generic_file_splice_read(filp, ppos, pipe, count, flags); 205aa2f1ef1SChuck Lever if (res > 0) 206aa2f1ef1SChuck Lever nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res); 207aa2f1ef1SChuck Lever } 2081da177e4SLinus Torvalds return res; 2091da177e4SLinus Torvalds } 21089d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_splice_read); 2111da177e4SLinus Torvalds 212ce4ef7c0SBryan Schumaker int 2131da177e4SLinus Torvalds nfs_file_mmap(struct file * file, struct vm_area_struct * vma) 
2141da177e4SLinus Torvalds { 2156de1472fSAl Viro struct inode *inode = file_inode(file); 2161da177e4SLinus Torvalds int status; 2171da177e4SLinus Torvalds 2186de1472fSAl Viro dprintk("NFS: mmap(%pD2)\n", file); 2191da177e4SLinus Torvalds 220e1ebfd33STrond Myklebust /* Note: generic_file_mmap() returns ENOSYS on nommu systems 221e1ebfd33STrond Myklebust * so we call that before revalidating the mapping 222e1ebfd33STrond Myklebust */ 223e1ebfd33STrond Myklebust status = generic_file_mmap(file, vma); 22494387fb1STrond Myklebust if (!status) { 22594387fb1STrond Myklebust vma->vm_ops = &nfs_file_vm_ops; 226e1ebfd33STrond Myklebust status = nfs_revalidate_mapping(inode, file->f_mapping); 22794387fb1STrond Myklebust } 2281da177e4SLinus Torvalds return status; 2291da177e4SLinus Torvalds } 23089d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_mmap); 2311da177e4SLinus Torvalds 2321da177e4SLinus Torvalds /* 2331da177e4SLinus Torvalds * Flush any dirty pages for this process, and check for write errors. 2341da177e4SLinus Torvalds * The return status from this call provides a reliable indication of 2351da177e4SLinus Torvalds * whether any write errors occurred for this process. 236af7fa165STrond Myklebust * 237af7fa165STrond Myklebust * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to 238af7fa165STrond Myklebust * disk, but it retrieves and clears ctx->error after synching, despite 239af7fa165STrond Myklebust * the two being set at the same time in nfs_context_set_write_error(). 240af7fa165STrond Myklebust * This is because the former is used to notify the _next_ call to 24125985edcSLucas De Marchi * nfs_file_write() that a write error occurred, and hence cause it to 242af7fa165STrond Myklebust * fall back to doing a synchronous write. 2431da177e4SLinus Torvalds */ 244ce4ef7c0SBryan Schumaker int 245a5c58892SBryan Schumaker nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync) 2461da177e4SLinus Torvalds { 247cd3758e3STrond Myklebust struct nfs_open_context *ctx = nfs_file_open_context(file); 2486de1472fSAl Viro struct inode *inode = file_inode(file); 24905990d1bSTrond Myklebust int have_error, do_resend, status; 250af7fa165STrond Myklebust int ret = 0; 251af7fa165STrond Myklebust 2526de1472fSAl Viro dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync); 2531da177e4SLinus Torvalds 25491d5b470SChuck Lever nfs_inc_stats(inode, NFSIOS_VFSFSYNC); 25505990d1bSTrond Myklebust do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags); 256af7fa165STrond Myklebust have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 257af7fa165STrond Myklebust status = nfs_commit_inode(inode, FLUSH_SYNC); 258af7fa165STrond Myklebust have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 25905990d1bSTrond Myklebust if (have_error) { 260af7fa165STrond Myklebust ret = xchg(&ctx->error, 0); 26105990d1bSTrond Myklebust if (ret) 26205990d1bSTrond Myklebust goto out; 26305990d1bSTrond Myklebust } 26405990d1bSTrond Myklebust if (status < 0) { 265af7fa165STrond Myklebust ret = status; 26605990d1bSTrond Myklebust goto out; 26705990d1bSTrond Myklebust } 26805990d1bSTrond Myklebust do_resend |= test_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags); 26905990d1bSTrond Myklebust if (do_resend) 27005990d1bSTrond Myklebust ret = -EAGAIN; 27105990d1bSTrond Myklebust out: 272a5c58892SBryan Schumaker return ret; 273a5c58892SBryan Schumaker } 27489d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_fsync_commit); 275a5c58892SBryan Schumaker 276a5c58892SBryan 
Schumaker static int
277a5c58892SBryan Schumaker nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
278a5c58892SBryan Schumaker {
279a5c58892SBryan Schumaker int ret;
280496ad9aaSAl Viro struct inode *inode = file_inode(file);
281a5c58892SBryan Schumaker
282f4ce1299STrond Myklebust trace_nfs_fsync_enter(inode);
283f4ce1299STrond Myklebust
28405990d1bSTrond Myklebust do {
285a5c58892SBryan Schumaker ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
2867b281ee0STrond Myklebust if (ret != 0)
28705990d1bSTrond Myklebust break;
288a5c58892SBryan Schumaker mutex_lock(&inode->i_mutex);
289a5c58892SBryan Schumaker ret = nfs_file_fsync_commit(file, start, end, datasync);
29002c24a82SJosef Bacik mutex_unlock(&inode->i_mutex);
291dcfc4f25STrond Myklebust /*
292dcfc4f25STrond Myklebust * If nfs_file_fsync_commit detected a server reboot, then
293dcfc4f25STrond Myklebust * resend all dirty pages that might have been covered by
294dcfc4f25STrond Myklebust * the NFS_CONTEXT_RESEND_WRITES flag
295dcfc4f25STrond Myklebust */
296dcfc4f25STrond Myklebust start = 0;
297dcfc4f25STrond Myklebust end = LLONG_MAX;
29805990d1bSTrond Myklebust } while (ret == -EAGAIN);
29905990d1bSTrond Myklebust
300f4ce1299STrond Myklebust trace_nfs_fsync_exit(inode, ret);
301af7fa165STrond Myklebust return ret;
3021da177e4SLinus Torvalds }
3031da177e4SLinus Torvalds
3041da177e4SLinus Torvalds /*
30538c73044SPeter Staubach * Decide whether a read/modify/write cycle may be more efficient
30638c73044SPeter Staubach * than a modify/write/read cycle when writing to a page in the
30738c73044SPeter Staubach * page cache.
30838c73044SPeter Staubach *
30938c73044SPeter Staubach * The modify/write/read cycle may occur if a page is read before
31038c73044SPeter Staubach * being completely filled by the writer. In this situation, the
31138c73044SPeter Staubach * page must be completely written to stable storage on the server
31238c73044SPeter Staubach * before it can be refilled by reading in the page from the server.
31338c73044SPeter Staubach * This can lead to expensive, small, FILE_SYNC mode writes being
31438c73044SPeter Staubach * done.
31538c73044SPeter Staubach *
31638c73044SPeter Staubach * It may be more efficient to read the page first if the file is
31738c73044SPeter Staubach * open for reading in addition to writing, the page is not marked
31838c73044SPeter Staubach * as Uptodate, it is not dirty or waiting to be committed,
31938c73044SPeter Staubach * indicating that it was previously allocated and then modified,
32038c73044SPeter Staubach * that there were valid bytes of data in that range of the file,
32138c73044SPeter Staubach * and that the new data won't completely replace the old data in
32238c73044SPeter Staubach * that range of the file.
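 *
 * For illustration only: a userspace sketch of the case this heuristic
 * targets (the path /mnt/nfs/existing.dat is an assumption, the file is
 * assumed to already contain data at that offset, and the pre-read only
 * happens if the page is not already cached). A small buffered
 * overwrite in the middle of a page, on a descriptor that is also open
 * for reading, may be satisfied by reading the page from the server
 * first; rewriting the whole page would not be:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[100] = "partial update";
 *		int fd = open("/mnt/nfs/existing.dat", O_RDWR);
 *		if (fd < 0)
 *			return 1;
 *		// 100 bytes at offset 512: neither page-aligned nor a full
 *		// page, so the old page contents may be read in first
 *		pwrite(fd, buf, sizeof(buf), 512);
 *		close(fd);
 *		return 0;
 *	}
 *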
32338c73044SPeter Staubach */ 32438c73044SPeter Staubach static int nfs_want_read_modify_write(struct file *file, struct page *page, 32538c73044SPeter Staubach loff_t pos, unsigned len) 32638c73044SPeter Staubach { 32738c73044SPeter Staubach unsigned int pglen = nfs_page_length(page); 32838c73044SPeter Staubach unsigned int offset = pos & (PAGE_CACHE_SIZE - 1); 32938c73044SPeter Staubach unsigned int end = offset + len; 33038c73044SPeter Staubach 331612aa983SChristoph Hellwig if (pnfs_ld_read_whole_page(file->f_mapping->host)) { 332612aa983SChristoph Hellwig if (!PageUptodate(page)) 333612aa983SChristoph Hellwig return 1; 334612aa983SChristoph Hellwig return 0; 335612aa983SChristoph Hellwig } 336612aa983SChristoph Hellwig 33738c73044SPeter Staubach if ((file->f_mode & FMODE_READ) && /* open for read? */ 33838c73044SPeter Staubach !PageUptodate(page) && /* Uptodate? */ 33938c73044SPeter Staubach !PagePrivate(page) && /* i/o request already? */ 34038c73044SPeter Staubach pglen && /* valid bytes of file? */ 34138c73044SPeter Staubach (end < pglen || offset)) /* replace all valid bytes? */ 34238c73044SPeter Staubach return 1; 34338c73044SPeter Staubach return 0; 34438c73044SPeter Staubach } 34538c73044SPeter Staubach 34638c73044SPeter Staubach /* 3474899f9c8SNick Piggin * This does the "real" work of the write. We must allocate and lock the 3484899f9c8SNick Piggin * page to be sent back to the generic routine, which then copies the 3494899f9c8SNick Piggin * data from user space. 3501da177e4SLinus Torvalds * 3511da177e4SLinus Torvalds * If the writer ends up delaying the write, the writer needs to 3521da177e4SLinus Torvalds * increment the page use counts until he is done with the page. 3531da177e4SLinus Torvalds */ 3544899f9c8SNick Piggin static int nfs_write_begin(struct file *file, struct address_space *mapping, 3554899f9c8SNick Piggin loff_t pos, unsigned len, unsigned flags, 3564899f9c8SNick Piggin struct page **pagep, void **fsdata) 3571da177e4SLinus Torvalds { 3584899f9c8SNick Piggin int ret; 35938c73044SPeter Staubach pgoff_t index = pos >> PAGE_CACHE_SHIFT; 3604899f9c8SNick Piggin struct page *page; 36138c73044SPeter Staubach int once_thru = 0; 3624899f9c8SNick Piggin 3631e8968c5SNiels de Vos dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n", 3646de1472fSAl Viro file, mapping->host->i_ino, len, (long long) pos); 365b7eaefaaSChuck Lever 36638c73044SPeter Staubach start: 36772cb77f4STrond Myklebust /* 36872cb77f4STrond Myklebust * Prevent starvation issues if someone is doing a consistency 36972cb77f4STrond Myklebust * sync-to-disk 37072cb77f4STrond Myklebust */ 37174316201SNeilBrown ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING, 37272cb77f4STrond Myklebust nfs_wait_bit_killable, TASK_KILLABLE); 37372cb77f4STrond Myklebust if (ret) 37472cb77f4STrond Myklebust return ret; 375aa5acceaSTrond Myklebust /* 376aa5acceaSTrond Myklebust * Wait for O_DIRECT to complete 377aa5acceaSTrond Myklebust */ 378aa5acceaSTrond Myklebust nfs_inode_dio_wait(mapping->host); 37972cb77f4STrond Myklebust 38054566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 3814899f9c8SNick Piggin if (!page) 3824899f9c8SNick Piggin return -ENOMEM; 3834899f9c8SNick Piggin *pagep = page; 3844899f9c8SNick Piggin 3854899f9c8SNick Piggin ret = nfs_flush_incompatible(file, page); 3864899f9c8SNick Piggin if (ret) { 3874899f9c8SNick Piggin unlock_page(page); 3884899f9c8SNick Piggin page_cache_release(page); 38938c73044SPeter Staubach } else if (!once_thru && 
39038c73044SPeter Staubach nfs_want_read_modify_write(file, page, pos, len)) { 39138c73044SPeter Staubach once_thru = 1; 39238c73044SPeter Staubach ret = nfs_readpage(file, page); 39338c73044SPeter Staubach page_cache_release(page); 39438c73044SPeter Staubach if (!ret) 39538c73044SPeter Staubach goto start; 3964899f9c8SNick Piggin } 3974899f9c8SNick Piggin return ret; 3981da177e4SLinus Torvalds } 3991da177e4SLinus Torvalds 4004899f9c8SNick Piggin static int nfs_write_end(struct file *file, struct address_space *mapping, 4014899f9c8SNick Piggin loff_t pos, unsigned len, unsigned copied, 4024899f9c8SNick Piggin struct page *page, void *fsdata) 4031da177e4SLinus Torvalds { 4044899f9c8SNick Piggin unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 405dc24826bSAndy Adamson struct nfs_open_context *ctx = nfs_file_open_context(file); 4064899f9c8SNick Piggin int status; 4071da177e4SLinus Torvalds 4081e8968c5SNiels de Vos dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n", 4096de1472fSAl Viro file, mapping->host->i_ino, len, (long long) pos); 410b7eaefaaSChuck Lever 411efc91ed0STrond Myklebust /* 412efc91ed0STrond Myklebust * Zero any uninitialised parts of the page, and then mark the page 413efc91ed0STrond Myklebust * as up to date if it turns out that we're extending the file. 414efc91ed0STrond Myklebust */ 415efc91ed0STrond Myklebust if (!PageUptodate(page)) { 416efc91ed0STrond Myklebust unsigned pglen = nfs_page_length(page); 417efc91ed0STrond Myklebust unsigned end = offset + len; 418efc91ed0STrond Myklebust 419efc91ed0STrond Myklebust if (pglen == 0) { 420efc91ed0STrond Myklebust zero_user_segments(page, 0, offset, 421efc91ed0STrond Myklebust end, PAGE_CACHE_SIZE); 422efc91ed0STrond Myklebust SetPageUptodate(page); 423efc91ed0STrond Myklebust } else if (end >= pglen) { 424efc91ed0STrond Myklebust zero_user_segment(page, end, PAGE_CACHE_SIZE); 425efc91ed0STrond Myklebust if (offset == 0) 426efc91ed0STrond Myklebust SetPageUptodate(page); 427efc91ed0STrond Myklebust } else 428efc91ed0STrond Myklebust zero_user_segment(page, pglen, PAGE_CACHE_SIZE); 429efc91ed0STrond Myklebust } 430efc91ed0STrond Myklebust 4314899f9c8SNick Piggin status = nfs_updatepage(file, page, offset, copied); 4324899f9c8SNick Piggin 4334899f9c8SNick Piggin unlock_page(page); 4344899f9c8SNick Piggin page_cache_release(page); 4354899f9c8SNick Piggin 4363d509e54SChuck Lever if (status < 0) 4373d509e54SChuck Lever return status; 4382701d086SAndy Adamson NFS_I(mapping->host)->write_io += copied; 439dc24826bSAndy Adamson 440dc24826bSAndy Adamson if (nfs_ctx_key_to_expire(ctx)) { 441dc24826bSAndy Adamson status = nfs_wb_all(mapping->host); 442dc24826bSAndy Adamson if (status < 0) 443dc24826bSAndy Adamson return status; 444dc24826bSAndy Adamson } 445dc24826bSAndy Adamson 4463d509e54SChuck Lever return copied; 4471da177e4SLinus Torvalds } 4481da177e4SLinus Torvalds 4496b9b3514SDavid Howells /* 4506b9b3514SDavid Howells * Partially or wholly invalidate a page 4516b9b3514SDavid Howells * - Release the private state associated with a page if undergoing complete 4526b9b3514SDavid Howells * page invalidation 453545db45fSDavid Howells * - Called if either PG_private or PG_fscache is set on the page 4546b9b3514SDavid Howells * - Caller holds page lock 4556b9b3514SDavid Howells */ 456d47992f8SLukas Czerner static void nfs_invalidate_page(struct page *page, unsigned int offset, 457d47992f8SLukas Czerner unsigned int length) 458cd52ed35STrond Myklebust { 459d47992f8SLukas Czerner dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n", 
460d47992f8SLukas Czerner page, offset, length); 461b7eaefaaSChuck Lever 462d47992f8SLukas Czerner if (offset != 0 || length < PAGE_CACHE_SIZE) 4631c75950bSTrond Myklebust return; 464d2ccddf0STrond Myklebust /* Cancel any unstarted writes on this page */ 465d56b4ddfSMel Gorman nfs_wb_page_cancel(page_file_mapping(page)->host, page); 466545db45fSDavid Howells 467545db45fSDavid Howells nfs_fscache_invalidate_page(page, page->mapping->host); 468cd52ed35STrond Myklebust } 469cd52ed35STrond Myklebust 4706b9b3514SDavid Howells /* 4716b9b3514SDavid Howells * Attempt to release the private state associated with a page 472545db45fSDavid Howells * - Called if either PG_private or PG_fscache is set on the page 4736b9b3514SDavid Howells * - Caller holds page lock 4746b9b3514SDavid Howells * - Return true (may release page) or false (may not) 4756b9b3514SDavid Howells */ 476cd52ed35STrond Myklebust static int nfs_release_page(struct page *page, gfp_t gfp) 477cd52ed35STrond Myklebust { 478b608b283STrond Myklebust struct address_space *mapping = page->mapping; 479b608b283STrond Myklebust 480b7eaefaaSChuck Lever dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); 481b7eaefaaSChuck Lever 48295905446SNeilBrown /* Always try to initiate a 'commit' if relevant, but only 4831aff5256SNeilBrown * wait for it if __GFP_WAIT is set. Even then, only wait 1 4841aff5256SNeilBrown * second and only if the 'bdi' is not congested. 48595905446SNeilBrown * Waiting indefinitely can cause deadlocks when the NFS 4861aff5256SNeilBrown * server is on this machine, when a new TCP connection is 4871aff5256SNeilBrown * needed and in other rare cases. There is no particular 4881aff5256SNeilBrown * need to wait extensively here. A short wait has the 4891aff5256SNeilBrown * benefit that someone else can worry about the freezer. 4905cf02d09SJeff Layton */ 49195905446SNeilBrown if (mapping) { 49295905446SNeilBrown struct nfs_server *nfss = NFS_SERVER(mapping->host); 49395905446SNeilBrown nfs_commit_inode(mapping->host, 0); 49495905446SNeilBrown if ((gfp & __GFP_WAIT) && 495353db796SNeilBrown !bdi_write_congested(&nfss->backing_dev_info)) { 49695905446SNeilBrown wait_on_page_bit_killable_timeout(page, PG_private, 49795905446SNeilBrown HZ); 498353db796SNeilBrown if (PagePrivate(page)) 499353db796SNeilBrown set_bdi_congested(&nfss->backing_dev_info, 500353db796SNeilBrown BLK_RW_ASYNC); 50195905446SNeilBrown } 502b608b283STrond Myklebust } 503e3db7691STrond Myklebust /* If PagePrivate() is set, then the page is not freeable */ 504545db45fSDavid Howells if (PagePrivate(page)) 505ddeff520SNikita Danilov return 0; 506545db45fSDavid Howells return nfs_fscache_release_page(page, gfp); 507e3db7691STrond Myklebust } 508e3db7691STrond Myklebust 509f919b196SMel Gorman static void nfs_check_dirty_writeback(struct page *page, 510f919b196SMel Gorman bool *dirty, bool *writeback) 511f919b196SMel Gorman { 512f919b196SMel Gorman struct nfs_inode *nfsi; 513f919b196SMel Gorman struct address_space *mapping = page_file_mapping(page); 514f919b196SMel Gorman 515f919b196SMel Gorman if (!mapping || PageSwapCache(page)) 516f919b196SMel Gorman return; 517f919b196SMel Gorman 518f919b196SMel Gorman /* 519f919b196SMel Gorman * Check if an unstable page is currently being committed and 520f919b196SMel Gorman * if so, have the VM treat it as if the page is under writeback 521f919b196SMel Gorman * so it will not block due to pages that will shortly be freeable. 
522f919b196SMel Gorman */ 523f919b196SMel Gorman nfsi = NFS_I(mapping->host); 524f919b196SMel Gorman if (test_bit(NFS_INO_COMMIT, &nfsi->flags)) { 525f919b196SMel Gorman *writeback = true; 526f919b196SMel Gorman return; 527f919b196SMel Gorman } 528f919b196SMel Gorman 529f919b196SMel Gorman /* 530f919b196SMel Gorman * If PagePrivate() is set, then the page is not freeable and as the 531f919b196SMel Gorman * inode is not being committed, it's not going to be cleaned in the 532f919b196SMel Gorman * near future so treat it as dirty 533f919b196SMel Gorman */ 534f919b196SMel Gorman if (PagePrivate(page)) 535f919b196SMel Gorman *dirty = true; 536f919b196SMel Gorman } 537f919b196SMel Gorman 5386b9b3514SDavid Howells /* 5396b9b3514SDavid Howells * Attempt to clear the private state associated with a page when an error 5406b9b3514SDavid Howells * occurs that requires the cached contents of an inode to be written back or 5416b9b3514SDavid Howells * destroyed 542545db45fSDavid Howells * - Called if either PG_private or fscache is set on the page 5436b9b3514SDavid Howells * - Caller holds page lock 5446b9b3514SDavid Howells * - Return 0 if successful, -error otherwise 5456b9b3514SDavid Howells */ 546e3db7691STrond Myklebust static int nfs_launder_page(struct page *page) 547e3db7691STrond Myklebust { 548d56b4ddfSMel Gorman struct inode *inode = page_file_mapping(page)->host; 549545db45fSDavid Howells struct nfs_inode *nfsi = NFS_I(inode); 550b7eaefaaSChuck Lever 551b7eaefaaSChuck Lever dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n", 552b7eaefaaSChuck Lever inode->i_ino, (long long)page_offset(page)); 553b7eaefaaSChuck Lever 554545db45fSDavid Howells nfs_fscache_wait_on_page_write(nfsi, page); 555b7eaefaaSChuck Lever return nfs_wb_page(inode, page); 556cd52ed35STrond Myklebust } 557cd52ed35STrond Myklebust 558a564b8f0SMel Gorman #ifdef CONFIG_NFS_SWAP 559a564b8f0SMel Gorman static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, 560a564b8f0SMel Gorman sector_t *span) 561a564b8f0SMel Gorman { 562dad2b015SJeff Layton int ret; 563dad2b015SJeff Layton struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); 564dad2b015SJeff Layton 565a564b8f0SMel Gorman *span = sis->pages; 566dad2b015SJeff Layton 567dad2b015SJeff Layton rcu_read_lock(); 568dad2b015SJeff Layton ret = xs_swapper(rcu_dereference(clnt->cl_xprt), 1); 569dad2b015SJeff Layton rcu_read_unlock(); 570dad2b015SJeff Layton 571dad2b015SJeff Layton return ret; 572a564b8f0SMel Gorman } 573a564b8f0SMel Gorman 574a564b8f0SMel Gorman static void nfs_swap_deactivate(struct file *file) 575a564b8f0SMel Gorman { 576dad2b015SJeff Layton struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); 577dad2b015SJeff Layton 578dad2b015SJeff Layton rcu_read_lock(); 579dad2b015SJeff Layton xs_swapper(rcu_dereference(clnt->cl_xprt), 0); 580dad2b015SJeff Layton rcu_read_unlock(); 581a564b8f0SMel Gorman } 582a564b8f0SMel Gorman #endif 583a564b8f0SMel Gorman 584f5e54d6eSChristoph Hellwig const struct address_space_operations nfs_file_aops = { 5851da177e4SLinus Torvalds .readpage = nfs_readpage, 5861da177e4SLinus Torvalds .readpages = nfs_readpages, 5879cccef95STrond Myklebust .set_page_dirty = __set_page_dirty_nobuffers, 5881da177e4SLinus Torvalds .writepage = nfs_writepage, 5891da177e4SLinus Torvalds .writepages = nfs_writepages, 5904899f9c8SNick Piggin .write_begin = nfs_write_begin, 5914899f9c8SNick Piggin .write_end = nfs_write_end, 592cd52ed35STrond Myklebust .invalidatepage = nfs_invalidate_page, 593cd52ed35STrond Myklebust .releasepage = 
nfs_release_page, 5941da177e4SLinus Torvalds .direct_IO = nfs_direct_IO, 595074cc1deSTrond Myklebust .migratepage = nfs_migrate_page, 596e3db7691STrond Myklebust .launder_page = nfs_launder_page, 597f919b196SMel Gorman .is_dirty_writeback = nfs_check_dirty_writeback, 598f590f333SAndi Kleen .error_remove_page = generic_error_remove_page, 599a564b8f0SMel Gorman #ifdef CONFIG_NFS_SWAP 600a564b8f0SMel Gorman .swap_activate = nfs_swap_activate, 601a564b8f0SMel Gorman .swap_deactivate = nfs_swap_deactivate, 602a564b8f0SMel Gorman #endif 6031da177e4SLinus Torvalds }; 6041da177e4SLinus Torvalds 6056b9b3514SDavid Howells /* 6066b9b3514SDavid Howells * Notification that a PTE pointing to an NFS page is about to be made 6076b9b3514SDavid Howells * writable, implying that someone is about to modify the page through a 6086b9b3514SDavid Howells * shared-writable mapping 6096b9b3514SDavid Howells */ 610c2ec175cSNick Piggin static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 61194387fb1STrond Myklebust { 612c2ec175cSNick Piggin struct page *page = vmf->page; 61394387fb1STrond Myklebust struct file *filp = vma->vm_file; 6146de1472fSAl Viro struct inode *inode = file_inode(filp); 61594387fb1STrond Myklebust unsigned pagelen; 616bc4866b6STrond Myklebust int ret = VM_FAULT_NOPAGE; 6174899f9c8SNick Piggin struct address_space *mapping; 61894387fb1STrond Myklebust 6191e8968c5SNiels de Vos dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n", 6206de1472fSAl Viro filp, filp->f_mapping->host->i_ino, 621b7eaefaaSChuck Lever (long long)page_offset(page)); 622b7eaefaaSChuck Lever 623545db45fSDavid Howells /* make sure the cache has finished storing the page */ 6246de1472fSAl Viro nfs_fscache_wait_on_page_write(NFS_I(inode), page); 625545db45fSDavid Howells 626*ef070dcbSTrond Myklebust wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING, 627*ef070dcbSTrond Myklebust nfs_wait_bit_killable, TASK_KILLABLE); 628*ef070dcbSTrond Myklebust 62994387fb1STrond Myklebust lock_page(page); 630d56b4ddfSMel Gorman mapping = page_file_mapping(page); 6316de1472fSAl Viro if (mapping != inode->i_mapping) 6328b1f9ee5STrond Myklebust goto out_unlock; 6338b1f9ee5STrond Myklebust 6342aeb98f4STrond Myklebust wait_on_page_writeback(page); 6352aeb98f4STrond Myklebust 6364899f9c8SNick Piggin pagelen = nfs_page_length(page); 6378b1f9ee5STrond Myklebust if (pagelen == 0) 6388b1f9ee5STrond Myklebust goto out_unlock; 6398b1f9ee5STrond Myklebust 640bc4866b6STrond Myklebust ret = VM_FAULT_LOCKED; 641bc4866b6STrond Myklebust if (nfs_flush_incompatible(filp, page) == 0 && 642bc4866b6STrond Myklebust nfs_updatepage(filp, page, 0, pagelen) == 0) 643bc4866b6STrond Myklebust goto out; 6448b1f9ee5STrond Myklebust 645bc4866b6STrond Myklebust ret = VM_FAULT_SIGBUS; 6468b1f9ee5STrond Myklebust out_unlock: 6474899f9c8SNick Piggin unlock_page(page); 648bc4866b6STrond Myklebust out: 649bc4866b6STrond Myklebust return ret; 65094387fb1STrond Myklebust } 65194387fb1STrond Myklebust 652f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct nfs_file_vm_ops = { 65394387fb1STrond Myklebust .fault = filemap_fault, 654f1820361SKirill A. 
Shutemov .map_pages = filemap_map_pages, 65594387fb1STrond Myklebust .page_mkwrite = nfs_vm_page_mkwrite, 65694387fb1STrond Myklebust }; 65794387fb1STrond Myklebust 6587b159fc1STrond Myklebust static int nfs_need_sync_write(struct file *filp, struct inode *inode) 6597b159fc1STrond Myklebust { 6607b159fc1STrond Myklebust struct nfs_open_context *ctx; 6617b159fc1STrond Myklebust 6626b2f3d1fSChristoph Hellwig if (IS_SYNC(inode) || (filp->f_flags & O_DSYNC)) 6637b159fc1STrond Myklebust return 1; 664cd3758e3STrond Myklebust ctx = nfs_file_open_context(filp); 665dc24826bSAndy Adamson if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags) || 666dc24826bSAndy Adamson nfs_ctx_key_to_expire(ctx)) 6677b159fc1STrond Myklebust return 1; 6687b159fc1STrond Myklebust return 0; 6697b159fc1STrond Myklebust } 6707b159fc1STrond Myklebust 671edaf4369SAl Viro ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) 6721da177e4SLinus Torvalds { 6736de1472fSAl Viro struct file *file = iocb->ki_filp; 6746de1472fSAl Viro struct inode *inode = file_inode(file); 6757e381172SChuck Lever unsigned long written = 0; 6761da177e4SLinus Torvalds ssize_t result; 677edaf4369SAl Viro size_t count = iov_iter_count(from); 678edaf4369SAl Viro loff_t pos = iocb->ki_pos; 6791da177e4SLinus Torvalds 6806de1472fSAl Viro result = nfs_key_timeout_notify(file, inode); 681dc24826bSAndy Adamson if (result) 682dc24826bSAndy Adamson return result; 683dc24826bSAndy Adamson 6846de1472fSAl Viro if (file->f_flags & O_DIRECT) 685e19a8a0aSMartin K. Petersen return nfs_file_direct_write(iocb, from, pos); 6861da177e4SLinus Torvalds 687619d30b4SAl Viro dprintk("NFS: write(%pD2, %zu@%Ld)\n", 688619d30b4SAl Viro file, count, (long long) pos); 6891da177e4SLinus Torvalds 6901da177e4SLinus Torvalds result = -EBUSY; 6911da177e4SLinus Torvalds if (IS_SWAPFILE(inode)) 6921da177e4SLinus Torvalds goto out_swapfile; 6937d52e862STrond Myklebust /* 6947d52e862STrond Myklebust * O_APPEND implies that we must revalidate the file length. 
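 *
 * For illustration only: a userspace sketch (the path /mnt/nfs/shared.log
 * is an assumption). The revalidation below means the append offset is
 * taken from the server's current file size rather than a stale cached
 * length; note that the NFS protocol has no atomic append, so appenders
 * on different clients still need their own serialisation (for example
 * fcntl() locks):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char line[] = "appended record\n";
 *		int fd = open("/mnt/nfs/shared.log",
 *			      O_WRONLY | O_APPEND | O_CREAT, 0644);
 *		if (fd < 0)
 *			return 1;
 *		write(fd, line, strlen(line));  // offset comes from the revalidated size
 *		close(fd);
 *		return 0;
 *	}
 *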
6957d52e862STrond Myklebust */ 6966de1472fSAl Viro if (file->f_flags & O_APPEND) { 6976de1472fSAl Viro result = nfs_revalidate_file_size(inode, file); 6981da177e4SLinus Torvalds if (result) 6991da177e4SLinus Torvalds goto out; 700fe51beecSTrond Myklebust } 7011da177e4SLinus Torvalds 7021da177e4SLinus Torvalds result = count; 7031da177e4SLinus Torvalds if (!count) 7041da177e4SLinus Torvalds goto out; 7051da177e4SLinus Torvalds 706edaf4369SAl Viro result = generic_file_write_iter(iocb, from); 7077e381172SChuck Lever if (result > 0) 7087e381172SChuck Lever written = result; 7097e381172SChuck Lever 7106b2f3d1fSChristoph Hellwig /* Return error values for O_DSYNC and IS_SYNC() */ 7116de1472fSAl Viro if (result >= 0 && nfs_need_sync_write(file, inode)) { 7126de1472fSAl Viro int err = vfs_fsync(file, 0); 713200baa21STrond Myklebust if (err < 0) 714200baa21STrond Myklebust result = err; 715200baa21STrond Myklebust } 7167e381172SChuck Lever if (result > 0) 7177e381172SChuck Lever nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written); 7181da177e4SLinus Torvalds out: 7191da177e4SLinus Torvalds return result; 7201da177e4SLinus Torvalds 7211da177e4SLinus Torvalds out_swapfile: 7221da177e4SLinus Torvalds printk(KERN_INFO "NFS: attempt to write to active swap file!\n"); 7231da177e4SLinus Torvalds goto out; 7241da177e4SLinus Torvalds } 72589d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_write); 7261da177e4SLinus Torvalds 7275eebde23SSuresh Jayaraman static int 7285eebde23SSuresh Jayaraman do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) 7291da177e4SLinus Torvalds { 7301da177e4SLinus Torvalds struct inode *inode = filp->f_mapping->host; 7311da177e4SLinus Torvalds int status = 0; 73221ac19d4SSergey Vlasov unsigned int saved_type = fl->fl_type; 7331da177e4SLinus Torvalds 734039c4d7aSTrond Myklebust /* Try local locking first */ 7356d34ac19SJ. Bruce Fields posix_test_lock(filp, fl); 7366d34ac19SJ. Bruce Fields if (fl->fl_type != F_UNLCK) { 7376d34ac19SJ. 
Bruce Fields /* found a conflict */ 738039c4d7aSTrond Myklebust goto out; 7391da177e4SLinus Torvalds } 74021ac19d4SSergey Vlasov fl->fl_type = saved_type; 741039c4d7aSTrond Myklebust 742011e2a7fSBryan Schumaker if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 743039c4d7aSTrond Myklebust goto out_noconflict; 744039c4d7aSTrond Myklebust 7455eebde23SSuresh Jayaraman if (is_local) 746039c4d7aSTrond Myklebust goto out_noconflict; 747039c4d7aSTrond Myklebust 748039c4d7aSTrond Myklebust status = NFS_PROTO(inode)->lock(filp, cmd, fl); 749039c4d7aSTrond Myklebust out: 7501da177e4SLinus Torvalds return status; 751039c4d7aSTrond Myklebust out_noconflict: 752039c4d7aSTrond Myklebust fl->fl_type = F_UNLCK; 753039c4d7aSTrond Myklebust goto out; 7541da177e4SLinus Torvalds } 7551da177e4SLinus Torvalds 7561da177e4SLinus Torvalds static int do_vfs_lock(struct file *file, struct file_lock *fl) 7571da177e4SLinus Torvalds { 7581da177e4SLinus Torvalds int res = 0; 7591da177e4SLinus Torvalds switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 7601da177e4SLinus Torvalds case FL_POSIX: 7611da177e4SLinus Torvalds res = posix_lock_file_wait(file, fl); 7621da177e4SLinus Torvalds break; 7631da177e4SLinus Torvalds case FL_FLOCK: 7641da177e4SLinus Torvalds res = flock_lock_file_wait(file, fl); 7651da177e4SLinus Torvalds break; 7661da177e4SLinus Torvalds default: 7671da177e4SLinus Torvalds BUG(); 7681da177e4SLinus Torvalds } 7691da177e4SLinus Torvalds return res; 7701da177e4SLinus Torvalds } 7711da177e4SLinus Torvalds 7725eebde23SSuresh Jayaraman static int 7735eebde23SSuresh Jayaraman do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) 7741da177e4SLinus Torvalds { 7751da177e4SLinus Torvalds struct inode *inode = filp->f_mapping->host; 7767a8203d8STrond Myklebust struct nfs_lock_context *l_ctx; 7771da177e4SLinus Torvalds int status; 7781da177e4SLinus Torvalds 7791da177e4SLinus Torvalds /* 7801da177e4SLinus Torvalds * Flush all pending writes before doing anything 7811da177e4SLinus Torvalds * with locks.. 7821da177e4SLinus Torvalds */ 78329884df0STrond Myklebust nfs_sync_mapping(filp->f_mapping); 7841da177e4SLinus Torvalds 7857a8203d8STrond Myklebust l_ctx = nfs_get_lock_context(nfs_file_open_context(filp)); 7867a8203d8STrond Myklebust if (!IS_ERR(l_ctx)) { 7877a8203d8STrond Myklebust status = nfs_iocounter_wait(&l_ctx->io_count); 7887a8203d8STrond Myklebust nfs_put_lock_context(l_ctx); 7897a8203d8STrond Myklebust if (status < 0) 7907a8203d8STrond Myklebust return status; 7917a8203d8STrond Myklebust } 7927a8203d8STrond Myklebust 7931da177e4SLinus Torvalds /* NOTE: special case 7941da177e4SLinus Torvalds * If we're signalled while cleaning up locks on process exit, we 7951da177e4SLinus Torvalds * still need to complete the unlock. 
7961da177e4SLinus Torvalds */ 7975eebde23SSuresh Jayaraman /* 7985eebde23SSuresh Jayaraman * Use local locking if mounted with "-onolock" or with appropriate 7995eebde23SSuresh Jayaraman * "-olocal_lock=" 8005eebde23SSuresh Jayaraman */ 8015eebde23SSuresh Jayaraman if (!is_local) 8021da177e4SLinus Torvalds status = NFS_PROTO(inode)->lock(filp, cmd, fl); 8031da177e4SLinus Torvalds else 8041da177e4SLinus Torvalds status = do_vfs_lock(filp, fl); 8051da177e4SLinus Torvalds return status; 8061da177e4SLinus Torvalds } 8071da177e4SLinus Torvalds 8085eebde23SSuresh Jayaraman static int 8096b96724eSRicardo Labiaga is_time_granular(struct timespec *ts) { 8106b96724eSRicardo Labiaga return ((ts->tv_sec == 0) && (ts->tv_nsec <= 1000)); 8116b96724eSRicardo Labiaga } 8126b96724eSRicardo Labiaga 8136b96724eSRicardo Labiaga static int 8145eebde23SSuresh Jayaraman do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) 8151da177e4SLinus Torvalds { 8161da177e4SLinus Torvalds struct inode *inode = filp->f_mapping->host; 8171da177e4SLinus Torvalds int status; 8181da177e4SLinus Torvalds 8191da177e4SLinus Torvalds /* 8201da177e4SLinus Torvalds * Flush all pending writes before doing anything 8211da177e4SLinus Torvalds * with locks.. 8221da177e4SLinus Torvalds */ 82329884df0STrond Myklebust status = nfs_sync_mapping(filp->f_mapping); 82429884df0STrond Myklebust if (status != 0) 8251da177e4SLinus Torvalds goto out; 8261da177e4SLinus Torvalds 8275eebde23SSuresh Jayaraman /* 8285eebde23SSuresh Jayaraman * Use local locking if mounted with "-onolock" or with appropriate 8295eebde23SSuresh Jayaraman * "-olocal_lock=" 8305eebde23SSuresh Jayaraman */ 8315eebde23SSuresh Jayaraman if (!is_local) 8321da177e4SLinus Torvalds status = NFS_PROTO(inode)->lock(filp, cmd, fl); 833c4d7c402STrond Myklebust else 8341da177e4SLinus Torvalds status = do_vfs_lock(filp, fl); 8351da177e4SLinus Torvalds if (status < 0) 8361da177e4SLinus Torvalds goto out; 8376b96724eSRicardo Labiaga 8381da177e4SLinus Torvalds /* 8396b96724eSRicardo Labiaga * Revalidate the cache if the server has time stamps granular 8406b96724eSRicardo Labiaga * enough to detect subsecond changes. Otherwise, clear the 8416b96724eSRicardo Labiaga * cache to prevent missing any changes. 8426b96724eSRicardo Labiaga * 8431da177e4SLinus Torvalds * This makes locking act as a cache coherency point. 
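 *
 * For illustration only: a userspace sketch of using that property (the
 * path /mnt/nfs/counter is an assumption). Dirty pages are written back
 * before the lock request and the cache is revalidated or zapped once
 * the lock is held, so wrapping shared-file updates in fcntl() locks is
 * the usual way for NFS clients to see each other's writes:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *		int fd = open("/mnt/nfs/counter", O_RDWR);
 *		if (fd < 0)
 *			return 1;
 *		fcntl(fd, F_SETLKW, &fl);  // lock: writeback + cache revalidation point
 *		// ... read the current value, modify it, write it back ...
 *		fl.l_type = F_UNLCK;
 *		fcntl(fd, F_SETLK, &fl);   // unlock: dirty pages are flushed first
 *		close(fd);
 *		return 0;
 *	}
 *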
8441da177e4SLinus Torvalds */ 84529884df0STrond Myklebust nfs_sync_mapping(filp->f_mapping); 846011e2a7fSBryan Schumaker if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) { 8476b96724eSRicardo Labiaga if (is_time_granular(&NFS_SERVER(inode)->time_delta)) 8486b96724eSRicardo Labiaga __nfs_revalidate_inode(NFS_SERVER(inode), inode); 8496b96724eSRicardo Labiaga else 8501da177e4SLinus Torvalds nfs_zap_caches(inode); 8516b96724eSRicardo Labiaga } 8521da177e4SLinus Torvalds out: 8531da177e4SLinus Torvalds return status; 8541da177e4SLinus Torvalds } 8551da177e4SLinus Torvalds 8561da177e4SLinus Torvalds /* 8571da177e4SLinus Torvalds * Lock a (portion of) a file 8581da177e4SLinus Torvalds */ 859ce4ef7c0SBryan Schumaker int nfs_lock(struct file *filp, int cmd, struct file_lock *fl) 8601da177e4SLinus Torvalds { 8611da177e4SLinus Torvalds struct inode *inode = filp->f_mapping->host; 8622116271aSTrond Myklebust int ret = -ENOLCK; 8635eebde23SSuresh Jayaraman int is_local = 0; 8641da177e4SLinus Torvalds 8656de1472fSAl Viro dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n", 8666de1472fSAl Viro filp, fl->fl_type, fl->fl_flags, 8671da177e4SLinus Torvalds (long long)fl->fl_start, (long long)fl->fl_end); 8686da24bc9SChuck Lever 86991d5b470SChuck Lever nfs_inc_stats(inode, NFSIOS_VFSLOCK); 8701da177e4SLinus Torvalds 8711da177e4SLinus Torvalds /* No mandatory locks over NFS */ 872dfad9441SPavel Emelyanov if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) 8732116271aSTrond Myklebust goto out_err; 8742116271aSTrond Myklebust 8755eebde23SSuresh Jayaraman if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL) 8765eebde23SSuresh Jayaraman is_local = 1; 8775eebde23SSuresh Jayaraman 8782116271aSTrond Myklebust if (NFS_PROTO(inode)->lock_check_bounds != NULL) { 8792116271aSTrond Myklebust ret = NFS_PROTO(inode)->lock_check_bounds(fl); 8802116271aSTrond Myklebust if (ret < 0) 8812116271aSTrond Myklebust goto out_err; 8822116271aSTrond Myklebust } 8831da177e4SLinus Torvalds 8841da177e4SLinus Torvalds if (IS_GETLK(cmd)) 8855eebde23SSuresh Jayaraman ret = do_getlk(filp, cmd, fl, is_local); 8862116271aSTrond Myklebust else if (fl->fl_type == F_UNLCK) 8875eebde23SSuresh Jayaraman ret = do_unlk(filp, cmd, fl, is_local); 8882116271aSTrond Myklebust else 8895eebde23SSuresh Jayaraman ret = do_setlk(filp, cmd, fl, is_local); 8902116271aSTrond Myklebust out_err: 8912116271aSTrond Myklebust return ret; 8921da177e4SLinus Torvalds } 89389d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_lock); 8941da177e4SLinus Torvalds 8951da177e4SLinus Torvalds /* 8961da177e4SLinus Torvalds * Lock a (portion of) a file 8971da177e4SLinus Torvalds */ 898ce4ef7c0SBryan Schumaker int nfs_flock(struct file *filp, int cmd, struct file_lock *fl) 8991da177e4SLinus Torvalds { 9005eebde23SSuresh Jayaraman struct inode *inode = filp->f_mapping->host; 9015eebde23SSuresh Jayaraman int is_local = 0; 9025eebde23SSuresh Jayaraman 9036de1472fSAl Viro dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n", 9046de1472fSAl Viro filp, fl->fl_type, fl->fl_flags); 9051da177e4SLinus Torvalds 9061da177e4SLinus Torvalds if (!(fl->fl_flags & FL_FLOCK)) 9071da177e4SLinus Torvalds return -ENOLCK; 9081da177e4SLinus Torvalds 909ad0fcd4eSJeff Layton /* 910ad0fcd4eSJeff Layton * The NFSv4 protocol doesn't support LOCK_MAND, which is not part of 911ad0fcd4eSJeff Layton * any standard. In principle we might be able to support LOCK_MAND 912ad0fcd4eSJeff Layton * on NFSv2/3 since NLMv3/4 support DOS share modes, but for now the 913ad0fcd4eSJeff Layton * NFS code is not set up for it. 
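 *
 * For illustration only: a userspace sketch (the path /mnt/nfs/lockfile
 * is an assumption). A plain flock() request is translated into a
 * whole-file POSIX lock on the server (see the comment further down),
 * while a LOCK_MAND request fails with -EINVAL as checked below:
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/nfs/lockfile", O_RDWR | O_CREAT, 0644);
 *		if (fd < 0)
 *			return 1;
 *		if (flock(fd, LOCK_EX) < 0)  // becomes a POSIX lock on the server
 *			return 1;
 *		// ... critical section shared with fcntl()-locking clients ...
 *		flock(fd, LOCK_UN);
 *		close(fd);
 *		return 0;
 *	}
 *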
914ad0fcd4eSJeff Layton */ 915ad0fcd4eSJeff Layton if (fl->fl_type & LOCK_MAND) 916ad0fcd4eSJeff Layton return -EINVAL; 917ad0fcd4eSJeff Layton 9185eebde23SSuresh Jayaraman if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK) 9195eebde23SSuresh Jayaraman is_local = 1; 9205eebde23SSuresh Jayaraman 9211da177e4SLinus Torvalds /* We're simulating flock() locks using posix locks on the server */ 9221da177e4SLinus Torvalds if (fl->fl_type == F_UNLCK) 9235eebde23SSuresh Jayaraman return do_unlk(filp, cmd, fl, is_local); 9245eebde23SSuresh Jayaraman return do_setlk(filp, cmd, fl, is_local); 9251da177e4SLinus Torvalds } 92689d77c8fSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_flock); 927370f6599SJ. Bruce Fields 9280486958fSJeff Layton const struct file_operations nfs_file_operations = { 9290486958fSJeff Layton .llseek = nfs_file_llseek, 9303aa2d199SAl Viro .read = new_sync_read, 931edaf4369SAl Viro .write = new_sync_write, 9323aa2d199SAl Viro .read_iter = nfs_file_read, 933edaf4369SAl Viro .write_iter = nfs_file_write, 9340486958fSJeff Layton .mmap = nfs_file_mmap, 9350486958fSJeff Layton .open = nfs_file_open, 9360486958fSJeff Layton .flush = nfs_file_flush, 9370486958fSJeff Layton .release = nfs_file_release, 9380486958fSJeff Layton .fsync = nfs_file_fsync, 9390486958fSJeff Layton .lock = nfs_lock, 9400486958fSJeff Layton .flock = nfs_flock, 9410486958fSJeff Layton .splice_read = nfs_file_splice_read, 9424da54c21SAl Viro .splice_write = iter_file_splice_write, 9430486958fSJeff Layton .check_flags = nfs_check_flags, 9441c994a09SJeff Layton .setlease = simple_nosetlease, 9450486958fSJeff Layton }; 946ddda8e0aSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_file_operations); 947
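/*
 * For illustration only: a userspace sketch of the mmap path wired up
 * above through nfs_file_mmap() and nfs_vm_page_mkwrite(). The path
 * /mnt/nfs/map.dat is an assumption and the file is assumed to be at
 * least one page long. The first store into a shared mapping of an NFS
 * page faults through ->page_mkwrite, and msync() then writes the
 * dirtied page back to the server.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/nfs/map.dat", O_RDWR);
	if (fd < 0)
		return 1;
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}
	p[0] = 'x';			/* triggers ->page_mkwrite for this page */
	msync(p, 4096, MS_SYNC);	/* flush the dirtied page to the server */
	munmap(p, 4096);
	close(fd);
	return 0;
}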