/*
 * linux/fs/nfs/file.c
 *
 * Copyright (C) 1992 Rick Sladkey
 *
 * Changes Copyright (C) 1994 by Florian La Roche
 *   - Do not copy data too often around in the kernel.
 *   - In nfs_file_read the return value of kmalloc wasn't checked.
 *   - Put in a better version of read look-ahead buffering. Original idea
 *     and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 * Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 * Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 * nfs regular file handling functions
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/swap.h>

#include <linux/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_FILE

static const struct vm_operations_struct nfs_file_vm_ops;

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode)	(0)
#endif

int nfs_check_flags(int flags)
{
	if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nfs_check_flags);

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
	int res;

	dprintk("NFS: open file(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
	res = nfs_check_flags(filp->f_flags);
	if (res)
		return res;

	res = nfs_open(inode, filp);
	return res;
}

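/*
 * Release an NFS file: bump the VFSRELEASE statistics and drop the open
 * context that was attached to the file when it was opened.
 */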
int
nfs_file_release(struct inode *inode, struct file *filp)
{
	dprintk("NFS: release(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
	nfs_file_clear_open_context(filp);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_file_release);

/**
 * nfs_revalidate_file_size - Revalidate the file size
 * @inode: pointer to inode struct
 * @filp: pointer to struct file
 *
 * Revalidates the file length. This is basically a wrapper around
 * nfs_revalidate_inode() that takes into account the fact that we may
 * have cached writes (in which case we don't care about the server's
 * idea of what the file length is), or O_DIRECT (in which case we
 * shouldn't trust the cache).
 */
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
	struct nfs_server *server = NFS_SERVER(inode);

	if (filp->f_flags & O_DIRECT)
		goto force_reval;
	if (nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE))
		goto force_reval;
	return 0;
force_reval:
	return __nfs_revalidate_inode(server, inode);
}

loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
	dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
			filp, offset, whence);

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		struct inode *inode = filp->f_mapping->host;

		int retval = nfs_revalidate_file_size(inode, filp);
		if (retval < 0)
			return (loff_t)retval;
	}

	return generic_file_llseek(filp, offset, whence);
}
EXPORT_SYMBOL_GPL(nfs_file_llseek);

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/* Flush writes to the server and return any errors */
	return vfs_fsync(file, 0);
}

ssize_t
nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t result;

	if (iocb->ki_flags & IOCB_DIRECT)
		return nfs_file_direct_read(iocb, to);

	dprintk("NFS: read(%pD2, %zu@%lu)\n",
		iocb->ki_filp,
		iov_iter_count(to), (unsigned long) iocb->ki_pos);

	nfs_start_io_read(inode);
	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
	if (!result) {
		result = generic_file_read_iter(iocb, to);
		if (result > 0)
			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
	}
	nfs_end_io_read(inode);
	return result;
}
EXPORT_SYMBOL_GPL(nfs_file_read);

int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct inode *inode = file_inode(file);
	int status;

	dprintk("NFS: mmap(%pD2)\n", file);

	/* Note: generic_file_mmap() returns ENOSYS on nommu systems
	 *       so we call that before revalidating the mapping
	 */
	status = generic_file_mmap(file, vma);
	if (!status) {
		vma->vm_ops = &nfs_file_vm_ops;
		status = nfs_revalidate_mapping(inode, file->f_mapping);
	}
	return status;
}
EXPORT_SYMBOL_GPL(nfs_file_mmap);

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 *
 * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
 * disk, but it retrieves and clears ctx->error after synching, despite
 * the two being set at the same time in nfs_context_set_write_error().
 * This is because the former is used to notify the _next_ call to
 * nfs_file_write() that a write error occurred, and hence cause it to
 * fall back to doing a synchronous write.
 */
static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode *inode = file_inode(file);
	int do_resend, status;
	int ret = 0;

	dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);

	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
	do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
	status = nfs_commit_inode(inode, FLUSH_SYNC);
	if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags)) {
		ret = xchg(&ctx->error, 0);
		if (ret)
			goto out;
	}
	if (status < 0) {
		ret = status;
		goto out;
	}
	do_resend |= test_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
	if (do_resend)
		ret = -EAGAIN;
out:
	return ret;
}

int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int ret;
	struct inode *inode = file_inode(file);

	trace_nfs_fsync_enter(inode);

	do {
		struct nfs_open_context *ctx = nfs_file_open_context(file);
		ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
		if (test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags)) {
			int ret2 = xchg(&ctx->error, 0);
			if (ret2)
				ret = ret2;
		}
		if (ret != 0)
			break;
		ret = nfs_file_fsync_commit(file, datasync);
		if (!ret)
			ret = pnfs_sync_inode(inode, !!datasync);
		/*
		 * If nfs_file_fsync_commit detected a server reboot, then
		 * resend all dirty pages that might have been covered by
		 * the NFS_CONTEXT_RESEND_WRITES flag
		 */
		start = 0;
		end = LLONG_MAX;
	} while (ret == -EAGAIN);

	trace_nfs_fsync_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);

/*
 * Decide whether a read/modify/write cycle may be more efficient
 * than a modify/write/read cycle when writing to a page in the
 * page cache.
 *
 * Some pNFS layout drivers can only read/write at a certain block
 * granularity like all block devices and therefore we must perform
 * read/modify/write whenever a page hasn't been read yet and the data
 * to be written there is not aligned to a block boundary and/or
 * smaller than the block size.
 *
 * The modify/write/read cycle may occur if a page is read before
 * being completely filled by the writer. In this situation, the
 * page must be completely written to stable storage on the server
 * before it can be refilled by reading in the page from the server.
 * This can lead to expensive, small, FILE_SYNC mode writes being
 * done.
 *
 * It may be more efficient to read the page first if the file is
 * open for reading in addition to writing, the page is not marked
 * as Uptodate, it is not dirty or waiting to be committed,
 * indicating that it was previously allocated and then modified,
 * that there were valid bytes of data in that range of the file,
 * and that the new data won't completely replace the old data in
 * that range of the file.
 */
static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
{
	unsigned int pglen = nfs_page_length(page);
	unsigned int offset = pos & (PAGE_SIZE - 1);
	unsigned int end = offset + len;

	return !pglen || (end >= pglen && !offset);
}

static bool nfs_want_read_modify_write(struct file *file, struct page *page,
			loff_t pos, unsigned int len)
{
	/*
	 * Up-to-date pages, those with ongoing or full-page write
	 * don't need read/modify/write
	 */
	if (PageUptodate(page) || PagePrivate(page) ||
	    nfs_full_page_write(page, pos, len))
		return false;

	if (pnfs_ld_read_whole_page(file->f_mapping->host))
		return true;
	/* Open for reading too? */
	if (file->f_mode & FMODE_READ)
		return true;
	return false;
}

/*
 * This does the "real" work of the write. We must allocate and lock the
 * page to be sent back to the generic routine, which then copies the
 * data from user space.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int once_thru = 0;

	dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
		file, mapping->host->i_ino, len, (long long) pos);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ret = nfs_flush_incompatible(file, page);
	if (ret) {
		unlock_page(page);
		put_page(page);
	} else if (!once_thru &&
		   nfs_want_read_modify_write(file, page, pos, len)) {
		once_thru = 1;
		ret = nfs_readpage(file, page);
		put_page(page);
		if (!ret)
			goto start;
	}
	return ret;
}

static int nfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	int status;

	dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
		file, mapping->host->i_ino, len, (long long) pos);

	/*
	 * Zero any uninitialised parts of the page, and then mark the page
	 * as up to date if it turns out that we're extending the file.
	 */
	if (!PageUptodate(page)) {
		unsigned pglen = nfs_page_length(page);
		unsigned end = offset + copied;

		if (pglen == 0) {
			zero_user_segments(page, 0, offset,
					end, PAGE_SIZE);
			SetPageUptodate(page);
		} else if (end >= pglen) {
			zero_user_segment(page, end, PAGE_SIZE);
			if (offset == 0)
				SetPageUptodate(page);
		} else
			zero_user_segment(page, pglen, PAGE_SIZE);
	}

	status = nfs_updatepage(file, page, offset, copied);

	unlock_page(page);
	put_page(page);

	if (status < 0)
		return status;
	NFS_I(mapping->host)->write_io += copied;

	if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
		status = nfs_wb_all(mapping->host);
		if (status < 0)
			return status;
	}

	return copied;
}

/*
 * Partially or wholly invalidate a page
 * - Release the private state associated with a page if undergoing complete
 *   page invalidation
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 */
static void nfs_invalidate_page(struct page *page, unsigned int offset,
				unsigned int length)
{
	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
		 page, offset, length);

	if (offset != 0 || length < PAGE_SIZE)
		return;
	/* Cancel any unstarted writes on this page */
	nfs_wb_page_cancel(page_file_mapping(page)->host, page);

	nfs_fscache_invalidate_page(page, page->mapping->host);
}

/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}

static void nfs_check_dirty_writeback(struct page *page,
				bool *dirty, bool *writeback)
{
	struct nfs_inode *nfsi;
	struct address_space *mapping = page_file_mapping(page);

	if (!mapping || PageSwapCache(page))
		return;

	/*
	 * Check if an unstable page is currently being committed and
	 * if so, have the VM treat it as if the page is under writeback
	 * so it will not block due to pages that will shortly be freeable.
	 */
	nfsi = NFS_I(mapping->host);
	if (atomic_read(&nfsi->commit_info.rpcs_out)) {
		*writeback = true;
		return;
	}

	/*
	 * If PagePrivate() is set, then the page is not freeable and as the
	 * inode is not being committed, it's not going to be cleaned in the
	 * near future so treat it as dirty
	 */
	if (PagePrivate(page))
		*dirty = true;
}

/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
		inode->i_ino, (long long)page_offset(page));

	nfs_fscache_wait_on_page_write(nfsi, page);
	return nfs_wb_page(inode, page);
}

static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
						sector_t *span)
{
	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

	*span = sis->pages;

	return rpc_clnt_swap_activate(clnt);
}

static void nfs_swap_deactivate(struct file *file)
{
	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

	rpc_clnt_swap_deactivate(clnt);
}

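/*
 * Address space operations for NFS regular files.  Reads and writes are
 * handled by the NFS page cache helpers; write_begin/write_end (above)
 * drive the buffered write path, invalidatepage/releasepage/launder_page
 * manage per-page private state and fscache, and the swap_activate/
 * swap_deactivate hooks let a swapfile live on NFS.
 */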
const struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.write_begin = nfs_write_begin,
	.write_end = nfs_write_end,
	.invalidatepage = nfs_invalidate_page,
	.releasepage = nfs_release_page,
	.direct_IO = nfs_direct_IO,
#ifdef CONFIG_MIGRATION
	.migratepage = nfs_migrate_page,
#endif
	.launder_page = nfs_launder_page,
	.is_dirty_writeback = nfs_check_dirty_writeback,
	.error_remove_page = generic_error_remove_page,
	.swap_activate = nfs_swap_activate,
	.swap_deactivate = nfs_swap_deactivate,
};

/*
 * Notification that a PTE pointing to an NFS page is about to be made
 * writable, implying that someone is about to modify the page through a
 * shared-writable mapping
 */
static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);
	unsigned pagelen;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct address_space *mapping;

	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
		filp, filp->f_mapping->host->i_ino,
		(long long)page_offset(page));

	sb_start_pagefault(inode->i_sb);

	/* make sure the cache has finished storing the page */
	nfs_fscache_wait_on_page_write(NFS_I(inode), page);

	wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
			nfs_wait_bit_killable, TASK_KILLABLE);

	lock_page(page);
	mapping = page_file_mapping(page);
	if (mapping != inode->i_mapping)
		goto out_unlock;

	wait_on_page_writeback(page);

	pagelen = nfs_page_length(page);
	if (pagelen == 0)
		goto out_unlock;

	ret = VM_FAULT_LOCKED;
	if (nfs_flush_incompatible(filp, page) == 0 &&
	    nfs_updatepage(filp, page, 0, pagelen) == 0)
		goto out;

	ret = VM_FAULT_SIGBUS;
out_unlock:
	unlock_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

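/*
 * VM operations for mmap()ed NFS files: faults are served by the generic
 * page cache code, while page_mkwrite hooks writable faults so that the
 * page goes through nfs_flush_incompatible()/nfs_updatepage() above.
 */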
static const struct vm_operations_struct nfs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = nfs_vm_page_mkwrite,
};

static int nfs_need_check_write(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx;

	ctx = nfs_file_open_context(filp);
	if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags) ||
	    nfs_ctx_key_to_expire(ctx, inode))
		return 1;
	return 0;
}

ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	unsigned long written = 0;
	ssize_t result;

	result = nfs_key_timeout_notify(file, inode);
	if (result)
		return result;

	if (iocb->ki_flags & IOCB_DIRECT)
		return nfs_file_direct_write(iocb, from);

	dprintk("NFS: write(%pD2, %zu@%Ld)\n",
		file, iov_iter_count(from), (long long) iocb->ki_pos);

	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_flags & IOCB_APPEND) {
		result = nfs_revalidate_file_size(inode, file);
		if (result)
			goto out;
	}
	if (iocb->ki_pos > i_size_read(inode))
		nfs_revalidate_mapping(inode, file->f_mapping);

	nfs_start_io_write(inode);
	result = generic_write_checks(iocb, from);
	if (result > 0) {
		current->backing_dev_info = inode_to_bdi(inode);
		result = generic_perform_write(file, from, iocb->ki_pos);
		current->backing_dev_info = NULL;
	}
	nfs_end_io_write(inode);
	if (result <= 0)
		goto out;

	written = result;
	iocb->ki_pos += written;
	result = generic_write_sync(iocb, written);
	if (result < 0)
		goto out;

	/* Return error values */
	if (nfs_need_check_write(file, inode)) {
		int err = vfs_fsync(file, 0);
		if (err < 0)
			result = err;
	}
	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(nfs_file_write);

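/*
 * Locking helpers shared by nfs_lock() and nfs_flock() below.  "is_local"
 * selects client-side locking (the -onolock / -olocal_lock= mount options)
 * instead of asking the server via NFS_PROTO(inode)->lock().
 */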
static int
do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status = 0;
	unsigned int saved_type = fl->fl_type;

	/* Try local locking first */
	posix_test_lock(filp, fl);
	if (fl->fl_type != F_UNLCK) {
		/* found a conflict */
		goto out;
	}
	fl->fl_type = saved_type;

	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
		goto out_noconflict;

	if (is_local)
		goto out_noconflict;

	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
	return status;
out_noconflict:
	fl->fl_type = F_UNLCK;
	goto out;
}

static int
do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	struct nfs_lock_context *l_ctx;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	vfs_fsync(filp, 0);

	l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
	if (!IS_ERR(l_ctx)) {
		status = nfs_iocounter_wait(l_ctx);
		nfs_put_lock_context(l_ctx);
		/* NOTE: special case
		 *	If we're signalled while cleaning up locks on process exit, we
		 *	still need to complete the unlock.
		 */
		if (status < 0 && !(fl->fl_flags & FL_CLOSE))
			return status;
	}

	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = locks_lock_file_wait(filp, fl);
	return status;
}

static int
do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = nfs_sync_mapping(filp->f_mapping);
	if (status != 0)
		goto out;

	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = locks_lock_file_wait(filp, fl);
	if (status < 0)
		goto out;

	/*
	 * Invalidate cache to prevent missing any changes.  If
	 * the file is mapped, clear the page cache as well so
	 * those mappings will be loaded.
	 *
	 * This makes locking act as a cache coherency point.
	 */
	nfs_sync_mapping(filp->f_mapping);
	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
		nfs_zap_caches(inode);
		if (mapping_mapped(filp->f_mapping))
			nfs_revalidate_mapping(inode, filp->f_mapping);
	}
out:
	return status;
}

/*
 * Lock a (portion of) a file
 */
int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int ret = -ENOLCK;
	int is_local = 0;

	dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
			filp, fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);

	nfs_inc_stats(inode, NFSIOS_VFSLOCK);

	/* No mandatory locks over NFS */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
		is_local = 1;

	if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
		ret = NFS_PROTO(inode)->lock_check_bounds(fl);
		if (ret < 0)
			goto out_err;
	}

	if (IS_GETLK(cmd))
		ret = do_getlk(filp, cmd, fl, is_local);
	else if (fl->fl_type == F_UNLCK)
		ret = do_unlk(filp, cmd, fl, is_local);
	else
		ret = do_setlk(filp, cmd, fl, is_local);
out_err:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_lock);

/*
 * Lock a (portion of) a file
 */
int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int is_local = 0;

	dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
			filp, fl->fl_type, fl->fl_flags);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/*
	 * The NFSv4 protocol doesn't support LOCK_MAND, which is not part of
	 * any standard. In principle we might be able to support LOCK_MAND
	 * on NFSv2/3 since NLMv3/4 support DOS share modes, but for now the
	 * NFS code is not set up for it.
	 */
	if (fl->fl_type & LOCK_MAND)
		return -EINVAL;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
		is_local = 1;

	/* We're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl, is_local);
	return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);

const struct file_operations nfs_file_operations = {
	.llseek = nfs_file_llseek,
	.read_iter = nfs_file_read,
	.write_iter = nfs_file_write,
	.mmap = nfs_file_mmap,
	.open = nfs_file_open,
	.flush = nfs_file_flush,
	.release = nfs_file_release,
	.fsync = nfs_file_fsync,
	.lock = nfs_lock,
	.flock = nfs_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.check_flags = nfs_check_flags,
	.setlease = simple_nosetlease,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);