// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/file.c
 *
 * Copyright (C) 1992 Rick Sladkey
 *
 * Changes Copyright (C) 1994 by Florian La Roche
 *   - Do not copy data too often around in the kernel.
 *   - In nfs_file_read the return value of kmalloc wasn't checked.
 *   - Put in a better version of read look-ahead buffering. Original idea
 *     and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 * Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 * Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 * nfs regular file handling functions
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/swap.h>
#include <linux/compaction.h>

#include <linux/uaccess.h>
#include <linux/filelock.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_FILE

static const struct vm_operations_struct nfs_file_vm_ops;

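/*
 * Validate open flags: the O_APPEND|O_DIRECT combination is not supported
 * and is rejected with -EINVAL.
 */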
int nfs_check_flags(int flags)
{
	if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nfs_check_flags);

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
	int res;

	dprintk("NFS: open file(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
	res = nfs_check_flags(filp->f_flags);
	if (res)
		return res;

	res = nfs_open(inode, filp);
	if (res == 0)
		filp->f_mode |= FMODE_CAN_ODIRECT;
	return res;
}

int
nfs_file_release(struct inode *inode, struct file *filp)
{
	dprintk("NFS: release(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
	nfs_file_clear_open_context(filp);
	nfs_fscache_release_file(inode, filp);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_file_release);

/**
 * nfs_revalidate_file_size - Revalidate the file size
 * @inode: pointer to inode struct
 * @filp: pointer to struct file
 *
 * Revalidates the file length. This is basically a wrapper around
 * nfs_revalidate_inode() that takes into account the fact that we may
 * have cached writes (in which case we don't care about the server's
 * idea of what the file length is), or O_DIRECT (in which case we
 * shouldn't trust the cache).
 */
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
	struct nfs_server *server = NFS_SERVER(inode);

	if (filp->f_flags & O_DIRECT)
		goto force_reval;
	if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_SIZE))
		goto force_reval;
	return 0;
force_reval:
	return __nfs_revalidate_inode(server, inode);
}

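/*
 * ->llseek() for NFS regular files: revalidate the cached file size when
 * the new position depends on it, then defer to generic_file_llseek().
 */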
loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
	dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
			filp, offset, whence);

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		struct inode *inode = filp->f_mapping->host;

		int retval = nfs_revalidate_file_size(inode, filp);
		if (retval < 0)
			return (loff_t)retval;
	}

	return generic_file_llseek(filp, offset, whence);
}
EXPORT_SYMBOL_GPL(nfs_file_llseek);

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	errseq_t since;

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/* Flush writes to the server and return any errors */
	since = filemap_sample_wb_err(file->f_mapping);
	nfs_wb_all(inode);
	return filemap_check_wb_err(file->f_mapping, since);
}

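/*
 * Buffered read path: revalidate the page cache against the server's
 * attributes, then hand off to generic_file_read_iter(). O_DIRECT reads
 * bypass this and go through nfs_file_direct_read().
 */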
ssize_t
nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t result;

	if (iocb->ki_flags & IOCB_DIRECT)
		return nfs_file_direct_read(iocb, to, false);

	dprintk("NFS: read(%pD2, %zu@%lu)\n",
		iocb->ki_filp,
		iov_iter_count(to), (unsigned long) iocb->ki_pos);

	nfs_start_io_read(inode);
	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
	if (!result) {
		result = generic_file_read_iter(iocb, to);
		if (result > 0)
			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
	}
	nfs_end_io_read(inode);
	return result;
}
EXPORT_SYMBOL_GPL(nfs_file_read);

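/*
 * splice(2) read path: same cache revalidation as nfs_file_read(), but the
 * data is spliced straight out of the page cache via filemap_splice_read().
 */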
ssize_t
nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
		     size_t len, unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t result;

	dprintk("NFS: splice_read(%pD2, %zu@%llu)\n", in, len, *ppos);

	nfs_start_io_read(inode);
	result = nfs_revalidate_mapping(inode, in->f_mapping);
	if (!result) {
		result = filemap_splice_read(in, ppos, pipe, len, flags);
		if (result > 0)
			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
	}
	nfs_end_io_read(inode);
	return result;
}
EXPORT_SYMBOL_GPL(nfs_file_splice_read);

int
nfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int status;

	dprintk("NFS: mmap(%pD2)\n", file);

	/* Note: generic_file_mmap() returns ENOSYS on nommu systems
	 *       so we call that before revalidating the mapping
	 */
	status = generic_file_mmap(file, vma);
	if (!status) {
		vma->vm_ops = &nfs_file_vm_ops;
		status = nfs_revalidate_mapping(inode, file->f_mapping);
	}
	return status;
}
EXPORT_SYMBOL_GPL(nfs_file_mmap);

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 */
static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
	struct inode *inode = file_inode(file);
	int ret, ret2;

	dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);

	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	ret2 = file_check_and_advance_wb_err(file);
	if (ret2 < 0)
		return ret2;
	return ret;
}

int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct nfs_inode *nfsi = NFS_I(inode);
	long save_nredirtied = atomic_long_read(&nfsi->redirtied_pages);
	long nredirtied;
	int ret;

	trace_nfs_fsync_enter(inode);

	for (;;) {
		ret = file_write_and_wait_range(file, start, end);
		if (ret != 0)
			break;
		ret = nfs_file_fsync_commit(file, datasync);
		if (ret != 0)
			break;
		ret = pnfs_sync_inode(inode, !!datasync);
		if (ret != 0)
			break;
		nredirtied = atomic_long_read(&nfsi->redirtied_pages);
		if (nredirtied == save_nredirtied)
			break;
		save_nredirtied = nredirtied;
	}

	trace_nfs_fsync_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);

/*
 * Decide whether a read/modify/write cycle may be more efficient
 * than a modify/write/read cycle when writing to a page in the
 * page cache.
 *
 * Some pNFS layout drivers can only read/write at a certain block
 * granularity like all block devices, and therefore we must perform
 * read/modify/write whenever a page hasn't been read yet and the data
 * to be written there is not aligned to a block boundary and/or
 * smaller than the block size.
 *
 * The modify/write/read cycle may occur if a page is read before
 * being completely filled by the writer. In this situation, the
 * page must be completely written to stable storage on the server
 * before it can be refilled by reading in the page from the server.
 * This can lead to expensive, small, FILE_SYNC mode writes being
 * done.
 *
 * It may be more efficient to read the page first if the file is
 * open for reading in addition to writing, the page is not marked
 * as Uptodate, it is not dirty or waiting to be committed,
 * indicating that it was previously allocated and then modified,
 * that there were valid bytes of data in that range of the file,
 * and that the new data won't completely replace the old data in
 * that range of the file.
 */
static bool nfs_folio_is_full_write(struct folio *folio, loff_t pos,
				    unsigned int len)
{
	unsigned int pglen = nfs_folio_length(folio);
	unsigned int offset = offset_in_folio(folio, pos);
	unsigned int end = offset + len;

	return !pglen || (end >= pglen && !offset);
}

static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
				       loff_t pos, unsigned int len)
{
	/*
	 * Up-to-date pages, those with ongoing or full-page write
	 * don't need read/modify/write
	 */
	if (folio_test_uptodate(folio) || folio_test_private(folio) ||
	    nfs_folio_is_full_write(folio, pos, len))
		return false;

	if (pnfs_ld_read_whole_page(file_inode(file)))
		return true;
	/* Open for reading too? */
	if (file->f_mode & FMODE_READ)
		return true;
	return false;
}

/*
 * This does the "real" work of the write. We must allocate and lock the
 * page to be sent back to the generic routine, which then copies the
 * data from user space.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, struct page **pagep,
			   void **fsdata)
{
	struct folio *folio;
	int once_thru = 0;
	int ret;

	dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
		 file, mapping->host->i_ino, len, (long long) pos);

start:
	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*pagep = &folio->page;

	ret = nfs_flush_incompatible(file, folio);
	if (ret) {
		folio_unlock(folio);
		folio_put(folio);
	} else if (!once_thru &&
		   nfs_want_read_modify_write(file, folio, pos, len)) {
		once_thru = 1;
		ret = nfs_read_folio(file, folio);
		folio_put(folio);
		if (!ret)
			goto start;
	}
	return ret;
}

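/*
 * Complete a buffered write to a folio: zero any uninitialised parts, pass
 * the copied range to nfs_update_folio(), and flush everything if the open
 * context's credentials are close to expiring.
 */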
static int nfs_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct folio *folio = page_folio(page);
	unsigned offset = offset_in_folio(folio, pos);
	int status;

	dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
		 file, mapping->host->i_ino, len, (long long) pos);

	/*
	 * Zero any uninitialised parts of the page, and then mark the page
	 * as up to date if it turns out that we're extending the file.
	 */
	if (!folio_test_uptodate(folio)) {
		size_t fsize = folio_size(folio);
		unsigned pglen = nfs_folio_length(folio);
		unsigned end = offset + copied;

		if (pglen == 0) {
			folio_zero_segments(folio, 0, offset, end, fsize);
			folio_mark_uptodate(folio);
		} else if (end >= pglen) {
			folio_zero_segment(folio, end, fsize);
			if (offset == 0)
				folio_mark_uptodate(folio);
		} else
			folio_zero_segment(folio, pglen, fsize);
	}

	status = nfs_update_folio(file, folio, offset, copied);

	folio_unlock(folio);
	folio_put(folio);

	if (status < 0)
		return status;
	NFS_I(mapping->host)->write_io += copied;

	if (nfs_ctx_key_to_expire(ctx, mapping->host))
		nfs_wb_all(mapping->host);

	return copied;
}

/*
 * Partially or wholly invalidate a page
 * - Release the private state associated with a page if undergoing complete
 *   page invalidation
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 */
static void nfs_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
		 folio->index, offset, length);

	if (offset != 0 || length < folio_size(folio))
		return;
	/* Cancel any unstarted writes on this page */
	nfs_wb_folio_cancel(inode, folio);
	folio_wait_fscache(folio);
	trace_nfs_invalidate_folio(inode, folio);
}

/*
 * Attempt to release the private state associated with a folio
 * - Called if either private or fscache flags are set on the folio
 * - Caller holds folio lock
 * - Return true (may release folio) or false (may not)
 */
static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
{
	dfprintk(PAGECACHE, "NFS: release_folio(%p)\n", folio);

	/* If the private flag is set, then the folio is not freeable */
	if (folio_test_private(folio)) {
		if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
		    current_is_kswapd() || current_is_kcompactd())
			return false;
		if (nfs_wb_folio(folio_file_mapping(folio)->host, folio) < 0)
			return false;
	}
	return nfs_fscache_release_folio(folio, gfp);
}

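/*
 * Tell the VM how to account this folio during reclaim: folios belonging to
 * an inode with a commit in flight count as "writeback", and folios that
 * still carry private (unstable write) state count as "dirty".
 */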
static void nfs_check_dirty_writeback(struct folio *folio,
				      bool *dirty, bool *writeback)
{
	struct nfs_inode *nfsi;
	struct address_space *mapping = folio->mapping;

	/*
	 * Check if an unstable folio is currently being committed and
	 * if so, have the VM treat it as if the folio is under writeback
	 * so it will not block due to folios that will shortly be freeable.
	 */
	nfsi = NFS_I(mapping->host);
	if (atomic_read(&nfsi->commit_info.rpcs_out)) {
		*writeback = true;
		return;
	}

	/*
	 * If the private flag is set, then the folio is not freeable
	 * and as the inode is not being committed, it's not going to
	 * be cleaned in the near future so treat it as dirty
	 */
	if (folio_test_private(folio))
		*dirty = true;
}

/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_folio(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	int ret;

	dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
		 inode->i_ino, folio_pos(folio));

	folio_wait_fscache(folio);
	ret = nfs_wb_folio(inode, folio);
	trace_nfs_launder_folio_done(inode, folio, ret);
	return ret;
}

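/*
 * Prepare an NFS file for use as swap space: the file must not be sparse,
 * the RPC client is switched into swap mode, and a single swap extent
 * covering the whole file is registered with the swap code.
 */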
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			     sector_t *span)
{
	unsigned long blocks;
	long long isize;
	int ret;
	struct inode *inode = file_inode(file);
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_client *cl = NFS_SERVER(inode)->nfs_client;

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}

	ret = rpc_clnt_swap_activate(clnt);
	if (ret)
		return ret;
	ret = add_swap_extent(sis, 0, sis->max, 0);
	if (ret < 0) {
		rpc_clnt_swap_deactivate(clnt);
		return ret;
	}

	*span = sis->pages;

	if (cl->rpc_ops->enable_swap)
		cl->rpc_ops->enable_swap(inode);

	sis->flags |= SWP_FS_OPS;
	return ret;
}

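/*
 * Undo nfs_swap_activate(): take the RPC client back out of swap mode and
 * let the NFS version-specific code disable its swap support.
 */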
static void nfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_client *cl = NFS_SERVER(inode)->nfs_client;

	rpc_clnt_swap_deactivate(clnt);
	if (cl->rpc_ops->disable_swap)
		cl->rpc_ops->disable_swap(file_inode(file));
}

const struct address_space_operations nfs_file_aops = {
	.read_folio = nfs_read_folio,
	.readahead = nfs_readahead,
	.dirty_folio = filemap_dirty_folio,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.write_begin = nfs_write_begin,
	.write_end = nfs_write_end,
	.invalidate_folio = nfs_invalidate_folio,
	.release_folio = nfs_release_folio,
	.migrate_folio = nfs_migrate_folio,
	.launder_folio = nfs_launder_folio,
	.is_dirty_writeback = nfs_check_dirty_writeback,
	.error_remove_page = generic_error_remove_page,
	.swap_activate = nfs_swap_activate,
	.swap_deactivate = nfs_swap_deactivate,
	.swap_rw = nfs_swap_rw,
};

/*
 * Notification that a PTE pointing to an NFS page is about to be made
 * writable, implying that someone is about to modify the page through a
 * shared-writable mapping
 */
static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);
	unsigned pagelen;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct address_space *mapping;
	struct folio *folio = page_folio(vmf->page);

	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
		 filp, filp->f_mapping->host->i_ino,
		 (long long)folio_file_pos(folio));

	sb_start_pagefault(inode->i_sb);

	/* make sure the cache has finished storing the page */
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0) {
		ret = VM_FAULT_RETRY;
		goto out;
	}

	wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
			   nfs_wait_bit_killable,
			   TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);

	folio_lock(folio);
	mapping = folio_file_mapping(folio);
	if (mapping != inode->i_mapping)
		goto out_unlock;

	folio_wait_writeback(folio);

	pagelen = nfs_folio_length(folio);
	if (pagelen == 0)
		goto out_unlock;

	ret = VM_FAULT_LOCKED;
	if (nfs_flush_incompatible(filp, folio) == 0 &&
	    nfs_update_folio(filp, folio, 0, pagelen) == 0)
		goto out;

	ret = VM_FAULT_SIGBUS;
out_unlock:
	folio_unlock(folio);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct nfs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = nfs_vm_page_mkwrite,
};

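/*
 * Buffered write path: revalidate the file size for appends, copy the data
 * into the page cache under the NFS write I/O lock, then honour the
 * "write eager"/"write wait" mount options and O_SYNC semantics before
 * reporting any deferred write-back errors.
 */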
ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	unsigned int mntflags = NFS_SERVER(inode)->flags;
	ssize_t result, written;
	errseq_t since;
	int error;

	result = nfs_key_timeout_notify(file, inode);
	if (result)
		return result;

	if (iocb->ki_flags & IOCB_DIRECT)
		return nfs_file_direct_write(iocb, from, false);

	dprintk("NFS: write(%pD2, %zu@%Ld)\n",
		file, iov_iter_count(from), (long long) iocb->ki_pos);

	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_flags & IOCB_APPEND || iocb->ki_pos > i_size_read(inode)) {
		result = nfs_revalidate_file_size(inode, file);
		if (result)
			return result;
	}

	nfs_clear_invalid_mapping(file->f_mapping);

	since = filemap_sample_wb_err(file->f_mapping);
	nfs_start_io_write(inode);
	result = generic_write_checks(iocb, from);
	if (result > 0)
		result = generic_perform_write(iocb, from);
	nfs_end_io_write(inode);
	if (result <= 0)
		goto out;

	written = result;
	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);

	if (mntflags & NFS_MOUNT_WRITE_EAGER) {
		result = filemap_fdatawrite_range(file->f_mapping,
						  iocb->ki_pos - written,
						  iocb->ki_pos - 1);
		if (result < 0)
			goto out;
	}
	if (mntflags & NFS_MOUNT_WRITE_WAIT) {
		filemap_fdatawait_range(file->f_mapping,
					iocb->ki_pos - written,
					iocb->ki_pos - 1);
	}
	result = generic_write_sync(iocb, written);
	if (result < 0)
		return result;

out:
	/* Return error values */
	error = filemap_check_wb_err(file->f_mapping, since);
	switch (error) {
	default:
		break;
	case -EDQUOT:
	case -EFBIG:
	case -ENOSPC:
		nfs_wb_all(inode);
		error = file_check_and_advance_wb_err(file);
		if (error < 0)
			result = error;
	}
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	return -ETXTBSY;
}
EXPORT_SYMBOL_GPL(nfs_file_write);

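/*
 * F_GETLK: test for a conflicting lock. The local lock table is consulted
 * first; if no conflict is found there and neither a delegation nor local
 * locking lets us answer locally, the query is sent to the server.
 */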
static int
do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status = 0;
	unsigned int saved_type = fl->fl_type;

	/* Try local locking first */
	posix_test_lock(filp, fl);
	if (fl->fl_type != F_UNLCK) {
		/* found a conflict */
		goto out;
	}
	fl->fl_type = saved_type;

	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
		goto out_noconflict;

	if (is_local)
		goto out_noconflict;

	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
	return status;
out_noconflict:
	fl->fl_type = F_UNLCK;
	goto out;
}

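/*
 * Release a lock. Outstanding writes are flushed and in-flight I/O under
 * the lock context is drained first, then the unlock is sent either to the
 * server or to the local lock table for "-olocal_lock" style mounts.
 */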
do_unlk(struct file * filp,int cmd,struct file_lock * fl,int is_local)7495eebde23SSuresh Jayaraman static int
7505eebde23SSuresh Jayaraman do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
7511da177e4SLinus Torvalds {
7521da177e4SLinus Torvalds struct inode *inode = filp->f_mapping->host;
7537a8203d8STrond Myklebust struct nfs_lock_context *l_ctx;
7541da177e4SLinus Torvalds int status;
7551da177e4SLinus Torvalds
7561da177e4SLinus Torvalds /*
7571da177e4SLinus Torvalds * Flush all pending writes before doing anything
7581da177e4SLinus Torvalds * with locks..
7591da177e4SLinus Torvalds */
760aded8d7bSTrond Myklebust nfs_wb_all(inode);
7611da177e4SLinus Torvalds
7627a8203d8STrond Myklebust l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
7637a8203d8STrond Myklebust if (!IS_ERR(l_ctx)) {
764210c7c17SBenjamin Coddington status = nfs_iocounter_wait(l_ctx);
7657a8203d8STrond Myklebust nfs_put_lock_context(l_ctx);
7661da177e4SLinus Torvalds /* NOTE: special case
7671da177e4SLinus Torvalds * If we're signalled while cleaning up locks on process exit, we
7681da177e4SLinus Torvalds * still need to complete the unlock.
7691da177e4SLinus Torvalds */
770f30cb757SBenjamin Coddington if (status < 0 && !(fl->fl_flags & FL_CLOSE))
771f30cb757SBenjamin Coddington return status;
772f30cb757SBenjamin Coddington }
773f30cb757SBenjamin Coddington
7745eebde23SSuresh Jayaraman /*
7755eebde23SSuresh Jayaraman * Use local locking if mounted with "-onolock" or with appropriate
7765eebde23SSuresh Jayaraman * "-olocal_lock="
7775eebde23SSuresh Jayaraman */
7785eebde23SSuresh Jayaraman if (!is_local)
7791da177e4SLinus Torvalds status = NFS_PROTO(inode)->lock(filp, cmd, fl);
7801da177e4SLinus Torvalds else
78175575ddfSJeff Layton status = locks_lock_file_wait(filp, fl);
7821da177e4SLinus Torvalds return status;
7831da177e4SLinus Torvalds }
7841da177e4SLinus Torvalds
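/*
 * Acquire a lock. Dirty pages are flushed before the request so the server
 * sees consistent data; on success the cached data is dropped (unless a
 * delegation is held) so that later reads refetch from the server.
 */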
static int
do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = nfs_sync_mapping(filp->f_mapping);
	if (status != 0)
		goto out;

	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = locks_lock_file_wait(filp, fl);
	if (status < 0)
		goto out;

	/*
	 * Invalidate cache to prevent missing any changes.  If
	 * the file is mapped, clear the page cache as well so
	 * those mappings will be loaded.
	 *
	 * This makes locking act as a cache coherency point.
	 */
	nfs_sync_mapping(filp->f_mapping);
	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
		nfs_zap_caches(inode);
		if (mapping_mapped(filp->f_mapping))
			nfs_revalidate_mapping(inode, filp->f_mapping);
	}
out:
	return status;
}

/*
 * Lock a (portion of) a file
 */
int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int ret = -ENOLCK;
	int is_local = 0;

	dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
		filp, fl->fl_type, fl->fl_flags,
		(long long)fl->fl_start, (long long)fl->fl_end);

	nfs_inc_stats(inode, NFSIOS_VFSLOCK);

	if (fl->fl_flags & FL_RECLAIM)
		return -ENOGRACE;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
		is_local = 1;

	if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
		ret = NFS_PROTO(inode)->lock_check_bounds(fl);
		if (ret < 0)
			goto out_err;
	}

	if (IS_GETLK(cmd))
		ret = do_getlk(filp, cmd, fl, is_local);
	else if (fl->fl_type == F_UNLCK)
		ret = do_unlk(filp, cmd, fl, is_local);
	else
		ret = do_setlk(filp, cmd, fl, is_local);
out_err:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_lock);

/*
 * Lock a (portion of) a file
 */
int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int is_local = 0;

	dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
		filp, fl->fl_type, fl->fl_flags);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
		is_local = 1;

	/* We're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl, is_local);
	return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);

const struct file_operations nfs_file_operations = {
	.llseek = nfs_file_llseek,
	.read_iter = nfs_file_read,
	.write_iter = nfs_file_write,
	.mmap = nfs_file_mmap,
	.open = nfs_file_open,
	.flush = nfs_file_flush,
	.release = nfs_file_release,
	.fsync = nfs_file_fsync,
	.lock = nfs_lock,
	.flock = nfs_flock,
	.splice_read = nfs_file_splice_read,
	.splice_write = iter_file_splice_write,
	.check_flags = nfs_check_flags,
	.setlease = simple_nosetlease,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);