// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */

	loff_t			io_start;	/* Start offset for I/O */
	ssize_t			count,		/* bytes actually processed */
				max_count,	/* max expected count */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
	/* for write */
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	/* for read */
#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
#define NFS_ODIRECT_DONE		INT_MAX	/* write verification failed */
};
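/*
 * Overview of the lifecycle implemented below: nfs_direct_req_alloc()
 * creates a request with two kref references (one for the I/O path, one
 * for the caller); nfs_direct_{read,write}_schedule_iovec() splits the
 * iov_iter into nfs_page requests and submits them; the per-RPC
 * completion handlers account the result via nfs_direct_count_bytes();
 * and nfs_direct_complete() signals either the iocb (async) or the
 * completion that nfs_direct_wait() sleeps on (sync).  The flags field
 * carries the NFS_ODIRECT_* state above; the write path uses it to pick
 * between sending a COMMIT and rescheduling the WRITEs.
 */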
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
			    const struct nfs_pgio_header *hdr,
			    ssize_t dreq_len)
{
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;

		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
			dreq->error = hdr->error;
		else /* Clear outstanding error if this is EOF */
			dreq->error = 0;
	}
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
		       const struct nfs_pgio_header *hdr)
{
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* we only support swap file calling nfs_direct_IO */
	if (!IS_SWAPFILE(inode))
		return 0;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		return nfs_file_direct_read(iocb, iter);
	return nfs_file_direct_write(iocb, iter);
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}
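/*
 * Reference counting: dreq->kref pins the structure itself
 * (nfs_direct_req_alloc() takes two references, one for the I/O path and
 * one for the caller collecting the result), while dreq->io_count counts
 * outstanding page I/O.  get_dreq()/put_dreq() bracket each submitted
 * pgio header, and the final put_dreq() drives nfs_direct_complete() or
 * nfs_direct_write_complete().
 */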
ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res, 0);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};
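/*
 * Note on the read completion above: pages are dirtied only when
 * NFS_ODIRECT_SHOULD_DIRTY is set, which nfs_file_direct_read() does for
 * iovec-backed (user-space) iterators; kernel-backed iterators, such as
 * the swap path entered through nfs_direct_IO(), are left untouched.
 */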
/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}
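/*
 * Return semantics of the scheduling loop above: if at least one request
 * was submitted, the requested byte count is returned and any later
 * failure is reported through dreq->error at completion time; if nothing
 * was submitted, the dreq is released here and the error (or -EIO) is
 * returned directly.
 */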
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (iter_is_iovec(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}
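/*
 * Illustrative sketch of typical user-space usage (example path and
 * sizes are arbitrary): an application reaches nfs_file_direct_read()
 * by opening the file with O_DIRECT and reading into its own buffer:
 *
 *	int fd = open("/mnt/nfs/data", O_RDONLY | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 1 << 20);
 *	ssize_t n = pread(fd, buf, 1 << 20, 0);
 *
 * As the header comment notes, requests are passed to the server as
 * issued; the client does not correct unaligned requests.
 */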
static void
nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
	struct nfs_page *req, *next;

	list_for_each_entry(req, list, wb_list) {
		if (req->wb_head != req || req->wb_this_page == req)
			continue;
		for (next = req->wb_this_page;
		     next != req->wb_head;
		     next = next->wb_this_page) {
			nfs_list_remove_request(next);
			nfs_release_request(next);
		}
		nfs_join_page_group(req, inode);
	}
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	pnfs_recover_commit_reqs(list, cinfo);
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	nfs_direct_join_group(&reqs, dreq->inode);

	dreq->count = 0;
	dreq->max_count = 0;
	list_for_each_entry(req, &reqs, wb_list)
		dreq->max_count += req->wb_bytes;
	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		/* Bump the transmission count */
		req->wb_nio++;
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_move_request(req, &failed);
			spin_lock(&cinfo.inode->i_lock);
			dreq->flags = 0;
			if (desc.pg_error < 0)
				dreq->error = desc.pg_error;
			else
				dreq->error = -EIO;
			spin_unlock(&cinfo.inode->i_lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	if (status < 0) {
		/* Errors in commit are fatal */
		dreq->error = status;
		dreq->max_count = 0;
		dreq->count = 0;
		dreq->flags = NFS_ODIRECT_DONE;
	} else if (dreq->flags == NFS_ODIRECT_DONE)
		status = dreq->error;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			/*
			 * Despite the reboot, the write was successful,
			 * so reset wb_nio.
			 */
			req->wb_nio = 0;
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else /* Error or match */
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (nfs_commit_end(cinfo.mds))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
		struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	spin_lock(&dreq->lock);
	if (dreq->flags != NFS_ODIRECT_DONE)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	LIST_HEAD(reqs);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}
}
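/*
 * Deferred completion of a direct write runs from the nfsiod workqueue
 * (queued by nfs_direct_write_complete() below) and dispatches on
 * dreq->flags: NFS_ODIRECT_DO_COMMIT sends a COMMIT for unstable replies,
 * NFS_ODIRECT_RESCHED_WRITES resends the WRITEs, and anything else tears
 * down the remaining requests, zaps the mapping and completes the dreq.
 */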
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
	case NFS_ODIRECT_DO_COMMIT:
		nfs_direct_commit_schedule(dreq);
		break;
	case NFS_ODIRECT_RESCHED_WRITES:
		nfs_direct_write_reschedule(dreq);
		break;
	default:
		nfs_direct_write_clear_reqs(dreq);
		nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
		nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
	int flags = NFS_ODIRECT_DONE;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
		if (!dreq->flags)
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
		flags = dreq->flags;
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {

		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (flags == NFS_ODIRECT_DO_COMMIT) {
			kref_get(&req->wb_kref);
			memcpy(&req->wb_verf, &hdr->verf.verifier,
			       sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->ds_commit_idx);
		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.offset + hdr->args.count -
			hdr->io_start;
	}
	spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};
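/*
 * When the pgio layer asks for a resend, ->reschedule_io above marks the
 * header as an unstable write so that the common NFS code resends the
 * pages and flags the dreq NFS_ODIRECT_RESCHED_WRITES; the verifier
 * comparison in nfs_direct_commit_complete() triggers the same
 * rescheduling when the server's write verifier changes between the
 * WRITE and the COMMIT.
 */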
/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}
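/*
 * Unlike the read path, each request built above is locked with
 * nfs_lock_request() before submission: a direct write can outlive its
 * first RPC, since the request may later be placed on a commit list or
 * rescheduled, and it is only unlocked and released from the completion
 * and commit handlers.
 */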
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, iov_iter_count(iter), (long long) iocb->ki_pos);

	result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;
	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

	nfs_start_io_direct(inode);

	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}
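/*
 * Ordering note for nfs_file_direct_write() above: scheduling is
 * bracketed by nfs_start_io_direct()/nfs_end_io_direct(), cached pages
 * covering the written range are invalidated once the writes have been
 * scheduled, and generic_write_sync() runs only after nfs_direct_wait()
 * reports a positive byte count.
 */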
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}