/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
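 * For example, an application that wants its I/O to bypass the page
 * cache simply opens with O_DIRECT and issues suitably aligned reads
 * and writes (a user-space sketch, not part of this file):
 *
 *	fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	nread = read(fd, buf, len);
 *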
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_mirror {
	ssize_t count;
};

struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */

	struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
	int			mirror_count;

	ssize_t			count,		/* bytes actually processed */
				bytes_left,	/* bytes left to be sent */
				io_start,	/* start of IO */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

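/*
 * dreq->io_count holds one reference for the function that schedules
 * the I/O plus one for each outstanding pgio header.  Whoever drops
 * the count to zero is responsible for completing the request.
 */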
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq)
{
	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
}
EXPORT_SYMBOL_GPL(nfs_direct_set_resched_writes);

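/*
 * Fold the good byte count from @hdr into the mirror it was sent to,
 * then set dreq->count to the minimum count agreed upon by all
 * mirrors, since bytes only count once every mirror holds them.
 */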
static void
nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
{
	int i;
	ssize_t count;

	WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);

	count = dreq->mirrors[hdr->pgio_mirror_idx].count;
	if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
		count = hdr->io_start + hdr->good_bytes - dreq->io_start;
		dreq->mirrors[hdr->pgio_mirror_idx].count = count;
	}

	/*
	 * Update dreq->count by finding the minimum agreed count from
	 * all mirrors.
	 */
	count = dreq->mirrors[0].count;

	for (i = 1; i < dreq->mirror_count; i++)
		count = min(count, dreq->mirrors[i].count);

	dreq->count = count;
}

/*
 * nfs_direct_select_verf - select the right verifier
 * @dreq: direct request possibly spanning multiple servers
 * @ds_clp: nfs_client of data server or NULL if MDS / non-pnfs
 * @commit_idx: commit bucket index for the DS
 *
 * Returns the correct verifier to use given the role of the server.
 */
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
		       struct nfs_client *ds_clp,
		       int commit_idx)
{
	struct nfs_writeverf *verfp = &dreq->verf;

#ifdef CONFIG_NFS_V4_1
	if (ds_clp) {
		/* pNFS is in use, use the DS verf */
		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
		else
			WARN_ON_ONCE(1);
	}
#endif
	return verfp;
}


/*
 * nfs_direct_set_hdr_verf - set the write/commit verifier
 * @dreq: direct request possibly spanning multiple servers
 * @hdr: pageio header to validate against previously seen verfs
 *
 * Set the server's (MDS or DS) "seen" verifier.
 */
static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
				    struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	WARN_ON_ONCE(verfp->committed >= 0);
	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
	WARN_ON_ONCE(verfp->committed < 0);
}

/*
 * nfs_direct_set_or_cmp_hdr_verf - compare verifier for pgio header
 * @dreq: direct request possibly spanning multiple servers
 * @hdr: pageio header to validate against previously seen verf
 *
 * Set the server's "seen" verf if it is not yet initialized.
 * Returns the result of comparing @hdr->verf with the "seen"
 * verf of the server used by @hdr (DS or MDS).
 */
static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
					  struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	if (verfp->committed < 0) {
		nfs_direct_set_hdr_verf(dreq, hdr);
		return 0;
	}
	return memcmp(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
}

/*
 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 * @dreq: direct request possibly spanning multiple servers
 * @data: commit data to validate against previously seen verf
 *
 * Returns the result of comparing @data->verf with the verf of
 * the server used by @data (DS or MDS).
 */
static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
					   struct nfs_commit_data *data)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
					 data->ds_commit_index);

	/* verifier not set so always fail */
	if (verfp->committed < 0)
		return 1;

	return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf));
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: source (write) or destination (read) of the I/O data
 * @pos: offset in file to begin the operation
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, for most direct I/O, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* we only support swap files calling nfs_direct_IO */
	if (!IS_SWAPFILE(inode))
		return 0;

#ifndef CONFIG_NFS_SWAP
	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp, (long long) pos, iter->nr_segs);

	return -EINVAL;
#else
	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		return nfs_file_direct_read(iocb, iter, pos);
	return nfs_file_direct_write(iocb, iter);
#endif /* CONFIG_NFS_SWAP */
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

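/*
 * Initialize a commit info structure from the direct request so that
 * the generic commit machinery can be reused for O_DIRECT writes.
 */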
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->inode->i_lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

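/*
 * Ask the pgio layer how many mirrors this request will fan out to;
 * defaults to 1 when no pg_get_mirror_count method is provided.
 */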
static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
					     struct nfs_pageio_descriptor *pgio,
					     struct nfs_page *req)
{
	int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);

	dreq->mirror_count = mirror_count;
}

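/*
 * Allocate a request and take two references: kref_init() sets the
 * count to one for the I/O path, and kref_get() adds one for the
 * caller, dropped via nfs_direct_req_release() when the wait is over.
 */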
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	dreq->mirror_count = 1;
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
{
	struct inode *inode = dreq->inode;

	if (dreq->iocb && write) {
		loff_t pos = dreq->iocb->ki_pos + dreq->count;

		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < pos)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	if (write)
		nfs_zap_mapping(inode, inode->i_mapping);

	inode_dio_done(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		dreq->iocb->ki_complete(dreq->iocb, res, 0);
	}

	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

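/*
 * Completion handler for direct reads: record how many bytes arrived,
 * dirty the user's pages that received data, and release each request.
 * Dropping the last io_count reference completes the direct request.
 */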
static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		nfs_direct_good_bytes(dreq, hdr);

	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes)
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  Otherwise,
 * if no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	atomic_inc(&inode->i_dio_count);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	return 0;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid its check that the
 * request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
				loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result = -EINVAL;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) pos);

	result = 0;
	if (!count)
		goto out;

	mutex_lock(&inode->i_mutex);
	result = nfs_sync_mapping(mapping);
	if (result)
		goto out_unlock;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out_unlock;

	dreq->inode = inode;
	dreq->bytes_left = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	NFS_I(inode)->read_io += count;
	result = nfs_direct_read_schedule_iovec(dreq, iter, pos);

	mutex_unlock(&inode->i_mutex);

	if (!result) {
		result = nfs_direct_wait(dreq);
		if (result > 0)
			iocb->ki_pos = pos + result;
	}

	nfs_direct_req_release(dreq);
	return result;

out_release:
	nfs_direct_req_release(dreq);
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	return result;
}

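/*
 * Move every request still awaiting a COMMIT (from the MDS list and,
 * for pNFS, from the DS commit buckets) onto @list for resending.
 */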
static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	spin_lock(cinfo->lock);
#ifdef CONFIG_NFS_V4_1
	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	spin_unlock(cinfo->lock);
}

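/*
 * Resend all requests on the commit lists as stable writes
 * (FLUSH_STABLE) after a verifier mismatch or a failed commit.  The
 * byte counts are reset and re-accumulated as the rewrites complete.
 */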
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);
	int i;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	dreq->count = 0;
	for (i = 0; i < dreq->mirror_count; i++)
		dreq->mirrors[i].count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	req = nfs_list_entry(reqs.next);
	nfs_direct_setup_mirroring(dreq, &desc, req);

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

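/*
 * Commit completion: if the COMMIT failed, or returned a verifier
 * that does not match the one seen at write time, mark the requests
 * for rescheduling through the MDS; otherwise release them.
 */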
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (nfs_direct_cmp_commit_data_verf(dreq, data)) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

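/*
 * Gather the outstanding unstable writes for this request and send
 * the COMMIT calls; on -ENOMEM, fall back to resending the writes as
 * stable instead.
 */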
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

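/*
 * Deferred completion for direct writes, run from the dreq's work
 * item: issue a COMMIT, reschedule failed writes, or finish the
 * request, depending on which flag the I/O path left behind.
 */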
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_direct_complete(dreq, true);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

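/*
 * Per-header write completion.  Tracks the write verifiers returned
 * by the server(s): the first unstable reply arms
 * NFS_ODIRECT_DO_COMMIT, and any later mismatching verifier forces
 * NFS_ODIRECT_RESCHED_WRITES.
 */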
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	bool request_commit = false;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error == 0) {
		nfs_direct_good_bytes(dreq, hdr);
		if (nfs_write_need_commit(hdr)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				request_commit = true;
			else if (dreq->flags == 0) {
				nfs_direct_set_hdr_verf(dreq, hdr);
				request_commit = true;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				request_commit = true;
				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
					dreq->flags =
						NFS_ODIRECT_RESCHED_WRITES;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {

		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (request_commit) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->ds_commit_idx);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};


/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled automatically by nfs_direct_write_completion().
 * Otherwise, if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			nfs_direct_setup_mirroring(dreq, &desc, req);

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * Writing starts at the offset given by iocb->ki_pos.
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, iov_iter_count(iter), (long long) iocb->ki_pos);

	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES,
		      iov_iter_count(iter));

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;

	mutex_lock(&inode->i_mutex);

	result = nfs_sync_mapping(mapping);
	if (result)
		goto out_unlock;

	if (mapping->nrpages) {
		result = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		if (result)
			goto out_unlock;
	}

	task_io_account_write(iov_iter_count(iter));

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out_unlock;

	dreq->inode = inode;
	dreq->bytes_left = iov_iter_count(iter);
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	mutex_unlock(&inode->i_mutex);

	if (!result) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			struct inode *inode = mapping->host;

			iocb->ki_pos = pos + result;
			spin_lock(&inode->i_lock);
			if (i_size_read(inode) < iocb->ki_pos)
				i_size_write(inode, iocb->ki_pos);
			spin_unlock(&inode->i_lock);
		}
	}
	nfs_direct_req_release(dreq);
	return result;

out_release:
	nfs_direct_req_release(dreq);
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}