/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
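
/*
 * Illustrative userspace sketch (not part of this file; the file name,
 * buffer alignment, and 4096-byte block size are assumptions for the
 * example only): a typical consumer opens with O_DIRECT and issues
 * naturally aligned I/O, since the client does not correct unaligned
 * requests.
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);	// align to a block boundary
 *	pread(fd, buf, 4096, 0);		// served by direct NFS READs
 */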

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
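
/*
 * Usage pattern (a sketch of what the schedulers below actually do):
 * take a reference before dispatching each sub-request and drop one as
 * each completes; whoever drops the last reference finishes the dreq.
 *
 *	get_dreq(dreq);			// balanced by the completion path
 *	...dispatch sub-requests...
 *	if (put_dreq(dreq))		// true only for the final put
 *		nfs_direct_complete(dreq, false);
 */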

/*
 * nfs_direct_select_verf - select the right verifier
 * @dreq - direct request possibly spanning multiple servers
 * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
 * @ds_idx - index of data server in data server list, only valid if ds_clp set
 *
 * returns the correct verifier to use given the role of the server
 */
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
		       struct nfs_client *ds_clp,
		       int ds_idx)
{
	struct nfs_writeverf *verfp = &dreq->verf;

#ifdef CONFIG_NFS_V4_1
	if (ds_clp) {
		/* pNFS is in use, use the DS verf */
		if (ds_idx >= 0 && ds_idx < dreq->ds_cinfo.nbuckets)
			verfp = &dreq->ds_cinfo.buckets[ds_idx].direct_verf;
		else
			WARN_ON_ONCE(1);
	}
#endif
	return verfp;
}


/*
 * nfs_direct_set_hdr_verf - set the write/commit verifier
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verfs
 *
 * Set the server's (MDS or DS) "seen" verifier
 */
static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
				    struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
				      hdr->ds_idx);
	WARN_ON_ONCE(verfp->committed >= 0);
	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
	WARN_ON_ONCE(verfp->committed < 0);
}

/*
 * nfs_direct_set_or_cmp_hdr_verf - set or compare verifier for pgio header
 * @dreq - direct request possibly spanning multiple servers
 * @hdr - pageio header to validate against previously seen verf
 *
 * set the server's "seen" verf if not initialized.
 * returns result of comparison between @hdr->verf and the "seen"
 * verf of the server used by @hdr (DS or MDS)
 */
static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
					  struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
					 hdr->ds_idx);
	if (verfp->committed < 0) {
		nfs_direct_set_hdr_verf(dreq, hdr);
		return 0;
	}
	return memcmp(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
}
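
/*
 * Illustrative caller pattern (this mirrors nfs_direct_write_completion()
 * below): a verifier mismatch means the server may have rebooted and
 * discarded unstable data, so the writes must be resent.
 *
 *	if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
 *		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 */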

/*
 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 * @dreq - direct request possibly spanning multiple servers
 * @data - commit data to validate against previously seen verf
 *
 * returns result of comparison between @data->verf and the verf of
 * the server used by @data (DS or MDS)
 */
static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
					   struct nfs_commit_data *data)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
					 data->ds_commit_index);
	WARN_ON_ONCE(verfp->committed < 0);
	return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf));
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iter: vector of buffers that define the I/O
 * @pos: offset in file to begin the operation
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct IO, we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
#ifndef CONFIG_NFS_SWAP
	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp, (long long) pos, iter->nr_segs);

	return -EINVAL;
#else
	VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);

	if (rw == READ)
		return nfs_file_direct_read(iocb, iter, pos);
	return nfs_file_direct_write(iocb, iter, pos);
#endif /* CONFIG_NFS_SWAP */
}
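
/*
 * Hook-up sketch, for orientation only (the real table lives in
 * fs/nfs/file.c):
 *
 *	const struct address_space_operations nfs_file_aops = {
 *		...
 *		.direct_IO	= nfs_direct_IO,
 *	};
 */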

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}
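
/*
 * In other words: an async (aio) request returns -EIOCBQUEUED here and
 * is finished later via aio_complete(), while a synchronous request
 * blocks (killably) and then reports either dreq->error or the number
 * of bytes processed.
 */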

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
{
	struct inode *inode = dreq->inode;

	if (dreq->iocb && write) {
		loff_t pos = dreq->iocb->ki_pos + dreq->count;

		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < pos)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	if (write)
		nfs_zap_mapping(inode, inode->i_mapping);

	inode_dio_done(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}

	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes)
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  Otherwise,
 * if no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	atomic_inc(&inode->i_dio_count);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
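		/* round up to whole pages, e.g. with 4K pages, 8192 bytes
		 * starting at page offset 512 span (8192+512+4095)/4096 = 3 */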
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq, false);
	return 0;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
				loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result = -EINVAL;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) pos);

	result = 0;
	if (!count)
		goto out;

	mutex_lock(&inode->i_mutex);
	result = nfs_sync_mapping(mapping);
	if (result)
		goto out_unlock;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out_unlock;

	dreq->inode = inode;
	dreq->bytes_left = count;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	NFS_I(inode)->read_io += count;
	result = nfs_direct_read_schedule_iovec(dreq, iter, pos);

	mutex_unlock(&inode->i_mutex);

	if (!result) {
		result = nfs_direct_wait(dreq);
		if (result > 0)
			iocb->ki_pos = pos + result;
	}

	nfs_direct_req_release(dreq);
	return result;

out_release:
	nfs_direct_req_release(dreq);
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	return result;
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (nfs_direct_cmp_commit_data_verf(dreq, data)) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

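/*
 * Deferred completion, run via schedule_work() from
 * nfs_direct_write_complete().  dreq->flags selects the next step: no
 * flag set means every reply was stable and the request can complete;
 * NFS_ODIRECT_DO_COMMIT means an unstable reply was seen and a COMMIT
 * must go out first; NFS_ODIRECT_RESCHED_WRITES means a verifier
 * mismatch or a failed commit requires resending the writes stably.
 */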
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_direct_complete(dreq, true);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	bool request_commit = false;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error == 0) {
		dreq->count += hdr->good_bytes;
		if (nfs_write_need_commit(hdr)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				request_commit = true;
			else if (dreq->flags == 0) {
				nfs_direct_set_hdr_verf(dreq, hdr);
				request_commit = true;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				request_commit = true;
				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
					dreq->flags =
						NFS_ODIRECT_RESCHED_WRITES;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {

		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (request_commit) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};


/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled automatically by nfs_direct_write_completion().
 * Otherwise, if no requests have been sent, just return an error.
 *
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
				loff_t pos)
{
	ssize_t result = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t end;
	size_t count = iov_iter_count(iter);
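	/* index of the last page cache page touched by this write */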
	end = (pos + count - 1) >> PAGE_CACHE_SHIFT;

	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, count, (long long) pos);

	result = generic_write_checks(file, &pos, &count, 0);
	if (result)
		goto out;

	result = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	result = 0;
	if (!count)
		goto out;

	mutex_lock(&inode->i_mutex);

	result = nfs_sync_mapping(mapping);
	if (result)
		goto out_unlock;

	if (mapping->nrpages) {
		result = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		if (result)
			goto out_unlock;
	}

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out_unlock;

	dreq->inode = inode;
	dreq->bytes_left = count;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	mutex_unlock(&inode->i_mutex);

	if (!result) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			struct inode *inode = mapping->host;

			iocb->ki_pos = pos + result;
			spin_lock(&inode->i_lock);
			if (i_size_read(inode) < iocb->ki_pos)
				i_size_write(inode, iocb->ki_pos);
			spin_unlock(&inode->i_lock);
		}
	}
	nfs_direct_req_release(dreq);
	return result;

out_release:
	nfs_direct_req_release(dreq);
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}
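
/*
 * This cache is presumably created from the client's module init path
 * (init_nfs_fs() in fs/nfs/inode.c at this vintage) before any direct
 * I/O can allocate an nfs_direct_req, and torn down again on exit via
 * nfs_destroy_directcache() below.
 */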

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}