// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

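/*
 * Illustrative sketch (not part of this file's API): how an
 * application typically requests uncached I/O.  The path and the
 * page-sized alignment below are assumptions chosen for portability,
 * not requirements enforced by this client.
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *
 *	if (fd >= 0 && posix_memalign(&buf, 4096, 4096) == 0) {
 *		ssize_t n = pread(fd, buf, 4096, 0);	// bypasses the page cache
 *		...
 *	}
 */
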
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

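/*
 * io_count gates completion of a direct request: the scheduling
 * functions below take a reference before dispatching I/O, and each
 * pgio header takes another via nfs_direct_pgio_init().  Whichever
 * path drops the count to zero with put_dreq() must complete the
 * request.
 */
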
static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
			    const struct nfs_pgio_header *hdr,
			    ssize_t dreq_len)
{
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;
	}

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
		dreq->error = hdr->error;
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
		       const struct nfs_pgio_header *hdr)
{
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
}

static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
					struct nfs_page *req)
{
	loff_t offs = req_offset(req);
	size_t req_start = (size_t)(offs - dreq->io_start);

	if (req_start < dreq->max_count)
		dreq->max_count = req_start;
	if (req_start < dreq->count)
		dreq->count = req_start;
}

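/*
 * Worked example (assumed numbers) for the truncation helpers above:
 * with io_start == 0 and max_count == 128K, dropping a failed request
 * that starts at offset 64K caps max_count (and, if needed, count) at
 * 64K, so only the contiguous leading bytes count as transferred.
 */
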
/**
 * nfs_swap_rw - NFS address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * Perform I/O to the swap file.  This is much like direct I/O.
 */
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	if (iov_iter_rw(iter) == READ)
		ret = nfs_file_direct_read(iocb, iter, true);
	else
		ret = nfs_file_direct_write(iocb, iter, true);
	if (ret < 0)
		return ret;
	return 0;
}

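/*
 * Sketch of the wiring (an assumption based on the generic swap_rw
 * address_space operation, not something defined in this file): the
 * swap code reaches this helper through the file's aops, e.g.
 *
 *	static const struct address_space_operations nfs_file_aops = {
 *		...
 *		.swap_rw	= nfs_swap_rw,
 *	};
 */
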
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

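/*
 * Note that nfs_direct_req_alloc() returns a dreq holding two kref
 * references: one is dropped by nfs_direct_complete() when all I/O
 * has finished, the other by the caller via nfs_direct_req_release()
 * once it is done with the result.
 */
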
static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
{
	loff_t start = offset - dreq->io_start;
	return dreq->max_count - start;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb, so we can't trust that
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If request allocation or iov_iter_get_pages_alloc2()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled by nfs_direct_read_completion().  Otherwise, if no requests
 * have been sent, just return an error.
 */

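/*
 * Worked example (assumed numbers): with rsize == 64K, a 1MB user
 * buffer is pinned 64K at a time by iov_iter_get_pages_alloc2(); each
 * pinned slice is carved into per-page nfs_page requests, which the
 * pageio descriptor below coalesces into up-to-rsize READs on the
 * wire.
 */
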
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc2(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
							pgbase, pos, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct reads instead of calling
 * generic_file_read_iter() in order to avoid its check that the
 * request starts before the end of the file.  For that check to
 * work, we would have to generate a GETATTR before each direct read,
 * and even then there is a window between the GETATTR and the
 * subsequent READ where the file size could change.  Our preference
 * is simply to do all reads the application wants, and let the server
 * take care of managing the end of file boundary.
 *
 * This function also avoids updating the file's atime locally, as
 * the NFS server sets the file's atime, and this client must read
 * the updated atime from the server back into its cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
			     bool swap)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (user_backed_iter(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	if (!swap)
		nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	if (!swap)
		nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void nfs_direct_add_page_head(struct list_head *list,
				     struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
		return;
	if (!list_empty(&head->wb_list)) {
		nfs_unlock_request(head);
		return;
	}
	list_add(&head->wb_list, list);
	kref_get(&head->wb_kref);
	kref_get(&head->wb_kref);
}

static void nfs_direct_join_group(struct list_head *list,
				  struct nfs_commit_info *cinfo,
				  struct inode *inode)
{
	struct nfs_page *req, *subreq;

	list_for_each_entry(req, list, wb_list) {
		if (req->wb_head != req) {
			nfs_direct_add_page_head(&req->wb_list, req);
			continue;
		}
		subreq = req->wb_this_page;
		if (subreq == req)
			continue;
		do {
			/*
			 * Remove subrequests from this list before freeing
			 * them in the call to nfs_join_page_group().
			 */
			if (!list_empty(&subreq->wb_list)) {
				nfs_list_remove_request(subreq);
				nfs_release_request(subreq);
			}
		} while ((subreq = subreq->wb_this_page) != req);
		nfs_join_page_group(req, cinfo, inode);
	}
}

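/*
 * Resend path for unstable writes: nfs_direct_write_scan_commit_list()
 * pulls requests back off the MDS and pNFS commit lists, and
 * nfs_direct_write_reschedule() resends them as stable (FLUSH_STABLE)
 * writes after a commit failure or verifier mismatch.
 */
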
static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	pnfs_recover_commit_reqs(list, cinfo);
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);

	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		/* Bump the transmission count */
		req->wb_nio++;
		if (!nfs_pageio_add_request(&desc, req)) {
			spin_lock(&dreq->lock);
			if (dreq->error < 0) {
				desc.pg_error = dreq->error;
			} else if (desc.pg_error != -EAGAIN) {
				dreq->flags = 0;
				if (!desc.pg_error)
					desc.pg_error = -EIO;
				dreq->error = desc.pg_error;
			} else
				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			spin_unlock(&dreq->lock);
			break;
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
		if (desc.pg_error == -EAGAIN) {
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else {
			spin_lock(&dreq->lock);
			nfs_direct_truncate_request(dreq, req);
			spin_unlock(&dreq->lock);
			nfs_release_request(req);
		}
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	trace_nfs_direct_commit_complete(dreq);

	if (status < 0) {
		/* Errors in commit are fatal */
		dreq->error = status;
		dreq->flags = NFS_ODIRECT_DONE;
	} else {
		status = dreq->error;
	}

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (status < 0) {
			spin_lock(&dreq->lock);
			nfs_direct_truncate_request(dreq, req);
			spin_unlock(&dreq->lock);
			nfs_release_request(req);
		} else if (!nfs_write_match_verf(verf, req)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			/*
			 * Despite the reboot, the write was successful,
			 * so reset wb_nio.
			 */
			req->wb_nio = 0;
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (nfs_commit_end(cinfo.mds))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
		struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	trace_nfs_direct_resched_write(dreq);

	spin_lock(&dreq->lock);
	if (dreq->flags != NFS_ODIRECT_DONE)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_commit_begin(cinfo.mds);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) { /* res == -ENOMEM */
		spin_lock(&dreq->lock);
		if (dreq->flags == 0)
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		spin_unlock(&dreq->lock);
	}
	if (nfs_commit_end(cinfo.mds))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	LIST_HEAD(reqs);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_direct_truncate_request(dreq, req);
		nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_direct_write_clear_reqs(dreq);
			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}

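/*
 * The switch above is the tail of the O_DIRECT write state machine:
 * NFS_ODIRECT_DO_COMMIT issues a COMMIT for data the server wrote
 * unstably, NFS_ODIRECT_RESCHED_WRITES resends the data as stable
 * writes, and any other state tears down remaining requests, zaps the
 * mapping, and completes the request.
 */
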
static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	trace_nfs_direct_write_complete(dreq);
	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
	int flags = NFS_ODIRECT_DONE;

	trace_nfs_direct_write_completion(dreq);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		if (!dreq->flags)
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
		flags = dreq->flags;
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (flags == NFS_ODIRECT_DO_COMMIT) {
			kref_get(&req->wb_kref);
			memcpy(&req->wb_verf, &hdr->verf.verifier,
			       sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->ds_commit_idx);
		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_page *req;
	struct nfs_commit_info cinfo;

	trace_nfs_direct_write_reschedule_io(dreq);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	spin_lock(&dreq->lock);
	if (dreq->error == 0)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	spin_unlock(&dreq->lock);
	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		nfs_unlock_request(req);
		nfs_mark_request_commit(req, NULL, &cinfo, 0);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If request allocation or iov_iter_get_pages_alloc2()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled by nfs_direct_write_completion().  Otherwise, if no
 * requests have been sent, just return an error.
 *
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos, int ioflags)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	struct nfs_commit_info cinfo;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
	bool defer = false;

	trace_nfs_direct_write_schedule_iovec(dreq);

	nfs_pageio_init_write(&desc, inode, ioflags, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc2(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
							pgbase, pos, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;

			if (defer) {
				nfs_mark_request_commit(req, NULL, &cinfo, 0);
				continue;
			}

			nfs_lock_request(req);
			if (nfs_pageio_add_request(&desc, req))
				continue;

			/* Exit on hard errors */
			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}

			/* If the error is soft, defer remaining requests */
			nfs_init_cinfo_from_dreq(&cinfo, dreq);
			spin_lock(&dreq->lock);
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			spin_unlock(&dreq->lock);
			nfs_unlock_request(req);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
			desc.pg_error = 0;
			defer = true;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct writes instead of calling
 * generic_file_write_iter() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
			      bool swap)
{
	ssize_t result, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, iov_iter_count(iter), (long long) iocb->ki_pos);

	if (swap)
		/* bypass generic checks */
		result = iov_iter_count(iter);
	else
		result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;
	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

	if (swap) {
		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_STABLE);
	} else {
		nfs_start_io_direct(inode);

		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_COND_STABLE);

		if (mapping->nrpages) {
			invalidate_inode_pages2_range(mapping,
						      pos >> PAGE_SHIFT, end);
		}

		nfs_end_io_direct(inode);
	}

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
	nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

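/*
 * Illustrative sketch (not part of this file's API): a caller coping
 * with short direct writes.  nfs_file_direct_write() returns the
 * number of bytes that completed, which may be less than requested;
 * the sizes below are assumptions for the example only.
 *
 *	ssize_t n = pwrite(fd, buf, 4096, 0);	// fd opened with O_DIRECT
 *	if (n < 0)
 *		perror("pwrite");
 *	else if (n < 4096)
 *		...	// only n bytes reached stable storage; resubmit the rest
 */
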
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}