xref: /openbmc/linux/fs/nfs/write.c (revision 0b7c01533aa9f4a228d07d2768d084acb3a387bc)
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
				  struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

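/*
 * Allocate a commit descriptor from the dedicated mempool, zeroed and
 * with its page list initialised. Returns NULL if the allocation fails.
 */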
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

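/*
 * Allocate write data for up to @pagecount pages. Small requests use the
 * page vector embedded in struct nfs_write_data; larger ones fall back to
 * a separately allocated vector, and the whole allocation is undone if
 * that vector cannot be obtained.
 */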
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

void nfs_writedata_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

void nfs_writedata_release(struct nfs_write_data *wdata)
{
	put_nfs_open_context(wdata->args.context);
	nfs_writedata_free(wdata);
}

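/*
 * Record a write error on the open context. The smp_wmb() orders the
 * store to ctx->error before NFS_CONTEXT_ERROR_WRITE is set, so that a
 * reader which observes (and clears) the flag also sees the error value.
 */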
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

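/*
 * Look up the nfs_page attached to @page via its page_private field and
 * take a reference on it. The caller must hold inode->i_lock.
 */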
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&inode->i_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (i_size > 0 && page->index < end_index)
		goto out;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	SetPageUptodate(page);
}

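/*
 * Map the reason for writeback onto RPC flush flags: writes for memory
 * reclaim go out at high priority as stable writes, while periodic and
 * background writeback run at low priority.
 */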
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->for_kupdate || wbc->for_background)
		return FLUSH_LOWPRI | FLUSH_COND_STABLE;
	return FLUSH_COND_STABLE;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
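
/*
 * Both thresholds are in pages: nfs_congestion_kb is converted from
 * kilobytes by the (PAGE_SHIFT-10) shift. The "off" threshold sits at
 * 75% of the "on" threshold, giving some hysteresis so that the bdi
 * does not flap between congested and uncongested.
 */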

static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		page_cache_get(page);
		if (atomic_long_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH) {
			set_bdi_congested(&nfss->backing_dev_info,
						BLK_RW_ASYNC);
		}
	}
	return ret;
}

static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	page_cache_release(page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}

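/*
 * Find the write request attached to @page and lock it. If the request
 * is locked by someone else, either wait for it (dropping i_lock in the
 * meantime) or, if @nonblock is set, bail out with ERR_PTR(-EAGAIN).
 * Returns NULL if the page carries no request.
 */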
static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			break;
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		if (!nonblock)
			ret = nfs_wait_on_request(req);
		else
			ret = -EAGAIN;
		nfs_release_request(req);
		if (ret != 0)
			return ERR_PTR(ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
	return req;
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page, bool nonblock)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_find_and_lock_request(page, nonblock);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = nfs_set_page_writeback(page);
	BUG_ON(ret != 0);
	BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));

	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		ret = pgio->pg_error;
	}
out:
	return ret;
}

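/*
 * Flush one dirty page through the pageio descriptor. An -EAGAIN from a
 * non-blocking flush is not an error: the page is simply redirtied and
 * left for a later writeback pass.
 */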
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
	struct inode *inode = page->mapping->host;
	int ret;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	nfs_pageio_cond_complete(pgio, page->index);
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	unsigned long *bitlock = &NFS_I(inode)->flags;
	struct nfs_pageio_descriptor pgio;
	int err;

	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	/* Lock the request! */
	nfs_lock_request_dontget(req);

	spin_lock(&inode->i_lock);
	if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
		inode->i_version++;
	set_bit(PG_MAPPED, &req->wb_flags);
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	spin_unlock(&inode->i_lock);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	clear_bit(PG_MAPPED, &req->wb_flags);
	nfsi->npages--;
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
}

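/*
 * Redirty the page backing @req so that a future writeback pass will
 * pick the request up again.
 */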
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @head: commit list head
 *
 * This sets the PG_CLEAN bit, updates the inode global count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the inode->i_lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
{
	struct inode *inode = req->wb_context->dentry->d_inode;

	set_bit(PG_CLEAN, &req->wb_flags);
	spin_lock(&inode->i_lock);
	nfs_list_add_request(req, head);
	NFS_I(inode)->ncommit++;
	spin_unlock(&inode->i_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 *
 * This clears the PG_CLEAN bit, and updates the inode global count of
 * number of outstanding requests requiring a commit.
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the inode->i_lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;

	if (!test_and_clear_bit(PG_CLEAN, &req->wb_flags))
		return;
	nfs_list_remove_request(req);
	NFS_I(inode)->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
{
	struct inode *inode = req->wb_context->dentry->d_inode;

	if (pnfs_mark_request_commit(req, lseg))
		return;
	nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
}

static void
nfs_clear_page_commit(struct page *page)
{
	dec_zone_page_state(page, NR_UNSTABLE_NFS);
	dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
}

static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct inode *inode = req->wb_context->dentry->d_inode;

		if (!pnfs_clear_request_commit(req)) {
			spin_lock(&inode->i_lock);
			nfs_request_remove_commit_list(req);
			spin_unlock(&inode->i_lock);
		}
		nfs_clear_page_commit(req->wb_page);
	}
}

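/*
 * Decide whether a completed WRITE still needs a COMMIT: NFS_FILE_SYNC
 * replies never do, NFS_UNSTABLE replies always do, and NFS_DATA_SYNC
 * replies need one only for writes sent through the MDS (no layout
 * segment attached).
 */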
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	if (data->verf.committed == NFS_DATA_SYNC)
		return data->lseg == NULL;
	else
		return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req,
				  struct nfs_write_data *data)
{
	if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req, data->lseg);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_mark_request_dirty(req);
		return 1;
	}
	return 0;
}
#else
static void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
{
}

static void
nfs_clear_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req,
				  struct nfs_write_data *data)
{
	return 0;
}
#endif

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int
nfs_need_commit(struct nfs_inode *nfsi)
{
	return nfsi->ncommit > 0;
}

/* i_lock held by caller */
static int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
		spinlock_t *lock)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		if (cond_resched_lock(lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req);
		nfs_list_add_request(req, dst);
		ret++;
		if (ret == max)
			break;
	}
	return ret;
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (nfsi->ncommit > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
				&inode->i_lock);
		ret += pnfs_scan_commit_lists(inode, max - ret,
				&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

#else
static inline int nfs_need_commit(struct nfs_inode *nfsi)
{
	return 0;
}

static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
{
	return 0;
}
#endif

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			goto out_unlock;

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend
		    || end < req->wb_offset)
			goto out_flushme;

		if (nfs_lock_request_dontget(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	spin_unlock(&inode->i_lock);
	if (req)
		nfs_clear_request_commit(req);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page	*req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, inode, page, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

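/*
 * Attach a write request covering @offset/@count to the page, grow the
 * file size if needed, mark the page uptodate when the request spans the
 * whole page, and leave the request dirty for a later flush.
 */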
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_mark_request_dirty(req);
	nfs_unlock_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx ||
			req->wb_lock_context->lockowner != current->files ||
			req->wb_lock_context->pid != current->tgid;
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	return PageUptodate(page) &&
		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode	*inode = page->mapping->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is up to date, it may be more efficient to extend the write
	 * to cover the entire page in order to avoid fragmentation
	 * inefficiencies.
	 */
	if (nfs_write_pageuptodate(page, inode) &&
			inode->i_flock == NULL &&
			!(file->f_flags & O_DSYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(page);
	else
		__set_page_dirty_nobuffers(page);

	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}

static void nfs_writepage_release(struct nfs_page *req,
				  struct nfs_write_data *data)
{
	struct page *page = req->wb_page;

	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
		nfs_inode_remove_request(req);
	nfs_unlock_request(req);
	nfs_end_page_writeback(page);
}

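/*
 * Translate the FLUSH_* priority bits into an rpc_task priority.
 */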
static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

int nfs_initiate_write(struct nfs_write_data *data,
		       struct rpc_clnt *clnt,
		       const struct rpc_call_ops *call_ops,
		       int how)
{
	struct inode *inode = data->inode;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
		.priority = priority,
	};
	int ret = 0;

	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
		"(req %s/%lld, %u bytes @ offset %llu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		data->args.count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_write);

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode = req->wb_context->dentry->d_inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	data->mds_offset = data->args.offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;
	data->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_need_commit(NFS_I(inode)))
			break;
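		/* fall through: nothing awaiting commit, so a stable
		 * write avoids the need for a separate COMMIT */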
	default:
		data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}

static int nfs_do_write(struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		int how)
{
	struct inode *inode = data->args.context->dentry->d_inode;

	return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
}

static int nfs_do_multiple_writes(struct list_head *head,
		const struct rpc_call_ops *call_ops,
		int how)
{
	struct nfs_write_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_entry(head->next, struct nfs_write_data, list);
		list_del_init(&data->list);

		ret2 = nfs_do_write(data, call_ops, how);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	nfs_mark_request_dirty(req);
	nfs_unlock_request(req);
	nfs_end_page_writeback(page);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = desc->pg_bsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;

	nfs_list_remove_request(req);

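	/*
	 * FLUSH_COND_STABLE only pays off when this RPC covers the whole
	 * flush: if more I/O is queued, a commit is already pending, or
	 * the range needs several RPCs, fall back to unstable writes
	 * followed by a COMMIT.
	 */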
	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
	     desc->pg_count > wsize))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	offset = 0;
	nbytes = desc->pg_count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		data->pagevec[0] = page;
		nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
		list_add(&data->list, res);
		requests++;
		nbytes -= len;
		offset += len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);
	desc->pg_rpc_callops = &nfs_write_partial_ops;
	return ret;

out_bad:
	while (!list_empty(res)) {
		data = list_entry(res->next, struct nfs_write_data, list);
		list_del(&data->list);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;
	struct list_head *head = &desc->pg_list;
	int ret = 0;

	data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
						      desc->pg_count));
	if (!data) {
		while (!list_empty(head)) {
			req = nfs_list_entry(head->next);
			nfs_list_remove_request(req);
			nfs_redirty_request(req);
		}
		ret = -ENOMEM;
		goto out;
	}
	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
	list_add(&data->list, res);
	desc->pg_rpc_callops = &nfs_write_full_ops;
out:
	return ret;
}

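/*
 * Dispatch a flush: if the server's wsize is smaller than a page, each
 * page must be split across several WRITE RPCs; otherwise the whole
 * page list can go out as a single request.
 */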
int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	if (desc->pg_bsize < PAGE_CACHE_SIZE)
		return nfs_flush_multi(desc, head);
	return nfs_flush_one(desc, head);
}

static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);
	int ret;

	ret = nfs_generic_flush(desc, &head);
	if (ret == 0)
		ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
				desc->pg_ioflags);
	return ret;
}

static const struct nfs_pageio_ops nfs_pageio_write_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_writepages,
};

void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
				NFS_SERVER(inode)->wsize, ioflags);
}

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_write_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);

static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	if (!pnfs_pageio_init_write(pgio, inode, ioflags))
		nfs_pageio_init_write_mds(pgio, inode, ioflags);
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;

	dprintk("NFS: %5u write(%s/%lld %d@%lld)",
		task->tk_pid,
		data->req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)
		  NFS_FILEID(data->req->wb_context->dentry->d_inode),
		data->req->wb_bytes, (long long)req_offset(data->req));

	nfs_writeback_done(task, data);
}

static void nfs_writeback_release_partial(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0) {
		nfs_set_pageerror(page);
		nfs_context_set_write_error(req->wb_context, status);
		dprintk(", error = %d\n", status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		struct inode *inode = page->mapping->host;

		spin_lock(&inode->i_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing; we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(&inode->i_lock);
	} else
		dprintk(" OK\n");

out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req, data);
	nfs_writedata_release(calldata);
}

void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
}

void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_prepare = nfs_write_prepare,
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writeback_release_partial,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;

	nfs_writeback_done(task, data);
}

static void nfs_writeback_release_full(void *calldata)
{
	struct nfs_write_data	*data = calldata;
	int status = data->task.tk_status;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);
		struct page *page = req->wb_page;

		nfs_list_remove_request(req);

		dprintk("NFS: %5u write (%s/%lld %d@%lld)",
			data->task.tk_pid,
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (status < 0) {
			nfs_set_pageerror(page);
			nfs_context_set_write_error(req->wb_context, status);
			dprintk(", error = %d\n", status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, data->lseg);
			dprintk(" marked for commit\n");
			goto next;
		}
		dprintk(" OK\n");
remove_request:
		nfs_inode_remove_request(req);
	next:
		nfs_unlock_request(req);
		nfs_end_page_writeback(page);
	}
	nfs_writedata_release(calldata);
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_prepare = nfs_write_prepare,
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writeback_release_full,
};

/*
 * This function is called when the WRITE call is complete.
 */
void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long    complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS:       faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long    complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				data->mds_offset += resp->count;
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call_prepare(task);
			return;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
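/*
 * Serialise commits per inode via the NFS_INO_COMMIT bit. Returns 1 once
 * the bit is owned, 0 if it is busy and @may_wait is not set, or a
 * negative errno if the killable wait was interrupted.
 */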
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
	int ret;

	if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
		return 1;
	if (!may_wait)
		return 0;
	ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
	return (ret < 0) ? ret : 1;
}

void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct rpc_call_ops *call_ops,
			int how)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
			    struct list_head *head,
			    struct pnfs_layout_segment *lseg)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = first->wb_context->dentry->d_inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->lseg	  = lseg; /* reference transferred */
	data->mds_ops     = &nfs_commit_ops;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context     = get_nfs_open_context(first->wb_context);
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
			     BDI_RECLAIMABLE);
		nfs_unlock_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_commit_data	*data;

	data = nfs_commitdata_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops, how);
 out_bad:
	nfs_retry_commit(head, NULL);
	nfs_commit_clear_lock(NFS_I(inode));
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data	*data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}

void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	struct nfs_page	*req;
	int status = data->task.tk_status;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		nfs_clear_page_commit(req->wb_page);

		dprintk("NFS:       commit (%s/%lld %d@%lld)",
			req->wb_context->dentry->d_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_unlock_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_commit_release_pages);

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	nfs_commit_release_pages(data);
	nfs_commit_clear_lock(NFS_I(data->inode));
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	int may_wait = how & FLUSH_SYNC;
	int res;

	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
	if (res <= 0)
		goto out_mark_dirty;
	res = nfs_scan_commit(inode, &head);
	if (res) {
		int error;

		error = pnfs_commit_list(inode, &head, how);
		if (error == PNFS_NOT_ATTEMPTED)
			error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
		if (!may_wait)
			goto out_mark_dirty;
		error = wait_on_bit(&NFS_I(inode)->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (error < 0)
			return error;
	} else
		nfs_commit_clear_lock(NFS_I(inode));
	return res;
	/* Note: If we exit without ensuring that the commit is complete,
	 * we must mark the inode as dirty. Otherwise, future calls to
	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
	 * that the data is on the disk.
	 */
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return res;
}

static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	/* no commits means nothing needs to be done */
	if (!nfsi->ncommit)
		return ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (nfsi->ncommit <= (nfsi->npages >> 1))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
		return 0;
	}
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
#else
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	return 0;
}
#endif

int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_commit_unstable_pages(inode, wbc);
	if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
		int status;
		bool sync = true;

		if (wbc->sync_mode == WB_SYNC_NONE)
			sync = false;

		status = pnfs_layoutcommit_inode(inode, sync);
		if (status < 0)
			return status;
	}
	return ret;
}

/*
 * Flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return sync_inode(inode, &wbc);
}

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		wait_on_page_writeback(page);
		req = nfs_page_find_request(page);
		if (req == NULL)
			break;
		if (nfs_lock_request_dontget(req)) {
			nfs_clear_request_commit(req);
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret < 0)
			break;
	}
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

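	/*
	 * Loop until the page is clean: write it back while it is dirty,
	 * then commit until PG_private (the attached request) is gone.
	 */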
	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
	return 0;
out_error:
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	nfs_fscache_release_page(page, GFP_KERNEL);

	return migrate_page(mapping, newpage, page, mode);
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256MB.
	 */
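	/*
	 * Worked example, assuming 4K pages: with 1GB of RAM,
	 * totalram_pages = 262144, int_sqrt(262144) = 512, and
	 * 16 * 512 = 8192 shifted left by (PAGE_SHIFT-10) = 2 bits
	 * gives 32768k, matching the table above.
	 */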
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}