xref: /openbmc/linux/fs/nfs/write.c (revision a17627ef)
1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8 
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 
17 #include <linux/sunrpc/clnt.h>
18 #include <linux/nfs_fs.h>
19 #include <linux/nfs_mount.h>
20 #include <linux/nfs_page.h>
21 #include <linux/backing-dev.h>
22 
23 #include <asm/uaccess.h>
24 
25 #include "delegation.h"
26 #include "internal.h"
27 #include "iostat.h"
28 
29 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
30 
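/*
 * Minimum numbers of preallocated write and commit data structures
 * kept in the mempools below, so that writeback can make forward
 * progress even under memory pressure.
 */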
31 #define MIN_POOL_WRITE		(32)
32 #define MIN_POOL_COMMIT		(4)
33 
34 /*
35  * Local function declarations
36  */
37 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
38 					    struct page *,
39 					    unsigned int, unsigned int);
40 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
41 				  struct inode *inode, int ioflags);
42 static const struct rpc_call_ops nfs_write_partial_ops;
43 static const struct rpc_call_ops nfs_write_full_ops;
44 static const struct rpc_call_ops nfs_commit_ops;
45 
46 static struct kmem_cache *nfs_wdata_cachep;
47 static mempool_t *nfs_wdata_mempool;
48 static mempool_t *nfs_commit_mempool;
49 
50 struct nfs_write_data *nfs_commit_alloc(void)
51 {
52 	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
53 
54 	if (p) {
55 		memset(p, 0, sizeof(*p));
56 		INIT_LIST_HEAD(&p->pages);
57 	}
58 	return p;
59 }
60 
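/*
 * Freeing is deferred through call_rcu_bh() so that the structure,
 * which embeds the rpc_task, is not handed back to the mempool while
 * other CPUs may still reference that task under rcu_read_lock_bh().
 */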
61 static void nfs_commit_rcu_free(struct rcu_head *head)
62 {
63 	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
64 	if (p && (p->pagevec != &p->page_array[0]))
65 		kfree(p->pagevec);
66 	mempool_free(p, nfs_commit_mempool);
67 }
68 
69 void nfs_commit_free(struct nfs_write_data *wdata)
70 {
71 	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
72 }
73 
74 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
75 {
76 	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
77 
78 	if (p) {
79 		memset(p, 0, sizeof(*p));
80 		INIT_LIST_HEAD(&p->pages);
81 		p->npages = pagecount;
82 		if (pagecount <= ARRAY_SIZE(p->page_array))
83 			p->pagevec = p->page_array;
84 		else {
85 			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
86 			if (!p->pagevec) {
87 				mempool_free(p, nfs_wdata_mempool);
88 				p = NULL;
89 			}
90 		}
91 	}
92 	return p;
93 }
94 
95 static void nfs_writedata_rcu_free(struct rcu_head *head)
96 {
97 	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
98 	if (p && (p->pagevec != &p->page_array[0]))
99 		kfree(p->pagevec);
100 	mempool_free(p, nfs_wdata_mempool);
101 }
102 
103 static void nfs_writedata_free(struct nfs_write_data *wdata)
104 {
105 	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
106 }
107 
108 void nfs_writedata_release(void *wdata)
109 {
110 	nfs_writedata_free(wdata);
111 }
112 
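/*
 * Look up the nfs_page write request attached to @page via its
 * page_private field.  The caller must hold the inode's req_lock;
 * a reference is taken on the request before it is returned.
 */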
113 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
114 {
115 	struct nfs_page *req = NULL;
116 
117 	if (PagePrivate(page)) {
118 		req = (struct nfs_page *)page_private(page);
119 		if (req != NULL)
120 			atomic_inc(&req->wb_count);
121 	}
122 	return req;
123 }
124 
125 static struct nfs_page *nfs_page_find_request(struct page *page)
126 {
127 	struct nfs_page *req = NULL;
128 	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
129 
130 	spin_lock(req_lock);
131 	req = nfs_page_find_request_locked(page);
132 	spin_unlock(req_lock);
133 	return req;
134 }
135 
136 /* Adjust the file length if we're writing beyond the end */
137 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
138 {
139 	struct inode *inode = page->mapping->host;
140 	loff_t end, i_size = i_size_read(inode);
141 	pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
142 
143 	if (i_size > 0 && page->index < end_index)
144 		return;
145 	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
146 	if (i_size >= end)
147 		return;
148 	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
149 	i_size_write(inode, end);
150 }
151 
152 /* A writeback failed: mark the page as bad, and invalidate the page cache */
153 static void nfs_set_pageerror(struct page *page)
154 {
155 	SetPageError(page);
156 	nfs_zap_mapping(page->mapping->host, page->mapping);
157 }
158 
159 /* We can set the PG_uptodate flag if we see that a write request
160  * covers the full page.
161  */
162 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
163 {
164 	if (PageUptodate(page))
165 		return;
166 	if (base != 0)
167 		return;
168 	if (count != nfs_page_length(page))
169 		return;
170 	if (count != PAGE_CACHE_SIZE)
171 		zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0);
172 	SetPageUptodate(page);
173 }
174 
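/*
 * Attach a write request covering @offset/@count of @page, creating or
 * extending one as needed.  If a conflicting request exists (-EBUSY),
 * flush the page and retry until the update either succeeds or fails
 * with a hard error.
 */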
175 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
176 		unsigned int offset, unsigned int count)
177 {
178 	struct nfs_page	*req;
179 	int ret;
180 
181 	for (;;) {
182 		req = nfs_update_request(ctx, page, offset, count);
183 		if (!IS_ERR(req))
184 			break;
185 		ret = PTR_ERR(req);
186 		if (ret != -EBUSY)
187 			return ret;
188 		ret = nfs_wb_page(page->mapping->host, page);
189 		if (ret != 0)
190 			return ret;
191 	}
192 	/* Update file length */
193 	nfs_grow_file(page, offset, count);
194 	/* Set the PG_uptodate flag? */
195 	nfs_mark_uptodate(page, offset, count);
196 	nfs_unlock_request(req);
197 	return 0;
198 }
199 
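/*
 * Map writeback_control hints onto NFS flush flags: memory reclaim gets
 * a high-priority stable write, periodic kupdate writeback runs at low
 * priority, and everything else uses the defaults.
 */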
200 static int wb_priority(struct writeback_control *wbc)
201 {
202 	if (wbc->for_reclaim)
203 		return FLUSH_HIGHPRI | FLUSH_STABLE;
204 	if (wbc->for_kupdate)
205 		return FLUSH_LOWPRI;
206 	return 0;
207 }
208 
209 /*
210  * NFS congestion control
211  */
212 
213 int nfs_congestion_kb;
214 
215 #define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
216 #define NFS_CONGESTION_OFF_THRESH	\
217 	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
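
/*
 * The backing device is marked congested once the amount of outstanding
 * writeback exceeds NFS_CONGESTION_ON_THRESH, and is only uncongested
 * again after it drops below NFS_CONGESTION_OFF_THRESH (75% of the "on"
 * threshold), which gives the congestion signal some hysteresis.
 */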
218 
219 static int nfs_set_page_writeback(struct page *page)
220 {
221 	int ret = test_set_page_writeback(page);
222 
223 	if (!ret) {
224 		struct inode *inode = page->mapping->host;
225 		struct nfs_server *nfss = NFS_SERVER(inode);
226 
227 		if (atomic_long_inc_return(&nfss->writeback) >
228 				NFS_CONGESTION_ON_THRESH)
229 			set_bdi_congested(&nfss->backing_dev_info, WRITE);
230 	}
231 	return ret;
232 }
233 
234 static void nfs_end_page_writeback(struct page *page)
235 {
236 	struct inode *inode = page->mapping->host;
237 	struct nfs_server *nfss = NFS_SERVER(inode);
238 
239 	end_page_writeback(page);
240 	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
241 		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
242 		congestion_end(WRITE);
243 	}
244 }
245 
246 /*
247  * Find an associated nfs write request, and prepare to flush it out
248  * Returns 1 if there was no write request, or if the request was
249  * already tagged by nfs_set_page_dirty. Returns 0 if the request
250  * was not tagged.
251  * May also return an error if the user signalled nfs_wait_on_request().
252  */
253 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
254 				struct page *page)
255 {
256 	struct nfs_page *req;
257 	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
258 	spinlock_t *req_lock = &nfsi->req_lock;
259 	int ret;
260 
261 	spin_lock(req_lock);
262 	for (;;) {
263 		req = nfs_page_find_request_locked(page);
264 		if (req == NULL) {
265 			spin_unlock(req_lock);
266 			return 1;
267 		}
268 		if (nfs_lock_request_dontget(req))
269 			break;
270 		/* Note: If we hold the page lock, as is the case in nfs_writepage,
271 		 *	 then the call to nfs_lock_request_dontget() will always
272 		 *	 succeed provided that someone hasn't already marked the
273 		 *	 request as dirty (in which case we don't care).
274 		 */
275 		spin_unlock(req_lock);
276 		ret = nfs_wait_on_request(req);
277 		nfs_release_request(req);
278 		if (ret != 0)
279 			return ret;
280 		spin_lock(req_lock);
281 	}
282 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
283 		/* This request is marked for commit */
284 		spin_unlock(req_lock);
285 		nfs_unlock_request(req);
286 		nfs_pageio_complete(pgio);
287 		return 1;
288 	}
289 	if (nfs_set_page_writeback(page) != 0) {
290 		spin_unlock(req_lock);
291 		BUG();
292 	}
293 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
294 			NFS_PAGE_TAG_WRITEBACK);
295 	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
296 	spin_unlock(req_lock);
297 	nfs_pageio_add_request(pgio, req);
298 	return ret;
299 }
300 
301 /*
302  * Write an mmapped page to the server.
303  */
304 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
305 {
306 	struct nfs_pageio_descriptor mypgio, *pgio;
307 	struct nfs_open_context *ctx;
308 	struct inode *inode = page->mapping->host;
309 	unsigned offset;
310 	int err;
311 
312 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
313 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
314 
315 	if (wbc->for_writepages)
316 		pgio = wbc->fs_private;
317 	else {
318 		nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc));
319 		pgio = &mypgio;
320 	}
321 
322 	nfs_pageio_cond_complete(pgio, page->index);
323 
324 	err = nfs_page_async_flush(pgio, page);
325 	if (err <= 0)
326 		goto out;
327 	err = 0;
328 	offset = nfs_page_length(page);
329 	if (!offset)
330 		goto out;
331 
332 	nfs_pageio_cond_complete(pgio, page->index);
333 
334 	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
335 	if (ctx == NULL) {
336 		err = -EBADF;
337 		goto out;
338 	}
339 	err = nfs_writepage_setup(ctx, page, 0, offset);
340 	put_nfs_open_context(ctx);
341 	if (err != 0)
342 		goto out;
343 	err = nfs_page_async_flush(pgio, page);
344 	if (err > 0)
345 		err = 0;
346 out:
347 	if (!wbc->for_writepages)
348 		nfs_pageio_complete(pgio);
349 	return err;
350 }
351 
352 int nfs_writepage(struct page *page, struct writeback_control *wbc)
353 {
354 	int err;
355 
356 	err = nfs_writepage_locked(page, wbc);
357 	unlock_page(page);
358 	return err;
359 }
360 
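/*
 * Writeback entry point for a whole mapping.  A single pageio
 * descriptor is shared with nfs_writepage() via wbc->fs_private so
 * that requests from consecutive pages can be coalesced into larger
 * RPCs.
 */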
361 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
362 {
363 	struct inode *inode = mapping->host;
364 	struct nfs_pageio_descriptor pgio;
365 	int err;
366 
367 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
368 
369 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
370 	wbc->fs_private = &pgio;
371 	err = generic_writepages(mapping, wbc);
372 	nfs_pageio_complete(&pgio);
373 	if (err)
374 		return err;
375 	if (pgio.pg_error)
376 		return pgio.pg_error;
377 	return 0;
378 }
379 
380 /*
381  * Insert a write request into an inode
382  */
383 static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
384 {
385 	struct nfs_inode *nfsi = NFS_I(inode);
386 	int error;
387 
388 	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
389 	BUG_ON(error == -EEXIST);
390 	if (error)
391 		return error;
392 	if (!nfsi->npages) {
393 		igrab(inode);
394 		nfs_begin_data_update(inode);
395 		if (nfs_have_delegation(inode, FMODE_WRITE))
396 			nfsi->change_attr++;
397 	}
398 	SetPagePrivate(req->wb_page);
399 	set_page_private(req->wb_page, (unsigned long)req);
400 	if (PageDirty(req->wb_page))
401 		set_bit(PG_NEED_FLUSH, &req->wb_flags);
402 	nfsi->npages++;
403 	atomic_inc(&req->wb_count);
404 	return 0;
405 }
406 
407 /*
408  * Remove a write request from an inode
409  */
410 static void nfs_inode_remove_request(struct nfs_page *req)
411 {
412 	struct inode *inode = req->wb_context->dentry->d_inode;
413 	struct nfs_inode *nfsi = NFS_I(inode);
414 
415 	BUG_ON(!NFS_WBACK_BUSY(req));
416 
417 	spin_lock(&nfsi->req_lock);
418 	set_page_private(req->wb_page, 0);
419 	ClearPagePrivate(req->wb_page);
420 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
421 	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
422 		__set_page_dirty_nobuffers(req->wb_page);
423 	nfsi->npages--;
424 	if (!nfsi->npages) {
425 		spin_unlock(&nfsi->req_lock);
426 		nfs_end_data_update(inode);
427 		iput(inode);
428 	} else
429 		spin_unlock(&nfsi->req_lock);
430 	nfs_clear_request(req);
431 	nfs_release_request(req);
432 }
433 
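/*
 * Mark the page dirty again so that a later writeback pass will retry
 * the request.
 */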
434 static void
435 nfs_redirty_request(struct nfs_page *req)
436 {
437 	__set_page_dirty_nobuffers(req->wb_page);
438 }
439 
440 /*
441  * Check if a request is dirty
442  */
443 static inline int
444 nfs_dirty_request(struct nfs_page *req)
445 {
446 	struct page *page = req->wb_page;
447 
448 	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
449 		return 0;
450 	return !PageWriteback(req->wb_page);
451 }
452 
453 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
454 /*
455  * Add a request to the inode's commit list.
456  */
457 static void
458 nfs_mark_request_commit(struct nfs_page *req)
459 {
460 	struct inode *inode = req->wb_context->dentry->d_inode;
461 	struct nfs_inode *nfsi = NFS_I(inode);
462 
463 	spin_lock(&nfsi->req_lock);
464 	nfs_list_add_request(req, &nfsi->commit);
465 	nfsi->ncommit++;
466 	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
467 	spin_unlock(&nfsi->req_lock);
468 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
469 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
470 }
471 
472 static inline
473 int nfs_write_need_commit(struct nfs_write_data *data)
474 {
475 	return data->verf.committed != NFS_FILE_SYNC;
476 }
477 
478 static inline
479 int nfs_reschedule_unstable_write(struct nfs_page *req)
480 {
481 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
482 		nfs_mark_request_commit(req);
483 		return 1;
484 	}
485 	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
486 		nfs_redirty_request(req);
487 		return 1;
488 	}
489 	return 0;
490 }
491 #else
492 static inline void
493 nfs_mark_request_commit(struct nfs_page *req)
494 {
495 }
496 
497 static inline
498 int nfs_write_need_commit(struct nfs_write_data *data)
499 {
500 	return 0;
501 }
502 
503 static inline
504 int nfs_reschedule_unstable_write(struct nfs_page *req)
505 {
506 	return 0;
507 }
508 #endif
509 
510 /*
511  * Wait for a request to complete.
512  *
513  * Interruptible by signals only if mounted with intr flag.
514  */
515 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
516 {
517 	struct nfs_inode *nfsi = NFS_I(inode);
518 	struct nfs_page *req;
519 	pgoff_t idx_end, next;
520 	unsigned int		res = 0;
521 	int			error;
522 
523 	if (npages == 0)
524 		idx_end = ~0;
525 	else
526 		idx_end = idx_start + npages - 1;
527 
528 	next = idx_start;
529 	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
530 		if (req->wb_index > idx_end)
531 			break;
532 
533 		next = req->wb_index + 1;
534 		BUG_ON(!NFS_WBACK_BUSY(req));
535 
536 		atomic_inc(&req->wb_count);
537 		spin_unlock(&nfsi->req_lock);
538 		error = nfs_wait_on_request(req);
539 		nfs_release_request(req);
540 		spin_lock(&nfsi->req_lock);
541 		if (error < 0)
542 			return error;
543 		res++;
544 	}
545 	return res;
546 }
547 
548 static void nfs_cancel_commit_list(struct list_head *head)
549 {
550 	struct nfs_page *req;
551 
552 	while (!list_empty(head)) {
553 		req = nfs_list_entry(head->next);
554 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
555 		nfs_list_remove_request(req);
556 		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
557 		nfs_inode_remove_request(req);
558 		nfs_unlock_request(req);
559 	}
560 }
561 
562 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
563 /*
564  * nfs_scan_commit - Scan an inode for commit requests
565  * @inode: NFS inode to scan
566  * @dst: destination list
567  * @idx_start: lower bound of page->index to scan.
568  * @npages: idx_start + npages sets the upper bound to scan.
569  *
570  * Moves requests from the inode's 'commit' request list.
571  * The requests are *not* checked to ensure that they form a contiguous set.
572  */
573 static int
574 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
575 {
576 	struct nfs_inode *nfsi = NFS_I(inode);
577 	int res = 0;
578 
579 	if (nfsi->ncommit != 0) {
580 		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
581 		nfsi->ncommit -= res;
582 		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
583 			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
584 	}
585 	return res;
586 }
587 #else
588 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
589 {
590 	return 0;
591 }
592 #endif
593 
594 /*
595  * Try to update any existing write request, or create one if there is none.
596  * In order to match, the request's credentials must match those of
597  * the calling process.
598  *
599  * Note: Should always be called with the Page Lock held!
600  */
601 static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
602 		struct page *page, unsigned int offset, unsigned int bytes)
603 {
604 	struct address_space *mapping = page->mapping;
605 	struct inode *inode = mapping->host;
606 	struct nfs_inode *nfsi = NFS_I(inode);
607 	struct nfs_page		*req, *new = NULL;
608 	pgoff_t		rqend, end;
609 
610 	end = offset + bytes;
611 
612 	for (;;) {
613 		/* Loop over all inode entries and see if we find
614 		 * a request for the page we wish to update
615 		 */
616 		spin_lock(&nfsi->req_lock);
617 		req = nfs_page_find_request_locked(page);
618 		if (req) {
619 			if (!nfs_lock_request_dontget(req)) {
620 				int error;
621 
622 				spin_unlock(&nfsi->req_lock);
623 				error = nfs_wait_on_request(req);
624 				nfs_release_request(req);
625 				if (error < 0) {
626 					if (new)
627 						nfs_release_request(new);
628 					return ERR_PTR(error);
629 				}
630 				continue;
631 			}
632 			spin_unlock(&nfsi->req_lock);
633 			if (new)
634 				nfs_release_request(new);
635 			break;
636 		}
637 
638 		if (new) {
639 			int error;
640 			nfs_lock_request_dontget(new);
641 			error = nfs_inode_add_request(inode, new);
642 			if (error) {
643 				spin_unlock(&nfsi->req_lock);
644 				nfs_unlock_request(new);
645 				return ERR_PTR(error);
646 			}
647 			spin_unlock(&nfsi->req_lock);
648 			return new;
649 		}
650 		spin_unlock(&nfsi->req_lock);
651 
652 		new = nfs_create_request(ctx, inode, page, offset, bytes);
653 		if (IS_ERR(new))
654 			return new;
655 	}
656 
657 	/* We have a request for our page.
658 	 * If the creds don't match, or the
659 	 * page addresses don't match,
660 	 * tell the caller to wait on the conflicting
661 	 * request.
662 	 */
663 	rqend = req->wb_offset + req->wb_bytes;
664 	if (req->wb_context != ctx
665 	    || req->wb_page != page
666 	    || !nfs_dirty_request(req)
667 	    || offset > rqend || end < req->wb_offset) {
668 		nfs_unlock_request(req);
669 		return ERR_PTR(-EBUSY);
670 	}
671 
672 	/* Okay, the request matches. Update the region */
673 	if (offset < req->wb_offset) {
674 		req->wb_offset = offset;
675 		req->wb_pgbase = offset;
676 		req->wb_bytes = rqend - req->wb_offset;
677 	}
678 
679 	if (end > rqend)
680 		req->wb_bytes = end - req->wb_offset;
681 
682 	return req;
683 }
684 
685 int nfs_flush_incompatible(struct file *file, struct page *page)
686 {
687 	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
688 	struct nfs_page	*req;
689 	int do_flush, status;
690 	/*
691 	 * Look for a request corresponding to this page. If there
692 	 * is one, and it belongs to another file, we flush it out
693 	 * before we try to copy anything into the page. Do this
694 	 * due to the lack of an ACCESS-type call in NFSv2.
695 	 * Also do the same if we find a request belonging to a page
696 	 * that has since been dropped from the page cache.
697 	 */
698 	do {
699 		req = nfs_page_find_request(page);
700 		if (req == NULL)
701 			return 0;
702 		do_flush = req->wb_page != page || req->wb_context != ctx
703 			|| !nfs_dirty_request(req);
704 		nfs_release_request(req);
705 		if (!do_flush)
706 			return 0;
707 		status = nfs_wb_page(page->mapping->host, page);
708 	} while (status == 0);
709 	return status;
710 }
711 
712 /*
713  * Update and possibly write a cached page of an NFS file.
714  *
715  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
716  * things with a page scheduled for an RPC call (e.g. invalidate it).
717  */
718 int nfs_updatepage(struct file *file, struct page *page,
719 		unsigned int offset, unsigned int count)
720 {
721 	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
722 	struct inode	*inode = page->mapping->host;
723 	int		status = 0;
724 
725 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
726 
727 	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
728 		file->f_path.dentry->d_parent->d_name.name,
729 		file->f_path.dentry->d_name.name, count,
730 		(long long)(page_offset(page) + offset));
731 
732 	/* If we're not using byte range locks, and we know the page
733 	 * is entirely in cache, it may be more efficient to avoid
734 	 * fragmenting write requests.
735 	 */
736 	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
737 		count = max(count + offset, nfs_page_length(page));
738 		offset = 0;
739 	}
740 
741 	status = nfs_writepage_setup(ctx, page, offset, count);
742 	__set_page_dirty_nobuffers(page);
743 
744 	dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
745 			status, (long long)i_size_read(inode));
746 	if (status < 0)
747 		nfs_set_pageerror(page);
748 	return status;
749 }
750 
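/*
 * All partial writes for this page have now completed.  If the page saw
 * an error, or the request could not be rescheduled (queued for commit
 * or redirtied), the request is finished with and removed from the
 * inode.
 */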
751 static void nfs_writepage_release(struct nfs_page *req)
752 {
753 
754 	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
755 		nfs_end_page_writeback(req->wb_page);
756 		nfs_inode_remove_request(req);
757 	} else
758 		nfs_end_page_writeback(req->wb_page);
759 	nfs_clear_page_writeback(req);
760 }
761 
762 static inline int flush_task_priority(int how)
763 {
764 	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
765 		case FLUSH_HIGHPRI:
766 			return RPC_PRIORITY_HIGH;
767 		case FLUSH_LOWPRI:
768 			return RPC_PRIORITY_LOW;
769 	}
770 	return RPC_PRIORITY_NORMAL;
771 }
772 
773 /*
774  * Set up the argument/result storage required for the RPC call.
775  */
776 static void nfs_write_rpcsetup(struct nfs_page *req,
777 		struct nfs_write_data *data,
778 		const struct rpc_call_ops *call_ops,
779 		unsigned int count, unsigned int offset,
780 		int how)
781 {
782 	struct inode		*inode;
783 	int flags;
784 
785 	/* Set up the RPC argument and reply structs
786 	 * NB: take care not to mess about with data->commit et al. */
787 
788 	data->req = req;
789 	data->inode = inode = req->wb_context->dentry->d_inode;
790 	data->cred = req->wb_context->cred;
791 
792 	data->args.fh     = NFS_FH(inode);
793 	data->args.offset = req_offset(req) + offset;
794 	data->args.pgbase = req->wb_pgbase + offset;
795 	data->args.pages  = data->pagevec;
796 	data->args.count  = count;
797 	data->args.context = req->wb_context;
798 
799 	data->res.fattr   = &data->fattr;
800 	data->res.count   = count;
801 	data->res.verf    = &data->verf;
802 	nfs_fattr_init(&data->fattr);
803 
804 	/* Set up the initial task struct.  */
805 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
806 	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
807 	NFS_PROTO(inode)->write_setup(data, how);
808 
809 	data->task.tk_priority = flush_task_priority(how);
810 	data->task.tk_cookie = (unsigned long)inode;
811 
812 	dprintk("NFS: %5u initiated write call "
813 		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
814 		data->task.tk_pid,
815 		inode->i_sb->s_id,
816 		(long long)NFS_FILEID(inode),
817 		count,
818 		(unsigned long long)data->args.offset);
819 }
820 
821 static void nfs_execute_write(struct nfs_write_data *data)
822 {
823 	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
824 	sigset_t oldset;
825 
826 	rpc_clnt_sigmask(clnt, &oldset);
827 	rpc_execute(&data->task);
828 	rpc_clnt_sigunmask(clnt, &oldset);
829 }
830 
831 /*
832  * Generate multiple small requests to write out a single
833  * contiguous dirty area on one page.
834  */
835 static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
836 {
837 	struct nfs_page *req = nfs_list_entry(head->next);
838 	struct page *page = req->wb_page;
839 	struct nfs_write_data *data;
840 	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
841 	unsigned int offset;
842 	int requests = 0;
843 	LIST_HEAD(list);
844 
845 	nfs_list_remove_request(req);
846 
847 	nbytes = count;
848 	do {
849 		size_t len = min(nbytes, wsize);
850 
851 		data = nfs_writedata_alloc(1);
852 		if (!data)
853 			goto out_bad;
854 		list_add(&data->pages, &list);
855 		requests++;
856 		nbytes -= len;
857 	} while (nbytes != 0);
858 	atomic_set(&req->wb_complete, requests);
859 
860 	ClearPageError(page);
861 	offset = 0;
862 	nbytes = count;
863 	do {
864 		data = list_entry(list.next, struct nfs_write_data, pages);
865 		list_del_init(&data->pages);
866 
867 		data->pagevec[0] = page;
868 
869 		if (nbytes < wsize)
870 			wsize = nbytes;
871 		nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
872 				   wsize, offset, how);
873 		offset += wsize;
874 		nbytes -= wsize;
875 		nfs_execute_write(data);
876 	} while (nbytes != 0);
877 
878 	return 0;
879 
880 out_bad:
881 	while (!list_empty(&list)) {
882 		data = list_entry(list.next, struct nfs_write_data, pages);
883 		list_del(&data->pages);
884 		nfs_writedata_release(data);
885 	}
886 	nfs_redirty_request(req);
887 	nfs_end_page_writeback(req->wb_page);
888 	nfs_clear_page_writeback(req);
889 	return -ENOMEM;
890 }
891 
892 /*
893  * Create an RPC task for the given write request and kick it.
894  * The page must have been locked by the caller.
895  *
896  * It may happen that the page we're passed is not marked dirty.
897  * This is the case if nfs_updatepage detects a conflicting request
898  * that has been written but not committed.
899  */
900 static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
901 {
902 	struct nfs_page		*req;
903 	struct page		**pages;
904 	struct nfs_write_data	*data;
905 
906 	data = nfs_writedata_alloc(npages);
907 	if (!data)
908 		goto out_bad;
909 
910 	pages = data->pagevec;
911 	while (!list_empty(head)) {
912 		req = nfs_list_entry(head->next);
913 		nfs_list_remove_request(req);
914 		nfs_list_add_request(req, &data->pages);
915 		ClearPageError(req->wb_page);
916 		*pages++ = req->wb_page;
917 	}
918 	req = nfs_list_entry(data->pages.next);
919 
920 	/* Set up the argument struct */
921 	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
922 
923 	nfs_execute_write(data);
924 	return 0;
925  out_bad:
926 	while (!list_empty(head)) {
927 		req = nfs_list_entry(head->next);
928 		nfs_list_remove_request(req);
929 		nfs_redirty_request(req);
930 		nfs_end_page_writeback(req->wb_page);
931 		nfs_clear_page_writeback(req);
932 	}
933 	return -ENOMEM;
934 }
935 
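/*
 * Pick the flush strategy for this inode: if the server's wsize is
 * smaller than a page, each page has to be split across several WRITE
 * calls (nfs_flush_multi); otherwise whole pages can be coalesced into
 * a single RPC (nfs_flush_one).
 */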
936 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
937 				  struct inode *inode, int ioflags)
938 {
939 	int wsize = NFS_SERVER(inode)->wsize;
940 
941 	if (wsize < PAGE_CACHE_SIZE)
942 		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
943 	else
944 		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
945 }
946 
947 /*
948  * Handle a write reply that flushed part of a page.
949  */
950 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
951 {
952 	struct nfs_write_data	*data = calldata;
953 	struct nfs_page		*req = data->req;
954 	struct page		*page = req->wb_page;
955 
956 	dprintk("NFS: write (%s/%Ld %d@%Ld)",
957 		req->wb_context->dentry->d_inode->i_sb->s_id,
958 		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
959 		req->wb_bytes,
960 		(long long)req_offset(req));
961 
962 	if (nfs_writeback_done(task, data) != 0)
963 		return;
964 
965 	if (task->tk_status < 0) {
966 		nfs_set_pageerror(page);
967 		req->wb_context->error = task->tk_status;
968 		dprintk(", error = %d\n", task->tk_status);
969 		goto out;
970 	}
971 
972 	if (nfs_write_need_commit(data)) {
973 		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
974 
975 		spin_lock(req_lock);
976 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
977 			/* Do nothing; we need to resend the writes */
978 		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
979 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
980 			dprintk(" defer commit\n");
981 		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
982 			set_bit(PG_NEED_RESCHED, &req->wb_flags);
983 			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
984 			dprintk(" server reboot detected\n");
985 		}
986 		spin_unlock(req_lock);
987 	} else
988 		dprintk(" OK\n");
989 
990 out:
991 	if (atomic_dec_and_test(&req->wb_complete))
992 		nfs_writepage_release(req);
993 }
994 
995 static const struct rpc_call_ops nfs_write_partial_ops = {
996 	.rpc_call_done = nfs_writeback_done_partial,
997 	.rpc_release = nfs_writedata_release,
998 };
999 
1000 /*
1001  * Handle a write reply that flushes a whole page.
1002  *
1003  * FIXME: There is an inherent race with invalidate_inode_pages and
1004  *	  writebacks since the page->count is kept > 1 for as long
1005  *	  as the page has a write request pending.
1006  */
1007 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1008 {
1009 	struct nfs_write_data	*data = calldata;
1010 	struct nfs_page		*req;
1011 	struct page		*page;
1012 
1013 	if (nfs_writeback_done(task, data) != 0)
1014 		return;
1015 
1016 	/* Update attributes as result of writeback. */
1017 	while (!list_empty(&data->pages)) {
1018 		req = nfs_list_entry(data->pages.next);
1019 		nfs_list_remove_request(req);
1020 		page = req->wb_page;
1021 
1022 		dprintk("NFS: write (%s/%Ld %d@%Ld)",
1023 			req->wb_context->dentry->d_inode->i_sb->s_id,
1024 			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1025 			req->wb_bytes,
1026 			(long long)req_offset(req));
1027 
1028 		if (task->tk_status < 0) {
1029 			nfs_set_pageerror(page);
1030 			req->wb_context->error = task->tk_status;
1031 			dprintk(", error = %d\n", task->tk_status);
1032 			goto remove_request;
1033 		}
1034 
1035 		if (nfs_write_need_commit(data)) {
1036 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1037 			nfs_mark_request_commit(req);
1038 			nfs_end_page_writeback(page);
1039 			dprintk(" marked for commit\n");
1040 			goto next;
1041 		}
1042 		dprintk(" OK\n");
1043 remove_request:
1044 		nfs_end_page_writeback(page);
1045 		nfs_inode_remove_request(req);
1046 	next:
1047 		nfs_clear_page_writeback(req);
1048 	}
1049 }
1050 
1051 static const struct rpc_call_ops nfs_write_full_ops = {
1052 	.rpc_call_done = nfs_writeback_done_full,
1053 	.rpc_release = nfs_writedata_release,
1054 };
1055 
1056 
1057 /*
1058  * This function is called when the WRITE call is complete.
1059  */
1060 int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1061 {
1062 	struct nfs_writeargs	*argp = &data->args;
1063 	struct nfs_writeres	*resp = &data->res;
1064 	int status;
1065 
1066 	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1067 		task->tk_pid, task->tk_status);
1068 
1069 	/*
1070 	 * ->write_done will attempt to use post-op attributes to detect
1071 	 * conflicting writes by other clients.  A strict interpretation
1072 	 * of close-to-open would allow us to continue caching even if
1073 	 * another writer had changed the file, but some applications
1074 	 * depend on tighter cache coherency when writing.
1075 	 */
1076 	status = NFS_PROTO(data->inode)->write_done(task, data);
1077 	if (status != 0)
1078 		return status;
1079 	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1080 
1081 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1082 	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1083 		/* We tried a write call, but the server did not
1084 		 * commit data to stable storage even though we
1085 		 * requested it.
1086 		 * Note: There is a known bug in Tru64 < 5.0 in which
1087 		 *	 the server reports NFS_DATA_SYNC, but performs
1088 		 *	 NFS_FILE_SYNC. We therefore implement this checking
1089 		 *	 as a dprintk() in order to avoid filling syslog.
1090 		 */
1091 		static unsigned long    complain;
1092 
1093 		if (time_before(complain, jiffies)) {
1094 			dprintk("NFS: faulty NFS server %s:"
1095 				" (committed = %d) != (stable = %d)\n",
1096 				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
1097 				resp->verf->committed, argp->stable);
1098 			complain = jiffies + 300 * HZ;
1099 		}
1100 	}
1101 #endif
1102 	/* Is this a short write? */
1103 	if (task->tk_status >= 0 && resp->count < argp->count) {
1104 		static unsigned long    complain;
1105 
1106 		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1107 
1108 		/* Has the server at least made some progress? */
1109 		if (resp->count != 0) {
1110 			/* Was this an NFSv2 write or an NFSv3 stable write? */
1111 			if (resp->verf->committed != NFS_UNSTABLE) {
1112 				/* Resend from where the server left off */
1113 				argp->offset += resp->count;
1114 				argp->pgbase += resp->count;
1115 				argp->count -= resp->count;
1116 			} else {
1117 				/* Resend as a stable write in order to avoid
1118 				 * headaches in the case of a server crash.
1119 				 */
1120 				argp->stable = NFS_FILE_SYNC;
1121 			}
1122 			rpc_restart_call(task);
1123 			return -EAGAIN;
1124 		}
1125 		if (time_before(complain, jiffies)) {
1126 			printk(KERN_WARNING
1127 			       "NFS: Server wrote zero bytes, expected %u.\n",
1128 					argp->count);
1129 			complain = jiffies + 300 * HZ;
1130 		}
1131 		/* Can't do anything about it except throw an error. */
1132 		task->tk_status = -EIO;
1133 	}
1134 	return 0;
1135 }
1136 
1137 
1138 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1139 void nfs_commit_release(void *wdata)
1140 {
1141 	nfs_commit_free(wdata);
1142 }
1143 
1144 /*
1145  * Set up the argument/result storage required for the RPC call.
1146  */
1147 static void nfs_commit_rpcsetup(struct list_head *head,
1148 		struct nfs_write_data *data,
1149 		int how)
1150 {
1151 	struct nfs_page		*first;
1152 	struct inode		*inode;
1153 	int flags;
1154 
1155 	/* Set up the RPC argument and reply structs
1156 	 * NB: take care not to mess about with data->commit et al. */
1157 
1158 	list_splice_init(head, &data->pages);
1159 	first = nfs_list_entry(data->pages.next);
1160 	inode = first->wb_context->dentry->d_inode;
1161 
1162 	data->inode	  = inode;
1163 	data->cred	  = first->wb_context->cred;
1164 
1165 	data->args.fh     = NFS_FH(data->inode);
1166 	/* Note: we always request a commit of the entire inode */
1167 	data->args.offset = 0;
1168 	data->args.count  = 0;
1169 	data->res.count   = 0;
1170 	data->res.fattr   = &data->fattr;
1171 	data->res.verf    = &data->verf;
1172 	nfs_fattr_init(&data->fattr);
1173 
1174 	/* Set up the initial task struct.  */
1175 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1176 	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
1177 	NFS_PROTO(inode)->commit_setup(data, how);
1178 
1179 	data->task.tk_priority = flush_task_priority(how);
1180 	data->task.tk_cookie = (unsigned long)inode;
1181 
1182 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1183 }
1184 
1185 /*
1186  * Commit dirty pages
1187  */
1188 static int
1189 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1190 {
1191 	struct nfs_write_data	*data;
1192 	struct nfs_page         *req;
1193 
1194 	data = nfs_commit_alloc();
1195 
1196 	if (!data)
1197 		goto out_bad;
1198 
1199 	/* Set up the argument struct */
1200 	nfs_commit_rpcsetup(head, data, how);
1201 
1202 	nfs_execute_write(data);
1203 	return 0;
1204  out_bad:
1205 	while (!list_empty(head)) {
1206 		req = nfs_list_entry(head->next);
1207 		nfs_list_remove_request(req);
1208 		nfs_mark_request_commit(req);
1209 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1210 		nfs_clear_page_writeback(req);
1211 	}
1212 	return -ENOMEM;
1213 }
1214 
1215 /*
1216  * COMMIT call returned
1217  */
1218 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1219 {
1220 	struct nfs_write_data	*data = calldata;
1221 	struct nfs_page		*req;
1222 
1223 	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1224 			task->tk_pid, task->tk_status);
1225 
1226 	/* Call the NFS version-specific code */
1227 	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
1228 		return;
1229 
1230 	while (!list_empty(&data->pages)) {
1231 		req = nfs_list_entry(data->pages.next);
1232 		nfs_list_remove_request(req);
1233 		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
1234 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1235 
1236 		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
1237 			req->wb_context->dentry->d_inode->i_sb->s_id,
1238 			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1239 			req->wb_bytes,
1240 			(long long)req_offset(req));
1241 		if (task->tk_status < 0) {
1242 			req->wb_context->error = task->tk_status;
1243 			nfs_inode_remove_request(req);
1244 			dprintk(", error = %d\n", task->tk_status);
1245 			goto next;
1246 		}
1247 
1248 		/* Okay, COMMIT succeeded, apparently. Check the verifier
1249 		 * returned by the server against all stored verfs. */
1250 		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1251 			/* We have a match */
1252 			nfs_inode_remove_request(req);
1253 			dprintk(" OK\n");
1254 			goto next;
1255 		}
1256 		/* We have a mismatch. Write the page again */
1257 		dprintk(" mismatch\n");
1258 		nfs_redirty_request(req);
1259 	next:
1260 		nfs_clear_page_writeback(req);
1261 	}
1262 }
1263 
1264 static const struct rpc_call_ops nfs_commit_ops = {
1265 	.rpc_call_done = nfs_commit_done,
1266 	.rpc_release = nfs_commit_release,
1267 };
1268 
1269 int nfs_commit_inode(struct inode *inode, int how)
1270 {
1271 	struct nfs_inode *nfsi = NFS_I(inode);
1272 	LIST_HEAD(head);
1273 	int res;
1274 
1275 	spin_lock(&nfsi->req_lock);
1276 	res = nfs_scan_commit(inode, &head, 0, 0);
1277 	spin_unlock(&nfsi->req_lock);
1278 	if (res) {
1279 		int error = nfs_commit_list(inode, &head, how);
1280 		if (error < 0)
1281 			return error;
1282 	}
1283 	return res;
1284 }
1285 #else
1286 static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1287 {
1288 	return 0;
1289 }
1290 #endif
1291 
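/*
 * Wait for all outstanding write requests in the range described by
 * @wbc to complete and, unless FLUSH_NOCOMMIT is set, commit any data
 * that the server has only written to unstable storage.  Returns a
 * negative errno on failure.
 */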
1292 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
1293 {
1294 	struct inode *inode = mapping->host;
1295 	struct nfs_inode *nfsi = NFS_I(inode);
1296 	pgoff_t idx_start, idx_end;
1297 	unsigned int npages = 0;
1298 	LIST_HEAD(head);
1299 	int nocommit = how & FLUSH_NOCOMMIT;
1300 	long pages, ret;
1301 
1302 	/* FIXME */
1303 	if (wbc->range_cyclic)
1304 		idx_start = 0;
1305 	else {
1306 		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
1307 		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
1308 		if (idx_end > idx_start) {
1309 			pgoff_t l_npages = 1 + idx_end - idx_start;
1310 			npages = l_npages;
1311 			if (sizeof(npages) != sizeof(l_npages) &&
1312 					(pgoff_t)npages != l_npages)
1313 				npages = 0;
1314 		}
1315 	}
1316 	how &= ~FLUSH_NOCOMMIT;
1317 	spin_lock(&nfsi->req_lock);
1318 	do {
1319 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
1320 		if (ret != 0)
1321 			continue;
1322 		if (nocommit)
1323 			break;
1324 		pages = nfs_scan_commit(inode, &head, idx_start, npages);
1325 		if (pages == 0)
1326 			break;
1327 		if (how & FLUSH_INVALIDATE) {
1328 			spin_unlock(&nfsi->req_lock);
1329 			nfs_cancel_commit_list(&head);
1330 			ret = pages;
1331 			spin_lock(&nfsi->req_lock);
1332 			continue;
1333 		}
1334 		pages += nfs_scan_commit(inode, &head, 0, 0);
1335 		spin_unlock(&nfsi->req_lock);
1336 		ret = nfs_commit_list(inode, &head, how);
1337 		spin_lock(&nfsi->req_lock);
1338 	} while (ret >= 0);
1339 	spin_unlock(&nfsi->req_lock);
1340 	return ret;
1341 }
1342 
1343 /*
1344  * flush the inode to disk.
1345  */
1346 int nfs_wb_all(struct inode *inode)
1347 {
1348 	struct address_space *mapping = inode->i_mapping;
1349 	struct writeback_control wbc = {
1350 		.bdi = mapping->backing_dev_info,
1351 		.sync_mode = WB_SYNC_ALL,
1352 		.nr_to_write = LONG_MAX,
1353 		.for_writepages = 1,
1354 		.range_cyclic = 1,
1355 	};
1356 	int ret;
1357 
1358 	ret = nfs_writepages(mapping, &wbc);
1359 	if (ret < 0)
1360 		goto out;
1361 	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
1362 	if (ret >= 0)
1363 		return 0;
1364 out:
1365 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1366 	return ret;
1367 }
1368 
1369 int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
1370 {
1371 	struct writeback_control wbc = {
1372 		.bdi = mapping->backing_dev_info,
1373 		.sync_mode = WB_SYNC_ALL,
1374 		.nr_to_write = LONG_MAX,
1375 		.range_start = range_start,
1376 		.range_end = range_end,
1377 		.for_writepages = 1,
1378 	};
1379 	int ret;
1380 
1381 	ret = nfs_writepages(mapping, &wbc);
1382 	if (ret < 0)
1383 		goto out;
1384 	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
1385 	if (ret >= 0)
1386 		return 0;
1387 out:
1388 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1389 	return ret;
1390 }
1391 
1392 int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
1393 {
1394 	loff_t range_start = page_offset(page);
1395 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1396 	struct writeback_control wbc = {
1397 		.bdi = page->mapping->backing_dev_info,
1398 		.sync_mode = WB_SYNC_ALL,
1399 		.nr_to_write = LONG_MAX,
1400 		.range_start = range_start,
1401 		.range_end = range_end,
1402 	};
1403 	int ret;
1404 
1405 	BUG_ON(!PageLocked(page));
1406 	if (clear_page_dirty_for_io(page)) {
1407 		ret = nfs_writepage_locked(page, &wbc);
1408 		if (ret < 0)
1409 			goto out;
1410 	}
1411 	if (!PagePrivate(page))
1412 		return 0;
1413 	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
1414 	if (ret >= 0)
1415 		return 0;
1416 out:
1417 	__mark_inode_dirty(inode, I_DIRTY_PAGES);
1418 	return ret;
1419 }
1420 
1421 /*
1422  * Write back all requests on one page - we do this before reading it.
1423  */
1424 int nfs_wb_page(struct inode *inode, struct page* page)
1425 {
1426 	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
1427 }
1428 
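/*
 * Mark @page dirty: if a write request is already attached to the page,
 * just flag it as needing a flush instead of redirtying the page;
 * otherwise fall back to __set_page_dirty_nobuffers().
 */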
1429 int nfs_set_page_dirty(struct page *page)
1430 {
1431 	struct address_space *mapping = page->mapping;
1432 	struct inode *inode;
1433 	spinlock_t *req_lock;
1434 	struct nfs_page *req;
1435 	int ret;
1436 
1437 	if (!mapping)
1438 		goto out_raced;
1439 	inode = mapping->host;
1440 	if (!inode)
1441 		goto out_raced;
1442 	req_lock = &NFS_I(inode)->req_lock;
1443 	spin_lock(req_lock);
1444 	req = nfs_page_find_request_locked(page);
1445 	if (req != NULL) {
1446 		/* Mark any existing write requests for flushing */
1447 		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
1448 		spin_unlock(req_lock);
1449 		nfs_release_request(req);
1450 		return ret;
1451 	}
1452 	ret = __set_page_dirty_nobuffers(page);
1453 	spin_unlock(req_lock);
1454 	return ret;
1455 out_raced:
1456 	return !TestSetPageDirty(page);
1457 }
1458 
1459 
1460 int __init nfs_init_writepagecache(void)
1461 {
1462 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1463 					     sizeof(struct nfs_write_data),
1464 					     0, SLAB_HWCACHE_ALIGN,
1465 					     NULL, NULL);
1466 	if (nfs_wdata_cachep == NULL)
1467 		return -ENOMEM;
1468 
1469 	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1470 						     nfs_wdata_cachep);
1471 	if (nfs_wdata_mempool == NULL)
1472 		return -ENOMEM;
1473 
1474 	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1475 						      nfs_wdata_cachep);
1476 	if (nfs_commit_mempool == NULL)
1477 		return -ENOMEM;
1478 
1479 	/*
1480 	 * NFS congestion size, scale with available memory.
1481 	 *
1482 	 *  64MB:    8192k
1483 	 * 128MB:   11585k
1484 	 * 256MB:   16384k
1485 	 * 512MB:   23170k
1486 	 *   1GB:   32768k
1487 	 *   2GB:   46340k
1488 	 *   4GB:   65536k
1489 	 *   8GB:   92681k
1490 	 *  16GB:  131072k
1491 	 *
1492 	 * This allows larger machines to have larger/more transfers.
1493 	 * Limit the default to 256M
1494 	 */
1495 	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1496 	if (nfs_congestion_kb > 256*1024)
1497 		nfs_congestion_kb = 256*1024;
1498 
1499 	return 0;
1500 }
1501 
1502 void nfs_destroy_writepagecache(void)
1503 {
1504 	mempool_destroy(nfs_commit_mempool);
1505 	mempool_destroy(nfs_wdata_mempool);
1506 	kmem_cache_destroy(nfs_wdata_cachep);
1507 }
1508 
1509