/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  -	A write request is in progress.
 *  -	A user process is in generic_file_write/nfs_update_page
 *  -	A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  -	If a page is dirty, there will be no read requests (a page will
 *	not be re-read unless invalidated by nfs_revalidate_inode).
 *  -	If the page is not uptodate, there will be no pending write
 *	requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mpage.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_writeback_done_partial(struct nfs_write_data *, int);
static void nfs_writeback_done_full(struct nfs_write_data *, int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how);

static kmem_cache_t *nfs_wdata_cachep;
mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);

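/*
 * Allocate a commit descriptor. For small requests the page vector
 * embedded in struct nfs_write_data is used directly; larger requests
 * fall back to a separately kmalloc'd pagevec.
 */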
static inline struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		if (pagecount < NFS_PAGEVEC_SIZE)
			p->pagevec = &p->page_array[0];
		else {
			size_t size = ++pagecount * sizeof(struct page *);
			p->pagevec = kmalloc(size, GFP_NOFS);
			if (p->pagevec) {
				memset(p->pagevec, 0, size);
			} else {
				mempool_free(p, nfs_commit_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static inline void nfs_commit_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

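/*
 * Release callback for a write RPC descriptor: just hand it back to
 * nfs_writedata_free().
 */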
void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	i_size_write(inode, end);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

	end_offs = i_size_read(page->mapping->host) - 1;
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}

/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int	wsize = NFS_SERVER(inode)->wsize;
	int		result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(1);
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS:      nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	set_page_writeback(page);
	nfs_begin_data_update(inode);
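	/*
	 * Issue synchronous WRITEs in chunks of at most wsize bytes;
	 * after each reply, advance the offset and page base by the
	 * amount the server actually accepted, so a short write is
	 * simply retried for the remainder.
	 */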
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	end_page_writeback(page);
	nfs_writedata_free(wdata);
	return written ? written : result;
}

static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_update_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}

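/*
 * Map the writeback reason onto an RPC flush priority: reclaim-driven
 * writeback is most urgent, periodic kupdate writeback is least.
 */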
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (!wbc->for_writepages)
			nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0,
						offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}

/*
 * Note: causes nfs_update_request() to block on the assumption
 * 	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0) {
		wbc->nr_to_write -= err;
		err = 0;
	}
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}

static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page		*req;
	struct nfs_inode	*nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}

/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_page_state(nr_dirty);
	mark_inode_dirty(inode);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_page_state(nr_unstable);
	mark_inode_dirty(inode);
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int
nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long		idx_end, next;
	unsigned int		res = 0;
	int			error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	spin_lock(&nfsi->req_lock);
	next = idx_start;
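	/*
	 * Walk all requests tagged as being under writeback. Each one is
	 * pinned with an extra reference and the req_lock is dropped
	 * before sleeping in nfs_wait_on_request(); the gang lookup then
	 * resumes from 'next'.
	 */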
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error < 0)
			return error;
		spin_lock(&nfsi->req_lock);
		res++;
	}
	spin_unlock(&nfsi->req_lock);
	return res;
}

/*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's dirty page list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ndirty != 0) {
		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
		nfsi->ndirty -= res;
		sub_page_state(nr_dirty,res);
		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#endif

static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;
	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}


/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page		*req, *new = NULL;
	unsigned long		rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
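	/*
	 * An existing request may be locked by a flush in progress, and
	 * a newly created request can race with another writer inserting
	 * into the radix tree; loop until we either hold a locked
	 * existing request or have successfully inserted a fresh one.
	 */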
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;
				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0)
					return ERR_PTR(error);
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the page regions don't overlap,
	 * return -EBUSY so that the caller can flush out the conflicting
	 * request and retry.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = page->mapping->host;
	struct nfs_page	*req;
	int		status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = page->mapping->host;
	struct nfs_page	*req;
	int		status = 0;

	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}

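/*
 * Completion for a request once all of its partial writes are done:
 * end writeback on the page, then either redirty the request, move it
 * to the commit list, or retire it entirely.
 */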
static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);

out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}

static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode		*inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

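/*
 * Execute the write/commit RPC task with the RPC client's signal
 * mask applied for the duration of the call.
 */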
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

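	/*
	 * First pass: reserve one nfs_write_data per wsize-sized chunk
	 * up front, so that the dispatch loop below cannot fail on
	 * allocation once writeback has been started on the page.
	 */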
	nbytes = req->wb_bytes;
	for (;;) {
		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;
		data->complete = nfs_writeback_done_partial;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;
	unsigned int		count;

	if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
		return nfs_flush_multi(head, inode, how);

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	data->complete = nfs_writeback_done_full;
	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

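/*
 * Coalesce contiguous requests into batches of at most wpages pages
 * and flush each batch with nfs_flush_one(); anything still queued
 * after an error is redirtied.
 */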
static int
nfs_flush_list(struct list_head *head, int wpages, int how)
{
	LIST_HEAD(one_request);
	struct nfs_page		*req;
	int			error = 0;
	unsigned int		pages = 0;

	while (!list_empty(head)) {
		pages += nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how);
		if (error < 0)
			break;
	}
	if (error >= 0)
		return pages;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
{
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = status;
		dprintk(", error = %d\n", status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
{
	struct nfs_page		*req;
	struct page		*page;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

/*
 * This function is called when the WRITE call is complete.
 */
void nfs_writeback_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long    complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long    complain;

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}

	/*
	 * Process the nfs_page list
	 */
	data->complete(data, task->tk_status);
}


#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data, int how)
{
	struct nfs_page		*first;
	struct inode		*inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;
	struct nfs_page         *req;

	data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	int res = 0;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
		res++;
	}
	sub_page_state(nr_unstable,res);
}
#endif

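/*
 * Flush out all dirty requests falling within the given range. Small
 * flushes that cover everything outstanding are promoted to
 * FLUSH_STABLE, which saves the later COMMIT round trip. Returns the
 * number of requests flushed, or a negative error.
 */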
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int			res,
				error = 0;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		struct nfs_server *server = NFS_SERVER(inode);

		/* For single writes, FLUSH_STABLE is more efficient */
		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
				how |= FLUSH_STABLE;
		}
		error = nfs_flush_list(&head, server->wpages, how);
	}
	if (error < 0)
		return error;
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
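/*
 * Launch a COMMIT for every request currently on the inode's commit
 * list. Returns the number of requests committed, or a negative error.
 */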
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int			res,
				error = 0;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif

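/*
 * Flush, and unless FLUSH_NOCOMMIT is set also commit, every request
 * in the given range, looping until no more work is generated.
 * FLUSH_WAIT additionally waits for in-flight writes to complete.
 */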
int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
		  unsigned int npages, int how)
{
	int nocommit = how & FLUSH_NOCOMMIT;
	int wait = how & FLUSH_WAIT;
	int error;

	how &= ~(FLUSH_WAIT|FLUSH_NOCOMMIT);

	do {
		if (wait) {
			error = nfs_wait_on_requests(inode, idx_start, npages);
			if (error != 0)
				continue;
		}
		error = nfs_flush_inode(inode, idx_start, npages, how);
		if (error != 0)
			continue;
		if (!nocommit)
			error = nfs_commit_inode(inode, how);
	} while (error > 0);
	return error;
}

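/*
 * Create the slab cache and mempools backing write and commit
 * descriptors; the mempools guarantee a minimum number of descriptors
 * so that writeback can make progress under memory pressure.
 */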
int nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE,
					   mempool_alloc_slab,
					   mempool_free_slab,
					   nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT,
					   mempool_alloc_slab,
					   mempool_free_slab,
					   nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	if (kmem_cache_destroy(nfs_wdata_cachep))
		printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
}