xref: /openbmc/linux/fs/nfs/pagelist.c (revision 54cbac81)
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

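/*
 * Illustrative sketch (not part of the original file): callers are
 * expected to treat a 'false' return from nfs_pgarray_set() as an
 * allocation failure, since the request did not fit in the embedded
 * page_array and the kcalloc() fallback failed. The helper name below
 * is hypothetical.
 */
static inline int example_pgarray_setup(struct nfs_page_array *p,
					unsigned int pagecount)
{
	if (!nfs_pgarray_set(p, pagecount))
		return -ENOMEM;	/* pagevec allocation failed */
	return 0;
}
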
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

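/*
 * Worked example (added annotation, not in the original file): for an
 * I/O that starts at io_start == 0 with good_bytes == 16384, an error
 * reported at pos == 8192 truncates good_bytes to 8192 - 0 = 8192 and
 * clears the EOF flag; an error at or beyond io_start + good_bytes is
 * ignored, because an earlier failure already shortened the good range.
 */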
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to read from or write to
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;
	struct nfs_lock_context *l_ctx;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted later if the
	 * region is not locked. */
	req->wb_page    = page;
	req->wb_index	= page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}

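/*
 * Illustrative sketch (not part of the original file): creating a
 * request for a locked page and dropping the caller's reference again.
 * nfs_create_request() returns ERR_PTR(-ENOMEM) or the ERR_CAST of a
 * lock-context error, so failures are decoded with PTR_ERR(). The
 * helper name is hypothetical.
 */
static int example_make_request(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	struct nfs_page *req;

	/* the page must already be locked by the caller */
	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* ... hand req off for I/O here ... */
	nfs_release_request(req);	/* drop the kref from kref_init() */
	return 0;
}
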
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}


static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible: the caller sleeps until the request
 * completes.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return false;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

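/*
 * Illustrative sketch (not part of the original file): initialising a
 * descriptor for writes. The ops tables passed in are stand-ins; real
 * callers supply the read or write implementations from read.c and
 * write.c. Using the server's negotiated wsize as the block size is
 * the usual pattern for writes.
 */
static void example_pgio_setup(struct nfs_pageio_descriptor *desc,
			       struct inode *inode,
			       const struct nfs_pageio_ops *example_ops,
			       const struct nfs_pgio_completion_ops *example_compl_ops)
{
	nfs_pageio_init(desc, inode, example_ops, example_compl_ops,
			NFS_SERVER(inode)->wsize, 0);
}
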
/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to the io descriptor performing the coalescing
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return false;
	if (req->wb_lock_context->lockowner.l_owner != prev->wb_lock_context->lockowner.l_owner)
		return false;
	if (req->wb_lock_context->lockowner.l_pid != prev->wb_lock_context->lockowner.l_pid)
		return false;
	if (req->wb_context->state != prev->wb_context->state)
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

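/*
 * Worked example (added annotation, not in the original file): with 4k
 * pages, a request covering all of page index 7 coalesces with one
 * covering all of page index 8. 'prev' ends at its page boundary
 * (wb_pgbase + wb_bytes == PAGE_CACHE_SIZE), 'req' starts at its page
 * base (wb_pgbase == 0), and the byte ranges meet exactly:
 * req_offset(req) == req_offset(prev) + prev->wb_bytes, i.e.
 * 8 * 4096 == 7 * 4096 + 4096.
 */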
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

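/*
 * Illustrative sketch (not part of the original file): the usual
 * driving pattern. Each locked page is wrapped in an nfs_page and fed
 * to the descriptor; whatever is still coalesced at the end is flushed
 * by nfs_pageio_complete(). The helper and its parameters are
 * hypothetical.
 */
static int example_flush_pages(struct nfs_pageio_descriptor *desc,
			       struct nfs_open_context *ctx,
			       struct inode *inode,
			       struct page **pages, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct nfs_page *req;

		req = nfs_create_request(ctx, inode, pages[i],
					 0, PAGE_CACHE_SIZE);
		if (IS_ERR(req))
			return PTR_ERR(req);
		if (!nfs_pageio_add_request(desc, req)) {
			/* add failed: pg_error holds the I/O error */
			nfs_release_request(req);
			return desc->pg_error;
		}
	}
	nfs_pageio_complete(desc);
	return desc->pg_error;
}
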
/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

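/*
 * Illustrative sketch (not part of the original file): before sleeping
 * on a busy request, flush any pages already coalesced in 'desc' that
 * are not contiguous with it, so the process cannot deadlock against
 * its own descriptor. The helper name is hypothetical.
 */
static int example_wait_for_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	nfs_pageio_cond_complete(desc, req->wb_index);
	return nfs_wait_on_request(req);
}
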
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}