/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

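/*
 * Allocate a read-data struct from the dedicated mempool.  Requests of
 * up to ARRAY_SIZE(p->page_array) pages use the embedded page_array;
 * larger requests fall back to a kcalloc()ed page vector, and the
 * whole allocation is undone if that fails.
 */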
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
	struct nfs_read_data *rdata = data;

	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}

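/*
 * A request beyond the cached end-of-file needs no RPC at all: just
 * zero the page, mark it up to date and unlock it.
 */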
static int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

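/*
 * The server returned a short read that hit end-of-file: zero the
 * unfilled remainder of the request so that no uninitialised page
 * contents are exposed to the reader.
 */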
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 *	this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}

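/*
 * Build a single nfs_page request for @page and hand it to the read
 * path, splitting it into rsize-sized chunks if the server's rsize is
 * smaller than a page.
 */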
static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, 1, len, 0);
	else
		nfs_pagein_one(inode, &one_request, 1, len, 0);
	return 0;
}

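/*
 * A read of @req has completed (successfully or not): unlock the page
 * and drop our references to the request.
 */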
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	data->req	  = req;
	data->inode	  = inode;
	data->cred	  = msg.rpc_cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);
}

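/*
 * Error path for a list of queued requests: mark each page in error
 * and release the requests without issuing any RPCs.
 */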
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
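/*
 * For example (assuming a 4096-byte PAGE_CACHE_SIZE and rsize = 1024),
 * a full page is filled by four READ calls covering offsets 0, 1024,
 * 2048 and 3072 within the page; req->wb_complete counts the
 * outstanding sub-requests until the last one completes.
 */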
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
				  rsize, offset);
		offset += rsize;
		nbytes -= rsize;
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

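/*
 * Coalesce a list of contiguous requests into a single READ call
 * covering @count bytes spread over @npages pages.
 */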
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;

	data = nfs_readdata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
	return 0;
out_bad:
	nfs_async_read_error(head);
	return -ENOMEM;
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}

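/*
 * Handle a short read: if the server returned fewer bytes than
 * requested and we are not at end-of-file, advance the arguments past
 * the data already received and restart the call.  A reply that made
 * no progress at all is not retried.
 */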
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call(task);
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}

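/*
 * Completion for one sub-request of a multi-RPC page read: the page is
 * unlocked (and, if no sub-request failed, marked up to date) only
 * once the last outstanding sub-request has finished.
 */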
static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};

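/*
 * Mark as up to date every page that the reply filled completely.  A
 * trailing, partially filled page is only marked up to date if the
 * read was complete or hit end-of-file, in which case the rest of the
 * page has already been zeroed.
 */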
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};

/*
 * Read a page over NFS.
 * Any pending writes to the page are flushed before the read is
 * issued; the read itself goes through the async path.  The page's
 * error flag being set means that a previous async read of it failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	error = nfs_readpage_async(ctx, inode, page);

	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

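/* Per-call context handed to read_cache_pages() by nfs_readpages(). */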
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

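/*
 * read_cache_pages() callback: flush any pending writes to the page,
 * then queue it on the pageio descriptor so that contiguous pages can
 * be coalesced into larger READ calls where rsize allows.
 */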
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t rsize = server->rsize;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));
	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

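/*
 * Create the slab cache and the backing mempool; MIN_POOL_READ entries
 * are kept in reserve so that reads can still make progress under
 * memory pressure.
 */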
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL) {
		/* Don't leak the slab cache if the mempool can't be created */
		kmem_cache_destroy(nfs_rdata_cachep);
		return -ENOMEM;
	}

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}