// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

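/* Allocate a zeroed I/O header from the read-data cache and mark it for reads. */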
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}
47
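
/* Free the header's scratch buffer (if any) and return it to the cache. */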
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
54
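
/* Complete a folio that needs no I/O: zero-fill it, mark it up to date, unlock. */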
static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	if (nfs_netfs_folio_unlock(folio))
		folio_unlock(folio);
	return 0;
}
63
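
/**
 * nfs_pageio_init_read - initialise a pageio descriptor for reads
 * @pgio: descriptor to initialise
 * @inode: inode being read from
 * @force_mds: if true, read through the MDS even when a pNFS layout
 *	driver is attached
 * @compl_ops: completion callbacks for the read
 *
 * Requests queued on the descriptor are capped at the server's rsize.
 */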
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
79
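
/*
 * Flush out any requests still queued on the descriptor, then account
 * the bytes and pages read against the inode's iostats.
 */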
void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

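/*
 * Switch a (possibly pNFS) descriptor back to plain reads through the
 * MDS, resetting the mirror's block size to the server's rsize.
 */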
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
113
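
/*
 * Attach a scratch buffer of @size bytes to the read header; it is
 * freed along with the header in nfs_readhdr_free(). Returns true on
 * success.
 */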
bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);
121
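
/*
 * Drop a read request: once every request in the page group has been
 * released, unlock the folio, then release the request itself.
 */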
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		folio_set_error(folio);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}
134
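
/* Mark the folio up to date once every request in its page group has done so. */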
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}
140
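
/*
 * Per-header completion: walk the request list, zero any tail beyond
 * the server's EOF, mark fully read page groups up to date, and record
 * any I/O error in the open context before releasing each request.
 */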
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);

out:
	hdr->release(hdr);
}
190
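
/*
 * Set up the RPC message for a READ and notify the netfs and tracing
 * hooks before the RPC task is transmitted.
 */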
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}
200
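
/* Release every queued request on @head with the given error. */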
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};
217
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
239
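
/*
 * Handle a short read: fail the I/O if no progress was made, ask pNFS
 * to retry through the MDS for non-RPC layout drivers, or advance the
 * arguments past the bytes already received and restart the RPC call.
 */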
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}
271
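
/*
 * Post-process a read reply: trim good_bytes at the server's EOF
 * marker, or schedule a retry if the reply was short without EOF.
 */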
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}
287
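
/*
 * Build a read request for @folio and queue it on the descriptor.
 * Folios beyond EOF are zeroed and completed without I/O; otherwise the
 * request length is rounded up to an rsize multiple (capped at the
 * folio size) and the part of the folio past EOF is zeroed up front.
 */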
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}
326
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	trace_nfs_aop_readpage(inode, folio);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(folio_size(folio));

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	ret = nfs_netfs_read_folio(file, folio);
	if (!ret)
		goto out;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
	ret = pgio.pg_error < 0 ? pgio.pg_error : 0;
	if (!ret) {
		ret = folio_wait_locked_killable(folio);
		if (!folio_test_uptodate(folio) && !ret)
			ret = xchg(&ctx->error, 0);
	}
out_put:
	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readpage_done(inode, folio, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}
391
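
/*
 * Readahead entry point: hand the window to the netfs/fscache layer
 * first, then fall back to queueing each folio for an async READ.
 * Without an open file, borrow any readable open context on the inode.
 */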
void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}
437
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};