// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
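
/*
 * Complete the I/O on a pageio descriptor and account for what was read:
 * the byte count is added to the inode's read_io total, and the page
 * count (rounded up) to the NFSIOS_READPAGES statistic.
 */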
static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
	struct page *page = req->wb_page;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		SetPageError(page);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		struct address_space *mapping = page_file_mapping(page);

		if (PageUptodate(page))
			nfs_readpage_to_fscache(inode, page, 0);
		else if (!PageError(page) && !PagePrivate(page))
			generic_error_remove_page(mapping, page);
		unlock_page(page);
	}
	nfs_release_request(req);
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
};

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}
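
/*
 * Per-header completion: walk the requests attached to @hdr, zeroing any
 * tail that the server did not return (a read beyond EOF), marking page
 * groups up to date when all of their bytes arrived, and releasing each
 * request. Headers flagged for a redo are released with their page list
 * left untouched.
 */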
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(hdr);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
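
/*
 * Handle a short read from the server: either give up (no progress at
 * all), punt the header back to the MDS for non-RPC-based layout
 * drivers, or advance the arguments past the bytes already received
 * and restart the RPC to fetch the remainder.
 */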
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non-RPC-based layout drivers, retry through the MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = data;
	struct inode *inode = page_file_mapping(page)->host;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);

	new = nfs_create_request(desc->ctx, page, 0, aligned_len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(&desc->pgio, new)) {
		nfs_list_remove_request(new);
		error = desc->pgio.pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_readdesc desc;
	struct inode *inode = page_file_mapping(page)->host;
	int ret;

	trace_nfs_aop_readpage(inode, page);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	ret = nfs_wb_page(inode, page);
	if (ret)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out_unlock;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&desc.ctx->error, 0);
	if (!IS_SYNC(inode)) {
		ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
		if (ret == 0)
			goto out_wait;
	}

	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = readpage_async_filler(&desc, page);
	if (ret)
		goto out;

	nfs_pageio_complete_read(&desc.pgio);
	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
out_wait:
	if (!ret) {
		ret = wait_on_page_locked_killable(page);
		if (!PageUptodate(page) && !ret)
			ret = xchg(&desc.ctx->error, 0);
	}
out:
	put_nfs_open_context(desc.ctx);
	trace_nfs_aop_readpage_done(inode, page, ret);
	return ret;
out_unlock:
	unlock_page(page);
	trace_nfs_aop_readpage_done(inode, page, ret);
	return ret;
}
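
/*
 * Readahead entry point: try to satisfy as many pages as possible from
 * fscache, then feed whatever remains through readpage_async_filler()
 * into a freshly initialized pageio descriptor.
 */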
int nfs_readpages(struct file *file, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_readdesc desc;
	struct inode *inode = mapping->host;
	int ret;

	trace_nfs_aop_readahead(inode, lru_to_page(pages), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete_read(&desc.pgio);

read_complete:
	put_nfs_open_context(desc.ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};