1 /* 2 * linux/fs/nfs/read.c 3 * 4 * Block I/O for NFS 5 * 6 * Partial copy of Linus' read cache modifications to fs/nfs/file.c 7 * modified for async RPC by okir@monad.swb.de 8 */ 9 10 #include <linux/time.h> 11 #include <linux/kernel.h> 12 #include <linux/errno.h> 13 #include <linux/fcntl.h> 14 #include <linux/stat.h> 15 #include <linux/mm.h> 16 #include <linux/slab.h> 17 #include <linux/pagemap.h> 18 #include <linux/sunrpc/clnt.h> 19 #include <linux/nfs_fs.h> 20 #include <linux/nfs_page.h> 21 22 #include <asm/system.h> 23 24 #include "nfs4_fs.h" 25 #include "internal.h" 26 #include "iostat.h" 27 #include "fscache.h" 28 29 #define NFSDBG_FACILITY NFSDBG_PAGECACHE 30 31 static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int); 32 static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int); 33 static const struct rpc_call_ops nfs_read_partial_ops; 34 static const struct rpc_call_ops nfs_read_full_ops; 35 36 static struct kmem_cache *nfs_rdata_cachep; 37 static mempool_t *nfs_rdata_mempool; 38 39 #define MIN_POOL_READ (32) 40 41 struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) 42 { 43 struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_KERNEL); 44 45 if (p) { 46 memset(p, 0, sizeof(*p)); 47 INIT_LIST_HEAD(&p->pages); 48 p->npages = pagecount; 49 p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; 50 if (pagecount <= ARRAY_SIZE(p->page_array)) 51 p->pagevec = p->page_array; 52 else { 53 p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL); 54 if (!p->pagevec) { 55 mempool_free(p, nfs_rdata_mempool); 56 p = NULL; 57 } 58 } 59 } 60 return p; 61 } 62 63 void nfs_readdata_free(struct nfs_read_data *p) 64 { 65 if (p && (p->pagevec != &p->page_array[0])) 66 kfree(p->pagevec); 67 mempool_free(p, nfs_rdata_mempool); 68 } 69 70 static void nfs_readdata_release(struct nfs_read_data *rdata) 71 { 72 put_nfs_open_context(rdata->args.context); 73 nfs_readdata_free(rdata); 74 } 75 76 static 77 int nfs_return_empty_page(struct page *page) 78 { 79 zero_user(page, 0, PAGE_CACHE_SIZE); 80 SetPageUptodate(page); 81 unlock_page(page); 82 return 0; 83 } 84 85 static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data) 86 { 87 unsigned int remainder = data->args.count - data->res.count; 88 unsigned int base = data->args.pgbase + data->res.count; 89 unsigned int pglen; 90 struct page **pages; 91 92 if (data->res.eof == 0 || remainder == 0) 93 return; 94 /* 95 * Note: "remainder" can never be negative, since we check for 96 * this in the XDR code. 97 */ 98 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 99 base &= ~PAGE_CACHE_MASK; 100 pglen = PAGE_CACHE_SIZE - base; 101 for (;;) { 102 if (remainder <= pglen) { 103 zero_user(*pages, base, remainder); 104 break; 105 } 106 zero_user(*pages, base, pglen); 107 pages++; 108 remainder -= pglen; 109 pglen = PAGE_CACHE_SIZE; 110 base = 0; 111 } 112 } 113 114 int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, 115 struct page *page) 116 { 117 LIST_HEAD(one_request); 118 struct nfs_page *new; 119 unsigned int len; 120 121 len = nfs_page_length(page); 122 if (len == 0) 123 return nfs_return_empty_page(page); 124 new = nfs_create_request(ctx, inode, page, 0, len); 125 if (IS_ERR(new)) { 126 unlock_page(page); 127 return PTR_ERR(new); 128 } 129 if (len < PAGE_CACHE_SIZE) 130 zero_user_segment(page, len, PAGE_CACHE_SIZE); 131 132 nfs_list_add_request(new, &one_request); 133 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) 134 nfs_pagein_multi(inode, &one_request, 1, len, 0); 135 else 136 nfs_pagein_one(inode, &one_request, 1, len, 0); 137 return 0; 138 } 139 140 static void nfs_readpage_release(struct nfs_page *req) 141 { 142 struct inode *d_inode = req->wb_context->path.dentry->d_inode; 143 144 if (PageUptodate(req->wb_page)) 145 nfs_readpage_to_fscache(d_inode, req->wb_page, 0); 146 147 unlock_page(req->wb_page); 148 149 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", 150 req->wb_context->path.dentry->d_inode->i_sb->s_id, 151 (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 152 req->wb_bytes, 153 (long long)req_offset(req)); 154 nfs_clear_request(req); 155 nfs_release_request(req); 156 } 157 158 /* 159 * Set up the NFS read request struct 160 */ 161 static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data, 162 const struct rpc_call_ops *call_ops, 163 unsigned int count, unsigned int offset) 164 { 165 struct inode *inode = req->wb_context->path.dentry->d_inode; 166 int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0; 167 struct rpc_task *task; 168 struct rpc_message msg = { 169 .rpc_argp = &data->args, 170 .rpc_resp = &data->res, 171 .rpc_cred = req->wb_context->cred, 172 }; 173 struct rpc_task_setup task_setup_data = { 174 .task = &data->task, 175 .rpc_client = NFS_CLIENT(inode), 176 .rpc_message = &msg, 177 .callback_ops = call_ops, 178 .callback_data = data, 179 .workqueue = nfsiod_workqueue, 180 .flags = RPC_TASK_ASYNC | swap_flags, 181 }; 182 183 data->req = req; 184 data->inode = inode; 185 data->cred = msg.rpc_cred; 186 187 data->args.fh = NFS_FH(inode); 188 data->args.offset = req_offset(req) + offset; 189 data->args.pgbase = req->wb_pgbase + offset; 190 data->args.pages = data->pagevec; 191 data->args.count = count; 192 data->args.context = get_nfs_open_context(req->wb_context); 193 data->args.lock_context = req->wb_lock_context; 194 195 data->res.fattr = &data->fattr; 196 data->res.count = count; 197 data->res.eof = 0; 198 nfs_fattr_init(&data->fattr); 199 200 /* Set up the initial task struct. */ 201 NFS_PROTO(inode)->read_setup(data, &msg); 202 203 dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n", 204 data->task.tk_pid, 205 inode->i_sb->s_id, 206 (long long)NFS_FILEID(inode), 207 count, 208 (unsigned long long)data->args.offset); 209 210 task = rpc_run_task(&task_setup_data); 211 if (IS_ERR(task)) 212 return PTR_ERR(task); 213 rpc_put_task(task); 214 return 0; 215 } 216 217 static void 218 nfs_async_read_error(struct list_head *head) 219 { 220 struct nfs_page *req; 221 222 while (!list_empty(head)) { 223 req = nfs_list_entry(head->next); 224 nfs_list_remove_request(req); 225 SetPageError(req->wb_page); 226 nfs_readpage_release(req); 227 } 228 } 229 230 /* 231 * Generate multiple requests to fill a single page. 232 * 233 * We optimize to reduce the number of read operations on the wire. If we 234 * detect that we're reading a page, or an area of a page, that is past the 235 * end of file, we do not generate NFS read operations but just clear the 236 * parts of the page that would have come back zero from the server anyway. 237 * 238 * We rely on the cached value of i_size to make this determination; another 239 * client can fill pages on the server past our cached end-of-file, but we 240 * won't see the new data until our attribute cache is updated. This is more 241 * or less conventional NFS client behavior. 242 */ 243 static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags) 244 { 245 struct nfs_page *req = nfs_list_entry(head->next); 246 struct page *page = req->wb_page; 247 struct nfs_read_data *data; 248 size_t rsize = NFS_SERVER(inode)->rsize, nbytes; 249 unsigned int offset; 250 int requests = 0; 251 int ret = 0; 252 LIST_HEAD(list); 253 254 nfs_list_remove_request(req); 255 256 nbytes = count; 257 do { 258 size_t len = min(nbytes,rsize); 259 260 data = nfs_readdata_alloc(1); 261 if (!data) 262 goto out_bad; 263 list_add(&data->pages, &list); 264 requests++; 265 nbytes -= len; 266 } while(nbytes != 0); 267 atomic_set(&req->wb_complete, requests); 268 269 ClearPageError(page); 270 offset = 0; 271 nbytes = count; 272 do { 273 int ret2; 274 275 data = list_entry(list.next, struct nfs_read_data, pages); 276 list_del_init(&data->pages); 277 278 data->pagevec[0] = page; 279 280 if (nbytes < rsize) 281 rsize = nbytes; 282 ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops, 283 rsize, offset); 284 if (ret == 0) 285 ret = ret2; 286 offset += rsize; 287 nbytes -= rsize; 288 } while (nbytes != 0); 289 290 return ret; 291 292 out_bad: 293 while (!list_empty(&list)) { 294 data = list_entry(list.next, struct nfs_read_data, pages); 295 list_del(&data->pages); 296 nfs_readdata_free(data); 297 } 298 SetPageError(page); 299 nfs_readpage_release(req); 300 return -ENOMEM; 301 } 302 303 static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags) 304 { 305 struct nfs_page *req; 306 struct page **pages; 307 struct nfs_read_data *data; 308 int ret = -ENOMEM; 309 310 data = nfs_readdata_alloc(npages); 311 if (!data) 312 goto out_bad; 313 314 pages = data->pagevec; 315 while (!list_empty(head)) { 316 req = nfs_list_entry(head->next); 317 nfs_list_remove_request(req); 318 nfs_list_add_request(req, &data->pages); 319 ClearPageError(req->wb_page); 320 *pages++ = req->wb_page; 321 } 322 req = nfs_list_entry(data->pages.next); 323 324 return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0); 325 out_bad: 326 nfs_async_read_error(head); 327 return ret; 328 } 329 330 /* 331 * This is the callback from RPC telling us whether a reply was 332 * received or some error occurred (timeout or socket shutdown). 333 */ 334 int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data) 335 { 336 int status; 337 338 dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid, 339 task->tk_status); 340 341 status = NFS_PROTO(data->inode)->read_done(task, data); 342 if (status != 0) 343 return status; 344 345 nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count); 346 347 if (task->tk_status == -ESTALE) { 348 set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags); 349 nfs_mark_for_revalidate(data->inode); 350 } 351 return 0; 352 } 353 354 static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data) 355 { 356 struct nfs_readargs *argp = &data->args; 357 struct nfs_readres *resp = &data->res; 358 359 if (resp->eof || resp->count == argp->count) 360 return; 361 362 /* This is a short read! */ 363 nfs_inc_stats(data->inode, NFSIOS_SHORTREAD); 364 /* Has the server at least made some progress? */ 365 if (resp->count == 0) 366 return; 367 368 /* Yes, so retry the read at the end of the data */ 369 argp->offset += resp->count; 370 argp->pgbase += resp->count; 371 argp->count -= resp->count; 372 nfs_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client); 373 } 374 375 /* 376 * Handle a read reply that fills part of a page. 377 */ 378 static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata) 379 { 380 struct nfs_read_data *data = calldata; 381 382 if (nfs_readpage_result(task, data) != 0) 383 return; 384 if (task->tk_status < 0) 385 return; 386 387 nfs_readpage_truncate_uninitialised_page(data); 388 nfs_readpage_retry(task, data); 389 } 390 391 static void nfs_readpage_release_partial(void *calldata) 392 { 393 struct nfs_read_data *data = calldata; 394 struct nfs_page *req = data->req; 395 struct page *page = req->wb_page; 396 int status = data->task.tk_status; 397 398 if (status < 0) 399 SetPageError(page); 400 401 if (atomic_dec_and_test(&req->wb_complete)) { 402 if (!PageError(page)) 403 SetPageUptodate(page); 404 nfs_readpage_release(req); 405 } 406 nfs_readdata_release(calldata); 407 } 408 409 #if defined(CONFIG_NFS_V4_1) 410 void nfs_read_prepare(struct rpc_task *task, void *calldata) 411 { 412 struct nfs_read_data *data = calldata; 413 414 if (nfs4_setup_sequence(NFS_SERVER(data->inode), 415 &data->args.seq_args, &data->res.seq_res, 416 0, task)) 417 return; 418 rpc_call_start(task); 419 } 420 #endif /* CONFIG_NFS_V4_1 */ 421 422 static const struct rpc_call_ops nfs_read_partial_ops = { 423 #if defined(CONFIG_NFS_V4_1) 424 .rpc_call_prepare = nfs_read_prepare, 425 #endif /* CONFIG_NFS_V4_1 */ 426 .rpc_call_done = nfs_readpage_result_partial, 427 .rpc_release = nfs_readpage_release_partial, 428 }; 429 430 static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data) 431 { 432 unsigned int count = data->res.count; 433 unsigned int base = data->args.pgbase; 434 struct page **pages; 435 436 if (data->res.eof) 437 count = data->args.count; 438 if (unlikely(count == 0)) 439 return; 440 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 441 base &= ~PAGE_CACHE_MASK; 442 count += base; 443 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) 444 SetPageUptodate(*pages); 445 if (count == 0) 446 return; 447 /* Was this a short read? */ 448 if (data->res.eof || data->res.count == data->args.count) 449 SetPageUptodate(*pages); 450 } 451 452 /* 453 * This is the callback from RPC telling us whether a reply was 454 * received or some error occurred (timeout or socket shutdown). 455 */ 456 static void nfs_readpage_result_full(struct rpc_task *task, void *calldata) 457 { 458 struct nfs_read_data *data = calldata; 459 460 if (nfs_readpage_result(task, data) != 0) 461 return; 462 if (task->tk_status < 0) 463 return; 464 /* 465 * Note: nfs_readpage_retry may change the values of 466 * data->args. In the multi-page case, we therefore need 467 * to ensure that we call nfs_readpage_set_pages_uptodate() 468 * first. 469 */ 470 nfs_readpage_truncate_uninitialised_page(data); 471 nfs_readpage_set_pages_uptodate(data); 472 nfs_readpage_retry(task, data); 473 } 474 475 static void nfs_readpage_release_full(void *calldata) 476 { 477 struct nfs_read_data *data = calldata; 478 479 while (!list_empty(&data->pages)) { 480 struct nfs_page *req = nfs_list_entry(data->pages.next); 481 482 nfs_list_remove_request(req); 483 nfs_readpage_release(req); 484 } 485 nfs_readdata_release(calldata); 486 } 487 488 static const struct rpc_call_ops nfs_read_full_ops = { 489 #if defined(CONFIG_NFS_V4_1) 490 .rpc_call_prepare = nfs_read_prepare, 491 #endif /* CONFIG_NFS_V4_1 */ 492 .rpc_call_done = nfs_readpage_result_full, 493 .rpc_release = nfs_readpage_release_full, 494 }; 495 496 /* 497 * Read a page over NFS. 498 * We read the page synchronously in the following case: 499 * - The error flag is set for this page. This happens only when a 500 * previous async read operation failed. 501 */ 502 int nfs_readpage(struct file *file, struct page *page) 503 { 504 struct nfs_open_context *ctx; 505 struct inode *inode = page->mapping->host; 506 int error; 507 508 dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", 509 page, PAGE_CACHE_SIZE, page->index); 510 nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); 511 nfs_add_stats(inode, NFSIOS_READPAGES, 1); 512 513 /* 514 * Try to flush any pending writes to the file.. 515 * 516 * NOTE! Because we own the page lock, there cannot 517 * be any new pending writes generated at this point 518 * for this page (other pages can be written to). 519 */ 520 error = nfs_wb_page(inode, page); 521 if (error) 522 goto out_unlock; 523 if (PageUptodate(page)) 524 goto out_unlock; 525 526 error = -ESTALE; 527 if (NFS_STALE(inode)) 528 goto out_unlock; 529 530 if (file == NULL) { 531 error = -EBADF; 532 ctx = nfs_find_open_context(inode, NULL, FMODE_READ); 533 if (ctx == NULL) 534 goto out_unlock; 535 } else 536 ctx = get_nfs_open_context(nfs_file_open_context(file)); 537 538 if (!IS_SYNC(inode)) { 539 error = nfs_readpage_from_fscache(ctx, inode, page); 540 if (error == 0) 541 goto out; 542 } 543 544 error = nfs_readpage_async(ctx, inode, page); 545 546 out: 547 put_nfs_open_context(ctx); 548 return error; 549 out_unlock: 550 unlock_page(page); 551 return error; 552 } 553 554 struct nfs_readdesc { 555 struct nfs_pageio_descriptor *pgio; 556 struct nfs_open_context *ctx; 557 }; 558 559 static int 560 readpage_async_filler(void *data, struct page *page) 561 { 562 struct nfs_readdesc *desc = (struct nfs_readdesc *)data; 563 struct inode *inode = page->mapping->host; 564 struct nfs_page *new; 565 unsigned int len; 566 int error; 567 568 len = nfs_page_length(page); 569 if (len == 0) 570 return nfs_return_empty_page(page); 571 572 new = nfs_create_request(desc->ctx, inode, page, 0, len); 573 if (IS_ERR(new)) 574 goto out_error; 575 576 if (len < PAGE_CACHE_SIZE) 577 zero_user_segment(page, len, PAGE_CACHE_SIZE); 578 if (!nfs_pageio_add_request(desc->pgio, new)) { 579 error = desc->pgio->pg_error; 580 goto out_unlock; 581 } 582 return 0; 583 out_error: 584 error = PTR_ERR(new); 585 SetPageError(page); 586 out_unlock: 587 unlock_page(page); 588 return error; 589 } 590 591 int nfs_readpages(struct file *filp, struct address_space *mapping, 592 struct list_head *pages, unsigned nr_pages) 593 { 594 struct nfs_pageio_descriptor pgio; 595 struct nfs_readdesc desc = { 596 .pgio = &pgio, 597 }; 598 struct inode *inode = mapping->host; 599 struct nfs_server *server = NFS_SERVER(inode); 600 size_t rsize = server->rsize; 601 unsigned long npages; 602 int ret = -ESTALE; 603 604 dprintk("NFS: nfs_readpages (%s/%Ld %d)\n", 605 inode->i_sb->s_id, 606 (long long)NFS_FILEID(inode), 607 nr_pages); 608 nfs_inc_stats(inode, NFSIOS_VFSREADPAGES); 609 610 if (NFS_STALE(inode)) 611 goto out; 612 613 if (filp == NULL) { 614 desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ); 615 if (desc.ctx == NULL) 616 return -EBADF; 617 } else 618 desc.ctx = get_nfs_open_context(nfs_file_open_context(filp)); 619 620 /* attempt to read as many of the pages as possible from the cache 621 * - this returns -ENOBUFS immediately if the cookie is negative 622 */ 623 ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping, 624 pages, &nr_pages); 625 if (ret == 0) 626 goto read_complete; /* all pages were read */ 627 628 if (rsize < PAGE_CACHE_SIZE) 629 nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0); 630 else 631 nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0); 632 633 ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); 634 635 nfs_pageio_complete(&pgio); 636 npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 637 nfs_add_stats(inode, NFSIOS_READPAGES, npages); 638 read_complete: 639 put_nfs_open_context(desc.ctx); 640 out: 641 return ret; 642 } 643 644 int __init nfs_init_readpagecache(void) 645 { 646 nfs_rdata_cachep = kmem_cache_create("nfs_read_data", 647 sizeof(struct nfs_read_data), 648 0, SLAB_HWCACHE_ALIGN, 649 NULL); 650 if (nfs_rdata_cachep == NULL) 651 return -ENOMEM; 652 653 nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ, 654 nfs_rdata_cachep); 655 if (nfs_rdata_mempool == NULL) 656 return -ENOMEM; 657 658 return 0; 659 } 660 661 void nfs_destroy_readpagecache(void) 662 { 663 mempool_destroy(nfs_rdata_mempool); 664 kmem_cache_destroy(nfs_rdata_cachep); 665 } 666