/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep,
						      GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
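	/* Hand the request to the pageio engine; if it cannot be queued,
	 * the engine has already recorded the failure in pg_error, so
	 * drop the request here and report pg_error below. */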
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);
			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(inode, hdr->io_start, hdr->good_bytes);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};
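/*
 * Note: the "error" argument to nfs_async_read_error() is required by
 * the error_cleanup signature but unused here; releasing each request
 * unlocks its page without marking it uptodate, which is how the
 * failure becomes visible to the VFS.
 */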
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(inode, task->tk_status,
				hdr->args.offset, hdr->res.eof);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}
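/*
 * Worked example of the short-read handling above: for a 16384-byte
 * request at offset 0 that the server answers with 4096 bytes and no
 * eof, nfs_readpage_retry() bumps offset and pgbase by 4096, shrinks
 * count to 12288 and restarts the RPC.  good_bytes is only truncated
 * (and NFS_IOHDR_EOF set) when the server reports eof inside the
 * requested range.
 */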
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
		error = desc->pgio->pg_error;
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}
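/*
 * readpage_async_filler() is called once per page by read_cache_pages()
 * in nfs_readpages() below; if it returns an error, read_cache_pages()
 * releases the remaining pages on the list, so the filler only has to
 * unlock the page it failed on.
 */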
int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};
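/*
 * nfs_rw_read_ops is the read-side half of the generic NFS pageio
 * machinery: nfs_pageio_init_read() wires it into the descriptor, and
 * the generic pageio code calls back through rw_initiate, rw_done and
 * rw_result as the READ RPCs are issued and complete.
 */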