--- pagelist.c (e655f945cd7ba2a8360ddd5462ca535a3b41e07e)
+++ pagelist.c (743162013d40ca612b4cb53d3a200dff2d9ab26e)
 /*
  * linux/fs/nfs/pagelist.c
  *
  * A set of helper functions for managing NFS read and write requests.
  * The main purpose of these routines is to provide support for the
  * coalescing of several requests into a single RPC call.
  *
  * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 [... 15 unchanged lines hidden ...]
 #include "internal.h"
 #include "pnfs.h"
 
 #define NFSDBG_FACILITY NFSDBG_PAGECACHE
 
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static void nfs_free_request(struct nfs_page *);
-
 static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
 {
         p->npages = pagecount;
         if (pagecount <= ARRAY_SIZE(p->page_array))
                 p->pagevec = p->page_array;
         else {
                 p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
                 if (!p->pagevec)
 [... 91 unchanged lines hidden ...]
 int
 nfs_iocounter_wait(struct nfs_io_counter *c)
 {
         if (atomic_read(&c->io_count) == 0)
                 return 0;
         return __nfs_iocounter_wait(c);
 }
 
-static int nfs_wait_bit_uninterruptible(void *word)
-{
-        io_schedule();
-        return 0;
-}
-
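Note: nfs_pgarray_set() above (identical in both versions) is the usual small-array-inline pattern: a request that fits in the fixed page_array embedded in struct nfs_page_array reuses it, and only larger requests fall back to kcalloc(). A minimal userspace sketch of the same pattern; the names (INLINE_PAGES, page_array_set) are illustrative, not from the kernel:

    #include <stdbool.h>
    #include <stdlib.h>

    #define INLINE_PAGES 8

    struct page;                            /* opaque here; stands in for struct page */

    struct page_array {
            struct page **pagevec;          /* points at inline storage or a heap vector */
            unsigned int npages;
            struct page *inline_vec[INLINE_PAGES];
    };

    static bool page_array_set(struct page_array *p, unsigned int pagecount)
    {
            p->npages = pagecount;
            if (pagecount <= INLINE_PAGES)
                    p->pagevec = p->inline_vec; /* small request: no allocation */
            else
                    p->pagevec = calloc(pagecount, sizeof(struct page *));
            return p->pagevec != NULL;
    }

    static void page_array_free(struct page_array *p)
    {
            /* mirror of the kfree() in the release paths below: only the
             * heap-allocated case owns separate memory */
            if (p->pagevec != p->inline_vec)
                    free(p->pagevec);
    }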
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
  *
  * this lock must be held if modifying the page group list
  */
 void
 nfs_page_group_lock(struct nfs_page *req)
 {
         struct nfs_page *head = req->wb_head;
 
         WARN_ON_ONCE(head != head->wb_head);
 
         wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-                        nfs_wait_bit_uninterruptible,
                         TASK_UNINTERRUPTIBLE);
 }
 
 /*
  * nfs_page_group_unlock - unlock the head of the page group
  * @req - request in group that is to be unlocked
  */
 void
 [... 64 unchanged lines hidden ...]
  * or only request in the group (the head).
  */
 static inline void
 nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
 {
         WARN_ON_ONCE(prev == req);
 
         if (!prev) {
-                /* a head request */
                 req->wb_head = req;
                 req->wb_this_page = req;
         } else {
-                /* a subrequest */
                 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
                 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
                 req->wb_head = prev->wb_head;
                 req->wb_this_page = prev->wb_this_page;
                 prev->wb_this_page = req;
 
-                /* All subrequests take a ref on the head request until
-                 * nfs_page_group_destroy is called */
-                kref_get(&req->wb_head->wb_kref);
-
                 /* grab extra ref if head request has extra ref from
                  * the write/commit path to handle handoff between write
                  * and commit lists */
-                if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
-                        set_bit(PG_INODE_REF, &req->wb_flags);
+                if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
                         kref_get(&req->wb_kref);
-                }
         }
 }
 
 /*
  * nfs_page_group_destroy - sync the destruction of page groups
  * @req - request that no longer needs the page group
  *
  * releases the page group reference from each member once all
  * members have called this function.
  */
 static void
 nfs_page_group_destroy(struct kref *kref)
 {
         struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
         struct nfs_page *tmp, *next;
 
-        /* subrequests must release the ref on the head request */
-        if (req->wb_head != req)
-                nfs_release_request(req->wb_head);
-
         if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
                 return;
 
         tmp = req;
         do {
                 next = tmp->wb_this_page;
                 /* unlink and free */
                 tmp->wb_this_page = tmp;
 [... 109 unchanged lines hidden ...]
 }
 
 /**
  * nfs_release_request - Release the count on an NFS read/write request
  * @req: request to release
  *
  * Note: Should never be called with the spinlock held!
  */
-void nfs_free_request(struct nfs_page *req)
+static void nfs_free_request(struct nfs_page *req)
 {
         WARN_ON_ONCE(req->wb_this_page != req);
 
         /* extra debug: make sure no sync bits are still set */
         WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
         WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
         WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
         WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
 [... 14 unchanged lines hidden ...]
  * @req: request to wait upon.
  *
  * Interruptible by fatal signals only.
  * The user is responsible for holding a count on the request.
  */
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-        return wait_on_bit(&req->wb_flags, PG_BUSY,
-                        nfs_wait_bit_uninterruptible,
-                        TASK_UNINTERRUPTIBLE);
+        return wait_on_bit_io(&req->wb_flags, PG_BUSY,
+                              TASK_UNINTERRUPTIBLE);
 }
 
 /*
  * nfs_generic_pg_test - determine if requests can be coalesced
  * @desc: pointer to descriptor
  * @prev: previous request in desc, or NULL
  * @req: this request
  *
 [... 8 unchanged lines hidden ...]
                 WARN_ON_ONCE(1);
                 return 0;
         }
 
         return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
 }
 EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
 
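Note: the wait primitives differ between the two versions. The e655f945 side passes an explicit action callback (the nfs_wait_bit_uninterruptible() helper removed above, which just calls io_schedule()) to wait_on_bit()/wait_on_bit_lock(); the 743162013 side uses wait_on_bit_io(), which performs the io_schedule()-based sleep internally, so the per-caller callback disappears. A kernel-side sketch of the two call shapes; each half is valid only against its own kernel version, and MY_FLAG_BIT plus the flag word are illustrative:

    /* e655f945 side: wait_on_bit() takes an "action" callback, so callers
     * that just want to sleep for I/O supply a trivial io_schedule() hook. */
    static int my_wait_action(void *word)
    {
            io_schedule();
            return 0;
    }

    static int wait_old_api(unsigned long *flags)
    {
            return wait_on_bit(flags, MY_FLAG_BIT, my_wait_action,
                               TASK_UNINTERRUPTIBLE);
    }

    /* 743162013 side: wait_on_bit_io() does the io_schedule()-style sleep
     * itself, so the per-caller action function is gone. */
    static int wait_new_api(unsigned long *flags)
    {
            return wait_on_bit_io(flags, MY_FLAG_BIT, TASK_UNINTERRUPTIBLE);
    }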
-struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
+static inline struct nfs_rw_header *NFS_RW_HEADER(struct nfs_pgio_header *hdr)
 {
-        struct nfs_pgio_header *hdr = ops->rw_alloc_header();
+        return container_of(hdr, struct nfs_rw_header, header);
+}
 
-        if (hdr) {
+/**
+ * nfs_rw_header_alloc - Allocate a header for a read or write
+ * @ops: Read or write function vector
+ */
+struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
+{
+        struct nfs_rw_header *header = ops->rw_alloc_header();
+
+        if (header) {
+                struct nfs_pgio_header *hdr = &header->header;
+
                 INIT_LIST_HEAD(&hdr->pages);
                 spin_lock_init(&hdr->lock);
+                atomic_set(&hdr->refcnt, 0);
                 hdr->rw_ops = ops;
         }
-        return hdr;
+        return header;
 }
-EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
+EXPORT_SYMBOL_GPL(nfs_rw_header_alloc);
 
 /*
- * nfs_pgio_header_free - Free a read or write header
+ * nfs_rw_header_free - Free a read or write header
  * @hdr: The header to free
  */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+void nfs_rw_header_free(struct nfs_pgio_header *hdr)
 {
-        hdr->rw_ops->rw_free_header(hdr);
+        hdr->rw_ops->rw_free_header(NFS_RW_HEADER(hdr));
 }
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+EXPORT_SYMBOL_GPL(nfs_rw_header_free);
 
 /**
- * nfs_pgio_data_destroy - make @hdr suitable for reuse
- *
- * Frees memory and releases refs from nfs_generic_pgio, so that it may
- * be called again.
- *
- * @hdr: A header that has had nfs_generic_pgio called
+ * nfs_pgio_data_alloc - Allocate pageio data
+ * @hdr: The header making a request
+ * @pagecount: Number of pages to create
  */
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
+                                                 unsigned int pagecount)
 {
-        put_nfs_open_context(hdr->args.context);
-        if (hdr->page_array.pagevec != hdr->page_array.page_array)
-                kfree(hdr->page_array.pagevec);
+        struct nfs_pgio_data *data, *prealloc;
+
+        prealloc = &NFS_RW_HEADER(hdr)->rpc_data;
+        if (prealloc->header == NULL)
+                data = prealloc;
+        else
+                data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                goto out;
+
+        if (nfs_pgarray_set(&data->pages, pagecount)) {
+                data->header = hdr;
+                atomic_inc(&hdr->refcnt);
+        } else {
+                if (data != prealloc)
+                        kfree(data);
+                data = NULL;
+        }
+out:
+        return data;
 }
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/**
+ * nfs_pgio_data_release - Properly free pageio data
+ * @data: The data to release
+ */
+void nfs_pgio_data_release(struct nfs_pgio_data *data)
+{
+        struct nfs_pgio_header *hdr = data->header;
+        struct nfs_rw_header *pageio_header = NFS_RW_HEADER(hdr);
+
+        put_nfs_open_context(data->args.context);
+        if (data->pages.pagevec != data->pages.page_array)
+                kfree(data->pages.pagevec);
+        if (data == &pageio_header->rpc_data) {
+                data->header = NULL;
+                data = NULL;
+        }
+        if (atomic_dec_and_test(&hdr->refcnt))
+                hdr->completion_ops->completion(hdr);
+        /* Note: we only free the rpc_task after callbacks are done.
+         * See the comment in rpc_free_task() for why
+         */
+        kfree(data);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_data_release);
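Note: the 743162013 side keeps the per-RPC state in a separate struct nfs_pgio_data and wraps one preallocated instance together with the header in struct nfs_rw_header; NFS_RW_HEADER() recovers the wrapper from the embedded header via container_of(), and nfs_pgio_data_alloc() hands out the embedded slot first, falling back to kzalloc(). On the e655f945 side header and RPC state are one object, so only the pagevec teardown survives (nfs_pgio_data_destroy). A self-contained userspace sketch of the embed/recover/prealloc pattern; io_header, io_data, and rw_header are illustrative stand-ins:

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct io_header {                      /* stands in for nfs_pgio_header */
            int refcnt;
    };

    struct io_data {                        /* stands in for nfs_pgio_data */
            struct io_header *header;       /* NULL while the embedded slot is free */
    };

    struct rw_header {                      /* stands in for nfs_rw_header */
            struct io_header header;
            struct io_data rpc_data;        /* preallocated first io_data */
    };

    /* recover the wrapper from the embedded header, as NFS_RW_HEADER() does */
    static struct rw_header *RW_HEADER(struct io_header *hdr)
    {
            return container_of(hdr, struct rw_header, header);
    }

    /* the first request reuses the embedded slot; later ones go to the heap */
    static struct io_data *io_data_alloc(struct io_header *hdr)
    {
            struct io_data *prealloc = &RW_HEADER(hdr)->rpc_data;
            struct io_data *data;

            if (prealloc->header == NULL)
                    data = prealloc;
            else
                    data = calloc(1, sizeof(*data));
            if (data) {
                    data->header = hdr;     /* marks the slot busy, pins the header */
                    hdr->refcnt++;
            }
            return data;
    }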
 
 /**
  * nfs_pgio_rpcsetup - Set up arguments for a pageio call
- * @hdr: The pageio hdr
+ * @data: The pageio data
  * @count: Number of bytes to read
  * @offset: Initial offset
  * @how: How to commit data (writes only)
  * @cinfo: Commit information for the call (writes only)
  */
-static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
+static void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
                               unsigned int count, unsigned int offset,
                               int how, struct nfs_commit_info *cinfo)
 {
-        struct nfs_page *req = hdr->req;
+        struct nfs_page *req = data->header->req;
 
         /* Set up the RPC argument and reply structs
-         * NB: take care not to mess about with hdr->commit et al. */
+         * NB: take care not to mess about with data->commit et al. */
 
-        hdr->args.fh = NFS_FH(hdr->inode);
-        hdr->args.offset = req_offset(req) + offset;
+        data->args.fh = NFS_FH(data->header->inode);
+        data->args.offset = req_offset(req) + offset;
         /* pnfs_set_layoutcommit needs this */
-        hdr->mds_offset = hdr->args.offset;
-        hdr->args.pgbase = req->wb_pgbase + offset;
-        hdr->args.pages = hdr->page_array.pagevec;
-        hdr->args.count = count;
-        hdr->args.context = get_nfs_open_context(req->wb_context);
-        hdr->args.lock_context = req->wb_lock_context;
-        hdr->args.stable = NFS_UNSTABLE;
+        data->mds_offset = data->args.offset;
+        data->args.pgbase = req->wb_pgbase + offset;
+        data->args.pages = data->pages.pagevec;
+        data->args.count = count;
+        data->args.context = get_nfs_open_context(req->wb_context);
+        data->args.lock_context = req->wb_lock_context;
+        data->args.stable = NFS_UNSTABLE;
         switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
         case 0:
                 break;
         case FLUSH_COND_STABLE:
                 if (nfs_reqs_to_commit(cinfo))
                         break;
         default:
-                hdr->args.stable = NFS_FILE_SYNC;
+                data->args.stable = NFS_FILE_SYNC;
         }
 
-        hdr->res.fattr = &hdr->fattr;
-        hdr->res.count = count;
-        hdr->res.eof = 0;
-        hdr->res.verf = &hdr->verf;
-        nfs_fattr_init(&hdr->fattr);
+        data->res.fattr = &data->fattr;
+        data->res.count = count;
+        data->res.eof = 0;
+        data->res.verf = &data->verf;
+        nfs_fattr_init(&data->fattr);
 }
 
 /**
- * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
+ * nfs_pgio_prepare - Prepare pageio data to go over the wire
  * @task: The current task
- * @calldata: pageio header to prepare
+ * @calldata: pageio data to prepare
  */
 static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
 {
-        struct nfs_pgio_header *hdr = calldata;
+        struct nfs_pgio_data *data = calldata;
         int err;
-        err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
+        err = NFS_PROTO(data->header->inode)->pgio_rpc_prepare(task, data);
         if (err)
                 rpc_exit(task, err);
 }
 
-int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_data *data,
                       const struct rpc_call_ops *call_ops, int how, int flags)
 {
         struct rpc_task *task;
         struct rpc_message msg = {
-                .rpc_argp = &hdr->args,
-                .rpc_resp = &hdr->res,
-                .rpc_cred = hdr->cred,
+                .rpc_argp = &data->args,
+                .rpc_resp = &data->res,
+                .rpc_cred = data->header->cred,
         };
         struct rpc_task_setup task_setup_data = {
                 .rpc_client = clnt,
-                .task = &hdr->task,
+                .task = &data->task,
                 .rpc_message = &msg,
                 .callback_ops = call_ops,
-                .callback_data = hdr,
+                .callback_data = data,
                 .workqueue = nfsiod_workqueue,
                 .flags = RPC_TASK_ASYNC | flags,
         };
         int ret = 0;
 
-        hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how);
+        data->header->rw_ops->rw_initiate(data, &msg, &task_setup_data, how);
 
         dprintk("NFS: %5u initiated pgio call "
                 "(req %s/%llu, %u bytes @ offset %llu)\n",
-                hdr->task.tk_pid,
-                hdr->inode->i_sb->s_id,
-                (unsigned long long)NFS_FILEID(hdr->inode),
-                hdr->args.count,
-                (unsigned long long)hdr->args.offset);
+                data->task.tk_pid,
+                data->header->inode->i_sb->s_id,
+                (unsigned long long)NFS_FILEID(data->header->inode),
+                data->args.count,
+                (unsigned long long)data->args.offset);
 
         task = rpc_run_task(&task_setup_data);
         if (IS_ERR(task)) {
                 ret = PTR_ERR(task);
                 goto out;
         }
         if (how & FLUSH_SYNC) {
                 ret = rpc_wait_for_completion_task(task);
 [... 10 unchanged lines hidden ...]
  * nfs_pgio_error - Clean up from a pageio error
  * @desc: IO descriptor
  * @hdr: pageio header
  */
 static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
                           struct nfs_pgio_header *hdr)
 {
         set_bit(NFS_IOHDR_REDO, &hdr->flags);
-        nfs_pgio_data_destroy(hdr);
-        hdr->completion_ops->completion(hdr);
+        nfs_pgio_data_release(hdr->data);
+        hdr->data = NULL;
         desc->pg_completion_ops->error_cleanup(&desc->pg_list);
         return -ENOMEM;
 }
 
 /**
  * nfs_pgio_release - Release pageio data
- * @calldata: The pageio header to release
+ * @calldata: The pageio data to release
  */
 static void nfs_pgio_release(void *calldata)
 {
-        struct nfs_pgio_header *hdr = calldata;
-        if (hdr->rw_ops->rw_release)
-                hdr->rw_ops->rw_release(hdr);
-        nfs_pgio_data_destroy(hdr);
-        hdr->completion_ops->completion(hdr);
+        struct nfs_pgio_data *data = calldata;
+        if (data->header->rw_ops->rw_release)
+                data->header->rw_ops->rw_release(data);
+        nfs_pgio_data_release(data);
 }
 
 /**
  * nfs_pageio_init - initialise a page io descriptor
  * @desc: pointer to descriptor
  * @inode: pointer to inode
  * @doio: pointer to io function
  * @bsize: io block size
 [... 24 unchanged lines hidden ...]
         desc->pg_dreq = NULL;
         desc->pg_layout_private = NULL;
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_init);
 
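Note: the release paths also differ in how completion is driven. On the e655f945 side one header is one RPC, so nfs_pgio_release() calls hdr->completion_ops->completion() directly; on the 743162013 side the header is reference-counted (nfs_pgio_data_alloc() takes a ref per nfs_pgio_data, and nfs_generic_pg_pgios() below holds a temporary one), and nfs_pgio_data_release() fires the completion only when atomic_dec_and_test() drops the last reference. A compact sketch of that dec-and-test idiom, with C11 atomics standing in for kernel atomic_t and illustrative names:

    #include <stdatomic.h>

    struct header {
            atomic_int refcnt;
            void (*completion)(struct header *);
    };

    /* one reference per outstanding user, plus a temporary one held by the
     * submitter so completion cannot run while submission is in progress */
    static void header_get(struct header *h)
    {
            atomic_fetch_add(&h->refcnt, 1);
    }

    /* whoever drops the final reference runs the completion exactly once */
    static void header_put(struct header *h)
    {
            if (atomic_fetch_sub(&h->refcnt, 1) == 1)
                    h->completion(h);
    }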
 /**
  * nfs_pgio_result - Basic pageio error handling
  * @task: The task that ran
- * @calldata: Pageio header to check
+ * @calldata: Pageio data to check
  */
 static void nfs_pgio_result(struct rpc_task *task, void *calldata)
 {
-        struct nfs_pgio_header *hdr = calldata;
-        struct inode *inode = hdr->inode;
+        struct nfs_pgio_data *data = calldata;
+        struct inode *inode = data->header->inode;
 
         dprintk("NFS: %s: %5u, (status %d)\n", __func__,
                 task->tk_pid, task->tk_status);
 
-        if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
+        if (data->header->rw_ops->rw_done(task, data, inode) != 0)
                 return;
         if (task->tk_status < 0)
-                nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
+                nfs_set_pgio_error(data->header, task->tk_status, data->args.offset);
         else
-                hdr->rw_ops->rw_result(task, hdr);
+                data->header->rw_ops->rw_result(task, data);
 }
 
 /*
  * Create an RPC task for the given read or write request and kick it.
  * The page must have been locked by the caller.
  *
  * It may happen that the page we're passed is not marked dirty.
  * This is the case if nfs_updatepage detects a conflicting request
  * that has been written but not committed.
  */
 int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
                      struct nfs_pgio_header *hdr)
 {
         struct nfs_page *req;
         struct page **pages;
+        struct nfs_pgio_data *data;
         struct list_head *head = &desc->pg_list;
         struct nfs_commit_info cinfo;
-        unsigned int pagecount;
 
-        pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
-        if (!nfs_pgarray_set(&hdr->page_array, pagecount))
+        data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base,
+                                                           desc->pg_count));
+        if (!data)
                 return nfs_pgio_error(desc, hdr);
 
         nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
-        pages = hdr->page_array.pagevec;
+        pages = data->pages.pagevec;
         while (!list_empty(head)) {
                 req = nfs_list_entry(head->next);
                 nfs_list_remove_request(req);
                 nfs_list_add_request(req, &hdr->pages);
                 *pages++ = req->wb_page;
         }
 
         if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
             (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
                 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 
         /* Set up the argument struct */
-        nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+        nfs_pgio_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+        hdr->data = data;
         desc->pg_rpc_callops = &nfs_pgio_common_ops;
         return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_generic_pgio);
 
 static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
 {
+        struct nfs_rw_header *rw_hdr;
         struct nfs_pgio_header *hdr;
         int ret;
 
-        hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
-        if (!hdr) {
+        rw_hdr = nfs_rw_header_alloc(desc->pg_rw_ops);
+        if (!rw_hdr) {
                 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                 return -ENOMEM;
         }
-        nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
+        hdr = &rw_hdr->header;
+        nfs_pgheader_init(desc, hdr, nfs_rw_header_free);
+        atomic_inc(&hdr->refcnt);
         ret = nfs_generic_pgio(desc, hdr);
         if (ret == 0)
                 ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
-                                        hdr, desc->pg_rpc_callops,
+                                        hdr->data, desc->pg_rpc_callops,
                                         desc->pg_ioflags, 0);
+        if (atomic_dec_and_test(&hdr->refcnt))
+                hdr->completion_ops->completion(hdr);
         return ret;
 }
 
 static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
                                    const struct nfs_open_context *ctx2)
 {
         return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
 }
 [... 116 unchanged lines hidden ...]
         WARN_ON_ONCE(subreq->wb_offset != offset);
         WARN_ON_ONCE(subreq->wb_pgbase != pgbase);
 
         nfs_page_group_unlock(req);
         desc->pg_moreio = 1;
         nfs_pageio_doio(desc);
         if (desc->pg_error < 0)
                 return 0;
+        desc->pg_moreio = 0;
         if (desc->pg_recoalesce)
                 return 0;
         /* retry add_request for this subreq */
         nfs_page_group_lock(req);
         continue;
         }
 
         /* check for buggy pg_test call(s) */
 [... 30 unchanged lines hidden ...]
         LIST_HEAD(head);
 
         do {
                 list_splice_init(&desc->pg_list, &head);
                 desc->pg_bytes_written -= desc->pg_count;
                 desc->pg_count = 0;
                 desc->pg_base = 0;
                 desc->pg_recoalesce = 0;
-                desc->pg_moreio = 0;
 
                 while (!list_empty(&head)) {
                         struct nfs_page *req;
 
                         req = list_first_entry(&head, struct nfs_page, wb_list);
                         nfs_list_remove_request(req);
                         if (__nfs_pageio_add_request(desc, req))
                                 continue;
 [... 15 unchanged lines hidden ...]
                 if (ret)
                         break;
                 if (desc->pg_error < 0)
                         break;
                 ret = nfs_do_recoalesce(desc);
         } while (ret);
         return ret;
 }
+EXPORT_SYMBOL_GPL(nfs_pageio_add_request);
 
-/*
- * nfs_pageio_resend - Transfer requests to new descriptor and resend
- * @hdr - the pgio header to move request from
- * @desc - the pageio descriptor to add requests to
- *
- * Try to move each request (nfs_page) from @hdr to @desc then attempt
- * to send them.
- *
- * Returns 0 on success and < 0 on error.
- */
-int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
-                      struct nfs_pgio_header *hdr)
-{
-        LIST_HEAD(failed);
-
-        desc->pg_dreq = hdr->dreq;
-        while (!list_empty(&hdr->pages)) {
-                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
-
-                nfs_list_remove_request(req);
-                if (!nfs_pageio_add_request(desc, req))
-                        nfs_list_add_request(req, &failed);
-        }
-        nfs_pageio_complete(desc);
-        if (!list_empty(&failed)) {
-                list_move(&failed, &hdr->pages);
-                return -EIO;
-        }
-        return 0;
-}
-EXPORT_SYMBOL_GPL(nfs_pageio_resend);
-
 /**
  * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
  * @desc: pointer to io descriptor
  */
 void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
 {
         for (;;) {
                 nfs_pageio_doio(desc);
                 if (!desc->pg_recoalesce)
                         break;
                 if (!nfs_do_recoalesce(desc))
                         break;
         }
 }
+EXPORT_SYMBOL_GPL(nfs_pageio_complete);
 
 /**
  * nfs_pageio_cond_complete - Conditional I/O completion
  * @desc: pointer to io descriptor
  * @index: page index
  *
  * It is important to ensure that processes don't try to take locks
  * on non-contiguous ranges of pages as that might deadlock. This
 [... 40 unchanged lines hidden ...]