/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

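/*
 * Record a write error in the open context so that a later fsync() or
 * close() can report it. The barrier orders the error store before the
 * NFS_CONTEXT_ERROR_WRITE flag that readers test.
 */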
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

/*
 * nfs_page_find_head_request_locked - find head request associated with @page
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page))
		req = (struct nfs_page *)page_private(page);
	else if (unlikely(PageSwapCache(page)))
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
			page);

	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}

	return req;
}

/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_head_request_locked(NFS_I(inode), page);
	spin_unlock(&inode->i_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (i_size > 0 && page_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	WARN_ON_ONCE(head != head->wb_head);
	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags));

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	struct nfs_page *tmp;
	unsigned int pos = 0;
	unsigned int len = nfs_page_length(req->wb_page);

	nfs_page_group_lock(req, false);

	do {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (tmp) {
			/* no way this should happen */
			WARN_ON_ONCE(tmp->wb_pgbase != pos);
			pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
		}
	} while (tmp && pos < len);

	nfs_page_group_unlock(req);
	WARN_ON_ONCE(pos > len);
	return pos == len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	if (PageUptodate(req->wb_page))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	SetPageUptodate(req->wb_page);
}

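/*
 * Pick flush flags for a writeback pass: data-integrity writeback
 * (WB_SYNC_ALL) is flagged FLUSH_COND_STABLE so that it may be sent
 * as a stable write when the whole flush fits in a single RPC.
 */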
static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

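/*
 * Mark the page as under writeback and charge it against the
 * per-server writeback limit, flagging the bdi congested once the
 * threshold is crossed. nfs_end_page_writeback() below undoes both
 * when the last request in the page group completes.
 */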
static void nfs_set_page_writeback(struct page *page)
{
	struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH) {
		set_bdi_congested(&nfss->backing_dev_info,
					BLK_RW_ASYNC);
	}
}

static void nfs_end_page_writeback(struct nfs_page *req)
{
	struct inode *inode = page_file_mapping(req->wb_page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	if (!nfs_page_group_sync_on_bit(req, PG_WB_END))
		return;

	end_page_writeback(req->wb_page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}


/* nfs_page_group_clear_bits
 *   @req - an nfs request
 * clears all page group related bits from @req
 */
static void
nfs_page_group_clear_bits(struct nfs_page *req)
{
	clear_bit(PG_TEARDOWN, &req->wb_flags);
	clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
	clear_bit(PG_UPTODATE, &req->wb_flags);
	clear_bit(PG_WB_END, &req->wb_flags);
	clear_bit(PG_REMOVE, &req->wb_flags);
}


/*
 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
 *
 * this is a helper function for nfs_lock_and_join_requests
 *
 * @inode - inode associated with request page group, must be holding inode lock
 * @head - head request of page group, must be holding head lock
 * @req - request that couldn't lock and needs to wait on the req bit lock
 * @nonblock - if true, don't actually wait
 *
 * NOTE: this must be called holding page_group bit lock and inode spin lock
 * and BOTH will be released before returning.
 *
 * returns 0 on success, < 0 on error.
 */
static int
nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
			  struct nfs_page *req, bool nonblock)
	__releases(&inode->i_lock)
{
	struct nfs_page *tmp;
	int ret;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
		nfs_unlock_request(tmp);

	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));

	/* grab a ref on the request that will be waited on */
	kref_get(&req->wb_kref);

	nfs_page_group_unlock(head);
	spin_unlock(&inode->i_lock);

	/* release ref from nfs_page_find_head_request_locked */
	nfs_release_request(head);

	if (!nonblock)
		ret = nfs_wait_on_request(req);
	else
		ret = -EAGAIN;
	nfs_release_request(req);

	return ret;
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_head = subreq;
		subreq->wb_this_page = subreq;

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_request(subreq);

		if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
			/* release ref on old head request */
			nfs_release_request(old_head);

			nfs_page_group_clear_bits(subreq);

			/* release the PG_INODE_REF reference */
			if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
				nfs_release_request(subreq);
			else
				WARN_ON_ONCE(1);
		} else {
			WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
			/* zombie requests have already released the last
			 * reference and were waiting on the rest of the
			 * group to complete. Since it's no longer part of a
			 * group, simply free the request */
			nfs_page_group_clear_bits(subreq);
			nfs_free_request(subreq);
		}
	}
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 * @nonblock - if true, don't block waiting for request locks
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group. All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or a ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page, bool nonblock)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int total_bytes;
	int ret;

try_again:
	total_bytes = 0;

	WARN_ON_ONCE(destroy_list);

	spin_lock(&inode->i_lock);

	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_page_find_head_request_locked(NFS_I(inode), page);

	if (!head) {
		spin_unlock(&inode->i_lock);
		return NULL;
	}

	/* holding inode lock, so always make a non-blocking call to try the
	 * page group lock */
	ret = nfs_page_group_lock(head, true);
	if (ret < 0) {
		spin_unlock(&inode->i_lock);

		if (!nonblock && ret == -EAGAIN) {
			nfs_page_group_lock_wait(head);
			nfs_release_request(head);
			goto try_again;
		}

		nfs_release_request(head);
		return ERR_PTR(ret);
	}

	/* lock each request in the page group */
	subreq = head;
	do {
		/*
		 * Subrequests are always contiguous, non overlapping
		 * and in order - but may be repeated (mirrored writes).
		 */
		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
			total_bytes += subreq->wb_bytes;
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_page_group_unlock(head);
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-EIO);
		}

		if (!nfs_lock_request(subreq)) {
			/* releases page group bit lock and
			 * inode spin lock and all references */
			ret = nfs_unroll_locks_and_wait(inode, head,
				subreq, nonblock);

			if (ret == 0)
				goto try_again;

			return ERR_PTR(ret);
		}

		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;

		/* change head request to cover whole range that
		 * the former page group covered */
		head->wb_bytes = total_bytes;
	}

	/*
	 * prepare head request to be added to new pgio descriptor
	 */
	nfs_page_group_clear_bits(head);

	/*
	 * some part of the group was still on the inode list - otherwise
	 * the group wouldn't be involved in async write.
	 * grab a reference for the head request, iff it needs one.
	 */
	if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
		kref_get(&head->wb_kref);

	nfs_page_group_unlock(head);

	/* drop lock to clean up requests on destroy list */
	spin_unlock(&inode->i_lock);

	nfs_destroy_unlinked_subrequests(destroy_list, head);

	/* still holds ref on head from nfs_page_find_head_request_locked
	 * and still has lock on head from lock loop */
	return head;
}

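/*
 * Give up on writing this request back: release it and evict its page
 * from the page cache, so that a subsequent reader sees the error
 * rather than stale data.
 */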
static void nfs_write_error_remove_page(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
	generic_error_remove_page(page_file_mapping(req->wb_page),
				  req->wb_page);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page, bool nonblock,
				bool launder)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page, nonblock);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors
		 * in launder case, while other dirty pages can
		 * still be around until they get flushed.
		 */
		if (nfs_error_is_fatal(ret)) {
			nfs_context_set_write_error(req->wb_context, ret);
			if (launder) {
				nfs_write_error_remove_page(req);
				goto out;
			}
		}
		nfs_redirty_request(req);
		ret = -EAGAIN;
	} else
		nfs_add_stats(page_file_mapping(page)->host,
				NFSIOS_WRITEPAGES, 1);
out:
	return ret;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio, bool launder)
{
	int ret;

	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
				   launder);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
				struct writeback_control *wbc,
				bool launder)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = page_file_mapping(page)->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0,
				false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio, launder);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

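/*
 * ->writepage callback: write one dirty page back to the server.
 * The page is locked on entry and unlocked before returning.
 */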
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc, false);
	unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data, false);
	unlock_page(page);
	return ret;
}

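/*
 * ->writepages callback: flush a range of the inode's dirty pages,
 * letting the pgio layer coalesce them into larger WRITE requests.
 */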
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	spin_lock(&inode->i_lock);
	if (!nfsi->nrequests &&
	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		inode->i_version++;
	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	nfsi->nrequests++;
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
	spin_unlock(&inode->i_lock);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *head;

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		head = req->wb_head;

		spin_lock(&inode->i_lock);
		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
			set_page_private(head->wb_page, 0);
			ClearPagePrivate(head->wb_page);
			smp_mb__after_atomic();
			wake_up_page(head->wb_page, PG_private);
			clear_bit(PG_MAPPED, &head->wb_flags);
		}
		nfsi->nrequests--;
		spin_unlock(&inode->i_lock);
	} else {
		spin_lock(&inode->i_lock);
		nfsi->nrequests--;
		spin_unlock(&inode->i_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
		nfs_release_request(req);
}

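/*
 * Redirty the page backing @req so that a later writeback pass will
 * pick it up again.
 */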
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	if (req->wb_page)
		__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding the inode lock (which doubles as the
 * cinfo lock).
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (freq->wb_page == page)
			return freq->wb_head;
	}

	return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold cinfo->inode->i_lock, and the nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
			    struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	cinfo->mds->ncommit++;
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	spin_lock(&cinfo->inode->i_lock);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	spin_unlock(&cinfo->inode->i_lock);
	if (req->wb_page)
		nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
	dec_node_page_state(page, NR_UNSTABLE_NFS);
	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
		    WB_RECLAIMABLE);
}

/* Called holding inode (/cinfo) lock */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct inode *inode = d_inode(req->wb_context->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		nfs_clear_page_commit(req->wb_page);
	}
}

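/*
 * Does the data written by @hdr still need a COMMIT? Anything short
 * of an NFS_FILE_SYNC reply does, except an NFS_DATA_SYNC reply for
 * a pNFS (hdr->lseg != NULL) write.
 */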
int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

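/*
 * WRITE completion: for each request in the completed header either
 * record the error, move the request to the commit lists if the data
 * has not yet reached stable storage, or remove it from the inode.
 */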
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			nfs_set_pageerror(req->wb_page);
			nfs_context_set_write_error(req->wb_context, hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_unlock_request(req);
		nfs_end_page_writeback(req);
		nfs_release_request(req);
	}
out:
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return cinfo->mds->ncommit;
}

/* cinfo->inode->i_lock held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(&cinfo->inode->i_lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	spin_lock(&cinfo->inode->i_lock);
	if (cinfo->mds->ncommit > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	spin_unlock(&cinfo->inode->i_lock);
	return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_head_request_locked(NFS_I(inode), page);
		if (req == NULL)
			goto out_unlock;

		/* should be handled by nfs_flush_incompatible */
		WARN_ON_ONCE(req->wb_head != req);
		WARN_ON_ONCE(req->wb_this_page != req);

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend
		    || end < req->wb_offset)
			goto out_flushme;

		if (nfs_lock_request(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	if (req)
		nfs_clear_request_commit(req);
	spin_unlock(&inode->i_lock);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, page, NULL, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = file_inode(file)->i_flctx;
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_head_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page ||
			!nfs_match_open_context(req->wb_context, ctx);
		/* for now, flush if more than 1 request in page_group */
		do_flush |= req->wb_this_page != req;
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner.l_owner != current->files
				|| l_ctx->lockowner.l_pid != current->tgid;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_cred_key_to_expire(auth, ctx->cred);
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return false;
	return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			fl->fl_type == F_WRLCK;
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	int ret;
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
					fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
					fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode *inode = page_file_mapping(page)->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(page);
	else
		__set_page_dirty_nobuffers(page);
out:
	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}

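/*
 * Map the FLUSH_* how-flags onto an RPC scheduling priority.
 */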
static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg);

	nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
				 &task_setup_data->rpc_client, msg, hdr);
}

/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	nfs_unlock_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			       struct inode *inode, int ioflags, bool force_mds,
			       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}

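/*
 * Guard against a stale post-op size attribute shrinking i_size:
 * discard a returned size that is smaller than the current i_size,
 * and only force-accept one that matches the end of this write.
 */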
static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;
	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);

	if (hdr->res.verf->committed < hdr->args.stable &&
	    task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				hdr->res.verf->committed, hdr->args.stable);
			complain = jiffies + 300 * HZ;
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode))
		nfs_mark_for_revalidate(inode);
	return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		rpc_restart_call_prepare(task);
	}
}

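/*
 * cinfo->mds->rpcs_out counts in-flight COMMIT calls; FLUSH_SYNC
 * callers block in wait_on_commit() until the last one completes.
 */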
static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_on_atomic_t(&cinfo->rpcs_out,
			nfs_wait_atomic_killable, TASK_KILLABLE);
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out))
		wake_up_atomic_t(&cinfo->rpcs_out);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	nfs_ops->commit_setup(data, &msg);

	dprintk("NFS: initiated commit call\n");

	nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
		NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = d_inode(first->wb_context->dentry);

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->lseg	  = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops     = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq	  = cinfo->dreq;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context     = get_nfs_open_context(first->wb_context);
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		if (!cinfo->dreq)
			nfs_clear_page_commit(req->wb_page);
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
		struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;

	/* another commit raced with us */
	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how, 0);
 out_bad:
	nfs_retry_commit(head, NULL, cinfo, 0);
	return -ENOMEM;
}

int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf)
{
	struct inode *inode = file_inode(file);
	struct nfs_open_context *open;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int ret;

	open = get_nfs_open_context(nfs_file_open_context(file));
	req  = nfs_create_request(open, NULL, NULL, 0, i_size_read(inode));
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out_put;
	}

	nfs_init_cinfo_from_inode(&cinfo, inode);

	memcpy(&req->wb_verf, verf, sizeof(struct nfs_write_verifier));
	nfs_request_add_commit_list(req, &cinfo);
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret > 0)
		ret = 0;

	nfs_free_request(req);
out_put:
	put_nfs_open_context(open);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_commit_file);

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}

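/*
 * Process the pages attached to a completed COMMIT: requests whose
 * stored write verifier matches the one returned by the server are
 * done and can be removed from the inode; a mismatch means the server
 * rebooted between the WRITE and the COMMIT, so the page is redirtied
 * to be written (and committed) again.
 */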
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct nfs_server *nfss;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (req->wb_page)
			nfs_clear_page_commit(req->wb_page);

		dprintk("NFS: commit (%s/%llu %d@%lld)",
			req->wb_context->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	next:
		nfs_unlock_and_release_request(req);
	}
	nfss = NFS_SERVER(data->inode);
	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int error = 0;
	int res;

	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	res = nfs_scan_commit(inode, &head, &cinfo);
	if (res)
		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
	nfs_commit_end(cinfo.mds);
	if (error < 0)
		goto out_error;
	if (!may_wait)
		goto out_mark_dirty;
	error = wait_on_commit(cinfo.mds);
	if (error < 0)
		return error;
	return res;
out_error:
	res = error;
	/* Note: If we exit without ensuring that the commit is complete,
	 *	 we must mark the inode as dirty. Otherwise, future calls to
	 *	 sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
	 *	 that the data is on the disk.
	 */
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return res;
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);

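/*
 * ->write_inode callback: push requests on the inode's commit lists
 * out to the server. For a non-blocking flush (WB_SYNC_NONE) the
 * commit may be deferred, or issued without waiting for the reply.
 */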
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	/* no commits means nothing needs to be done */
	if (!nfsi->commit_info.ncommit)
		return ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (nfsi->commit_info.ncommit <= (nfsi->nrequests >> 1))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
		return 0;
	}
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, lstart, lend);
	if (ret == 0)
		ret = pnfs_sync_inode(mapping->host, true);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

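/*
 * Cancel all writeback requests for a page that is being invalidated:
 * join the page group into a single head request, then remove it from
 * the inode and drop it.
 */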
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	wait_on_page_writeback(page);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(page, false);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc, launder);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif

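/*
 * Module init: create the slab caches and mempools backing write and
 * commit I/O, and size the writeback congestion limit from the amount
 * of memory in the machine.
 */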
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_mode		= FMODE_WRITE,
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};