// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
        void (*complete)(void *data);
        void *data;
        struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
                                                struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

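/*
 * Note on the pools above: mempool_alloc() with a gfp mask that is
 * allowed to sleep does not fail - it falls back to the MIN_POOL_WRITE /
 * MIN_POOL_COMMIT preallocated elements and, if necessary, waits for one
 * to be returned to the pool. That guarantee is what keeps writeback and
 * commit making forward progress under memory pressure.
 */
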
struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
{
        struct nfs_commit_data *p;

        if (never_fail)
                p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
        else {
                /* It is OK to do some reclaim, but not safe to wait
                 * for anything to be returned to the pool.
                 * mempool_alloc() cannot handle that particular combination,
                 * so we need two separate attempts.
                 */
                p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
                if (!p)
                        p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
                                             __GFP_NOWARN | __GFP_NORETRY);
                if (!p)
                        return NULL;
        }

        memset(p, 0, sizeof(*p));
        INIT_LIST_HEAD(&p->pages);
        return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
        mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
        struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

        memset(p, 0, sizeof(*p));
        p->rw_mode = FMODE_WRITE;
        return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
        mempool_free(hdr, nfs_wdata_mempool);
}

static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
        return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
                                   void (*complete)(void *), void *data)
{
        ioc->complete = complete;
        ioc->data = data;
        kref_init(&ioc->refcount);
}

static void nfs_io_completion_release(struct kref *kref)
{
        struct nfs_io_completion *ioc = container_of(kref,
                        struct nfs_io_completion, refcount);
        ioc->complete(ioc->data);
        kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
        if (ioc != NULL)
                kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
        if (ioc != NULL)
                kref_put(&ioc->refcount, nfs_io_completion_release);
}

static struct nfs_page *
nfs_page_private_request(struct page *page)
{
        if (!PagePrivate(page))
                return NULL;
        return (struct nfs_page *)page_private(page);
}

/*
 * nfs_page_find_private_request - find the head request associated with
 * @page via its page_private pointer
 *
 * Takes and releases mapping->private_lock internally.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_private_request(struct page *page)
{
        struct address_space *mapping = page_file_mapping(page);
        struct nfs_page *req;

        if (!PagePrivate(page))
                return NULL;
        spin_lock(&mapping->private_lock);
        req = nfs_page_private_request(page);
        if (req) {
                WARN_ON_ONCE(req->wb_head != req);
                kref_get(&req->wb_kref);
        }
        spin_unlock(&mapping->private_lock);
        return req;
}

static struct nfs_page *
nfs_page_find_swap_request(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *req = NULL;

        if (!PageSwapCache(page))
                return NULL;
        mutex_lock(&nfsi->commit_mutex);
        if (PageSwapCache(page)) {
                req = nfs_page_search_commits_for_head_request_locked(nfsi,
                        page);
                if (req) {
                        WARN_ON_ONCE(req->wb_head != req);
                        kref_get(&req->wb_kref);
                }
        }
        mutex_unlock(&nfsi->commit_mutex);
        return req;
}

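/*
 * There are two ways to reach a request from a page: an ordinary
 * page-cache page points at its head request through page_private()
 * (guarded by mapping->private_lock), while a swap-cache page cannot use
 * page_private(), so its head request has to be found by searching the
 * inode's commit lists under commit_mutex. The helpers above implement
 * one lookup path each; nfs_page_find_head_request() below tries both.
 */
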
/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
        struct nfs_page *req;

        req = nfs_page_find_private_request(page);
        if (!req)
                req = nfs_page_find_swap_request(page);
        return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
        struct inode *inode = page_file_mapping(page)->host;
        loff_t end, i_size;
        pgoff_t end_index;

        spin_lock(&inode->i_lock);
        i_size = i_size_read(inode);
        end_index = (i_size - 1) >> PAGE_SHIFT;
        if (i_size > 0 && page_index(page) < end_index)
                goto out;
        end = page_file_offset(page) + ((loff_t)offset + count);
        if (i_size >= end)
                goto out;
        i_size_write(inode, end);
        NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
        spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
        nfs_zap_mapping(mapping->host, mapping);
}

static void nfs_mapping_set_error(struct page *page, int error)
{
        SetPageError(page);
        mapping_set_error(page_file_mapping(page), error);
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held.
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
        struct nfs_page *req;

        req = head;
        do {
                if (page_offset >= req->wb_pgbase &&
                    page_offset < (req->wb_pgbase + req->wb_bytes))
                        return req;

                req = req->wb_this_page;
        } while (req != head);

        return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise.
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
        struct nfs_page *tmp;
        unsigned int pos = 0;
        unsigned int len = nfs_page_length(req->wb_page);

        nfs_page_group_lock(req);

        for (;;) {
                tmp = nfs_page_group_search_locked(req->wb_head, pos);
                if (!tmp)
                        break;
                pos = tmp->wb_pgbase + tmp->wb_bytes;
        }

        nfs_page_group_unlock(req);
        return pos >= len;
}

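/*
 * Worked example for the coverage walk above (illustrative values): on a
 * 4096-byte page covered by two 2048-byte subrequests, the search finds
 * the request containing offset 0, advances pos to 2048, finds the second
 * request, advances pos to 4096, finds nothing there and stops with
 * pos >= len - the group covers the page. Any hole in the group leaves
 * pos short of len, and the page cannot be marked up to date.
 */
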
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
        if (PageUptodate(req->wb_page))
                return;
        if (!nfs_page_group_covers_page(req))
                return;
        SetPageUptodate(req->wb_page);
}

static int wb_priority(struct writeback_control *wbc)
{
        int ret = 0;

        if (wbc->sync_mode == WB_SYNC_ALL)
                ret = FLUSH_COND_STABLE;
        return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

static void nfs_set_page_writeback(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_server *nfss = NFS_SERVER(inode);
        int ret = test_set_page_writeback(page);

        WARN_ON_ONCE(ret != 0);

        if (atomic_long_inc_return(&nfss->writeback) >
                        NFS_CONGESTION_ON_THRESH)
                set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

static void nfs_end_page_writeback(struct nfs_page *req)
{
        struct inode *inode = page_file_mapping(req->wb_page)->host;
        struct nfs_server *nfss = NFS_SERVER(inode);
        bool is_done;

        is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
        nfs_unlock_request(req);
        if (!is_done)
                return;

        end_page_writeback(req->wb_page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

/*
 * nfs_unroll_locks - unlock all newly locked reqs in a page group
 *
 * this is a helper function for nfs_lock_and_join_requests
 *
 * @inode - inode associated with request page group
 * @head - head request of page group, must be holding head lock
 * @req - request that couldn't be locked; unlocking stops when it is reached
 */
static void
nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
                 struct nfs_page *req)
{
        struct nfs_page *tmp;

        /* relinquish all the locks successfully grabbed this run */
        for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
                if (!kref_read(&tmp->wb_kref))
                        continue;
                nfs_unlock_and_release_request(tmp);
        }
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
                                 struct nfs_page *old_head,
                                 struct inode *inode)
{
        while (destroy_list) {
                struct nfs_page *subreq = destroy_list;

                destroy_list = (subreq->wb_this_page == old_head) ?
                                NULL : subreq->wb_this_page;

                WARN_ON_ONCE(old_head != subreq->wb_head);

                /* make sure old group is not used */
                subreq->wb_this_page = subreq;

                clear_bit(PG_REMOVE, &subreq->wb_flags);

                /* Note: races with nfs_page_group_destroy() */
                if (!kref_read(&subreq->wb_kref)) {
                        /* Check if we raced with nfs_page_group_destroy() */
                        if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
                                nfs_free_request(subreq);
                        continue;
                }

                subreq->wb_head = subreq;

                if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
                        nfs_release_request(subreq);
                        atomic_long_dec(&NFS_I(inode)->nrequests);
                }

                /* subreq is now totally disconnected from page group or any
                 * write / commit lists. last chance to wake any waiters */
                nfs_unlock_and_release_request(subreq);
        }
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group. All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *head, *subreq;
        struct nfs_page *destroy_list = NULL;
        unsigned int total_bytes;
        int ret;

try_again:
        /*
         * A reference is taken only on the head request which acts as a
         * reference to the whole page group - the group will not be destroyed
         * until the head reference is released.
         */
        head = nfs_page_find_head_request(page);
        if (!head)
                return NULL;

        /* lock the page head first in order to avoid an ABBA inefficiency */
        if (!nfs_lock_request(head)) {
                ret = nfs_wait_on_request(head);
                nfs_release_request(head);
                if (ret < 0)
                        return ERR_PTR(ret);
                goto try_again;
        }

        /* Ensure that nobody removed the request before we locked it */
        if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
                nfs_unlock_and_release_request(head);
                goto try_again;
        }

        ret = nfs_page_group_lock(head);
        if (ret < 0)
                goto release_request;

        /* lock each request in the page group */
        total_bytes = head->wb_bytes;
        for (subreq = head->wb_this_page; subreq != head;
                        subreq = subreq->wb_this_page) {

                if (!kref_get_unless_zero(&subreq->wb_kref)) {
                        if (subreq->wb_offset == head->wb_offset + total_bytes)
                                total_bytes += subreq->wb_bytes;
                        continue;
                }

                while (!nfs_lock_request(subreq)) {
                        /*
                         * Unlock page to allow nfs_page_group_sync_on_bit()
                         * to succeed
                         */
                        nfs_page_group_unlock(head);
                        ret = nfs_wait_on_request(subreq);
                        if (!ret)
                                ret = nfs_page_group_lock(head);
                        if (ret < 0) {
                                nfs_unroll_locks(inode, head, subreq);
                                nfs_release_request(subreq);
                                goto release_request;
                        }
                }
                /*
                 * Subrequests are always contiguous, non overlapping
                 * and in order - but may be repeated (mirrored writes).
                 */
                if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
                        /* keep track of how many bytes this group covers */
                        total_bytes += subreq->wb_bytes;
                } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
                            ((subreq->wb_offset + subreq->wb_bytes) >
                             (head->wb_offset + total_bytes)))) {
                        nfs_page_group_unlock(head);
                        nfs_unroll_locks(inode, head, subreq);
                        nfs_unlock_and_release_request(subreq);
                        ret = -EIO;
                        goto release_request;
                }
        }

        /* Now that all requests are locked, make sure they aren't on any list.
         * Commit list removal accounting is done after locks are dropped */
        subreq = head;
        do {
                nfs_clear_request_commit(subreq);
                subreq = subreq->wb_this_page;
        } while (subreq != head);

        /* unlink subrequests from head, destroy them later */
        if (head->wb_this_page != head) {
                /* destroy list will be terminated by head */
                destroy_list = head->wb_this_page;
                head->wb_this_page = head;

                /* change head request to cover whole range that
                 * the former page group covered */
                head->wb_bytes = total_bytes;
        }

        /* Postpone destruction of this request */
        if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
                set_bit(PG_INODE_REF, &head->wb_flags);
                kref_get(&head->wb_kref);
                atomic_long_inc(&NFS_I(inode)->nrequests);
        }

        nfs_page_group_unlock(head);

        nfs_destroy_unlinked_subrequests(destroy_list, head, inode);

        /* Did we lose a race with nfs_inode_remove_request()? */
        if (!(PagePrivate(page) || PageSwapCache(page))) {
                nfs_unlock_and_release_request(head);
                return NULL;
        }

        /* still holds ref on head from nfs_page_find_head_request
         * and still has lock on head from lock loop */
        return head;

release_request:
        nfs_unlock_and_release_request(head);
        return ERR_PTR(ret);
}

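/*
 * Lock ordering used above: the head request lock is always taken first,
 * then the page group bit lock, then the subrequest locks in
 * wb_this_page order. When a subrequest is contended, the group lock is
 * dropped while waiting so that nfs_page_group_sync_on_bit() in the
 * completing I/O path can make progress, and nfs_unroll_locks() gives
 * back whatever was taken on this pass before the error is returned.
 */
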
static void nfs_write_error(struct nfs_page *req, int error)
{
        nfs_mapping_set_error(req->wb_page, error);
        nfs_end_page_writeback(req);
        nfs_release_request(req);
}

static bool
nfs_error_is_fatal_on_server(int err)
{
        switch (err) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return false;
        }
        return nfs_error_is_fatal(err);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                struct page *page)
{
        struct address_space *mapping;
        struct nfs_page *req;
        int ret = 0;

        req = nfs_lock_and_join_requests(page);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        nfs_set_page_writeback(page);
        WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

        /* If there is a fatal error that covers this write, just exit */
        ret = 0;
        mapping = page_file_mapping(page);
        if (test_bit(AS_ENOSPC, &mapping->flags) ||
            test_bit(AS_EIO, &mapping->flags))
                goto out_launder;

        if (!nfs_pageio_add_request(pgio, req)) {
                ret = pgio->pg_error;
                /*
                 * Remove the problematic req upon fatal errors on the server
                 */
                if (nfs_error_is_fatal(ret)) {
                        if (nfs_error_is_fatal_on_server(ret))
                                goto out_launder;
                } else
                        ret = -EAGAIN;
                nfs_redirty_request(req);
        } else
                nfs_add_stats(page_file_mapping(page)->host,
                                NFSIOS_WRITEPAGES, 1);
out:
        return ret;
out_launder:
        nfs_write_error(req, ret);
        return 0;
}

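/*
 * Error policy above: errors that are fatal on the server launder the
 * request - nfs_write_error() records the error on the mapping and drops
 * the request. Fatal errors of local origin redirty the request and
 * propagate, while non-fatal failures to queue the request become
 * -EAGAIN, which makes nfs_do_writepage() below simply redirty the page
 * for a later writeback pass.
 */
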
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
                            struct nfs_pageio_descriptor *pgio)
{
        int ret;

        nfs_pageio_cond_complete(pgio, page_index(page));
        ret = nfs_page_async_flush(pgio, page);
        if (ret == -EAGAIN) {
                redirty_page_for_writepage(wbc, page);
                ret = 0;
        }
        return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
                                struct writeback_control *wbc)
{
        struct nfs_pageio_descriptor pgio;
        struct inode *inode = page_file_mapping(page)->host;
        int err;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_pageio_init_write(&pgio, inode, 0,
                              false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
        if (pgio.pg_error < 0)
                return pgio.pg_error;
        return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = nfs_writepage_locked(page, wbc);
        unlock_page(page);
        return ret;
}

static int nfs_writepages_callback(struct page *page,
                                   struct writeback_control *wbc, void *data)
{
        int ret;

        ret = nfs_do_writepage(page, wbc, data);
        unlock_page(page);
        return ret;
}

static void nfs_io_completion_commit(void *inode)
{
        nfs_commit_inode(inode, 0);
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct nfs_pageio_descriptor pgio;
        struct nfs_io_completion *ioc;
        unsigned int pflags = memalloc_nofs_save();
        int err;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

        ioc = nfs_io_completion_alloc(GFP_NOFS);
        if (ioc)
                nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);

        nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
                                &nfs_async_write_completion_ops);
        pgio.pg_io_completion = ioc;
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);
        nfs_io_completion_put(ioc);

        memalloc_nofs_restore(pflags);

        if (err < 0)
                goto out_err;
        err = pgio.pg_error;
        if (err < 0)
                goto out_err;
        return 0;
out_err:
        return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
        struct address_space *mapping = page_file_mapping(req->wb_page);
        struct nfs_inode *nfsi = NFS_I(inode);

        WARN_ON_ONCE(req->wb_this_page != req);

        /* Lock the request! */
        nfs_lock_request(req);

        /*
         * Swap-space should not get truncated. Hence no need to plug the race
         * with invalidate/truncate.
         */
        spin_lock(&mapping->private_lock);
        if (!nfs_have_writebacks(inode) &&
            NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                inode_inc_iversion_raw(inode);
        if (likely(!PageSwapCache(req->wb_page))) {
                set_bit(PG_MAPPED, &req->wb_flags);
                SetPagePrivate(req->wb_page);
                set_page_private(req->wb_page, (unsigned long)req);
        }
        spin_unlock(&mapping->private_lock);
        atomic_long_inc(&nfsi->nrequests);
        /* this is a head request for a page group - mark it as having an
         * extra reference so sub groups can follow suit.
         * This flag also informs pgio layer when to bump nrequests when
         * adding subrequests. */
        WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
        kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct address_space *mapping = page_file_mapping(req->wb_page);
        struct inode *inode = mapping->host;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *head;

        atomic_long_dec(&nfsi->nrequests);
        if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
                head = req->wb_head;

                spin_lock(&mapping->private_lock);
                if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
                        set_page_private(head->wb_page, 0);
                        ClearPagePrivate(head->wb_page);
                        clear_bit(PG_MAPPED, &head->wb_flags);
                }
                spin_unlock(&mapping->private_lock);
        }

        if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
                nfs_release_request(req);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
        if (req->wb_page)
                __set_page_dirty_nobuffers(req->wb_page);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding NFS_I(inode)->commit_mutex.
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
                                                struct page *page)
{
        struct nfs_page *freq, *t;
        struct nfs_commit_info cinfo;
        struct inode *inode = &nfsi->vfs_inode;

        nfs_init_cinfo_from_inode(&cinfo, inode);

        /* search through pnfs commit lists */
        freq = pnfs_search_commit_reqs(inode, &cinfo, page);
        if (freq)
                return freq->wb_head;

        /* Linearly search the commit list for the correct request */
        list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
                if (freq->wb_page == page)
                        return freq->wb_head;
        }

        return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
                                   struct nfs_commit_info *cinfo)
{
        set_bit(PG_CLEAN, &req->wb_flags);
        nfs_list_add_request(req, dst);
        atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
        if (req->wb_page)
                nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
                               struct nfs_commit_info *cinfo)
{
        if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
                return;
        nfs_list_remove_request(req);
        atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                      struct inode *inode)
{
        cinfo->inode = inode;
        cinfo->mds = &NFS_I(inode)->commit_info;
        cinfo->ds = pnfs_get_ds_info(inode);
        cinfo->dreq = NULL;
        cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
                    struct inode *inode,
                    struct nfs_direct_req *dreq)
{
        if (dreq)
                nfs_init_cinfo_from_dreq(cinfo, dreq);
        else
                nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
                        struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
        if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
                return;
        nfs_request_add_commit_list(req, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
        dec_node_page_state(page, NR_UNSTABLE_NFS);
        dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
                    WB_RECLAIMABLE);
}

/* Called holding the request lock on @req */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
        if (test_bit(PG_CLEAN, &req->wb_flags)) {
                struct nfs_open_context *ctx = nfs_req_openctx(req);
                struct inode *inode = d_inode(ctx->dentry);
                struct nfs_commit_info cinfo;

                nfs_init_cinfo_from_inode(&cinfo, inode);
                mutex_lock(&NFS_I(inode)->commit_mutex);
                if (!pnfs_clear_request_commit(req, &cinfo)) {
                        nfs_request_remove_commit_list(req, &cinfo);
                }
                mutex_unlock(&NFS_I(inode)->commit_mutex);
                nfs_clear_page_commit(req->wb_page);
        }
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
        if (hdr->verf.committed == NFS_DATA_SYNC)
                return hdr->lseg == NULL;
        return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
        nfs_io_completion_get(hdr->io_completion);
}

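/*
 * Recap of the stability levels used above: NFS_FILE_SYNC means the
 * server has already committed the data to stable storage, so no COMMIT
 * is needed. NFS_UNSTABLE - and NFS_DATA_SYNC when there is no pNFS
 * layout segment - means the data may still sit in the server's cache,
 * so the request must be queued for a later COMMIT and its write
 * verifier remembered in order to detect a server reboot in between.
 */
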
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_commit_info cinfo;
        unsigned long bytes = 0;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);

                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
                    (hdr->good_bytes < bytes)) {
                        nfs_set_pageerror(page_file_mapping(req->wb_page));
                        nfs_mapping_set_error(req->wb_page, hdr->error);
                        goto remove_req;
                }
                if (nfs_write_need_commit(hdr)) {
                        /* Reset wb_nio, since the write was successful. */
                        req->wb_nio = 0;
                        memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->pgio_mirror_idx);
                        goto next;
                }
remove_req:
                nfs_inode_remove_request(req);
next:
                nfs_end_page_writeback(req);
                nfs_release_request(req);
        }
out:
        nfs_io_completion_put(hdr->io_completion);
        hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
        return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
                     struct nfs_commit_info *cinfo, int max)
{
        struct nfs_page *req, *tmp;
        int ret = 0;

restart:
        list_for_each_entry_safe(req, tmp, src, wb_list) {
                kref_get(&req->wb_kref);
                if (!nfs_lock_request(req)) {
                        int status;

                        /* Prevent deadlock with nfs_lock_and_join_requests */
                        if (!list_empty(dst)) {
                                nfs_release_request(req);
                                continue;
                        }
                        /* Ensure we make progress to prevent livelock */
                        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
                        status = nfs_wait_on_request(req);
                        nfs_release_request(req);
                        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
                        if (status < 0)
                                break;
                        goto restart;
                }
                nfs_request_remove_commit_list(req, cinfo);
                clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
                nfs_list_add_request(req, dst);
                ret++;
                if ((ret == max) && !cinfo->dreq)
                        break;
                cond_resched();
        }
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
                struct nfs_commit_info *cinfo)
{
        int ret = 0;

        if (!atomic_long_read(&cinfo->mds->ncommit))
                return 0;
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
                const int max = INT_MAX;

                ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
                                           cinfo, max);
                ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
        }
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
        return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
                struct page *page,
                unsigned int offset,
                unsigned int bytes)
{
        struct nfs_page *req;
        unsigned int rqend;
        unsigned int end;
        int error;

        end = offset + bytes;

        req = nfs_lock_and_join_requests(page);
        if (IS_ERR_OR_NULL(req))
                return req;

        rqend = req->wb_offset + req->wb_bytes;
        /*
         * Tell the caller to flush out the request if
         * the offsets are non-contiguous.
         * Note: nfs_flush_incompatible() will already
         * have flushed out requests having wrong owners.
         */
        if (offset > rqend || end < req->wb_offset)
                goto out_flushme;

        /* Okay, the request matches. Update the region */
        if (offset < req->wb_offset) {
                req->wb_offset = offset;
                req->wb_pgbase = offset;
        }
        if (end > rqend)
                req->wb_bytes = end - req->wb_offset;
        else
                req->wb_bytes = rqend - req->wb_offset;
        req->wb_nio = 0;
        return req;
out_flushme:
        /*
         * Note: we mark the request dirty here because
         * nfs_lock_and_join_requests() cannot preserve
         * commit flags, so we have to replay the write.
         */
        nfs_mark_request_dirty(req);
        nfs_unlock_and_release_request(req);
        error = nfs_wb_page(inode, page);
        return (error < 0) ? ERR_PTR(error) : NULL;
}

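/*
 * Worked example for the merge above (illustrative values): if a page
 * already carries a request for bytes [1024, 2048) and the new write
 * dirties [2048, 3072), the ranges touch, so the request is extended in
 * place to [1024, 3072). A write at [3584, 4096) would leave a hole and
 * takes the out_flushme path instead: the old request is written back
 * and a fresh one is created for the new range.
 */
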
/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
                struct page *page, unsigned int offset, unsigned int bytes)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req;

        req = nfs_try_to_update_request(inode, page, offset, bytes);
        if (req != NULL)
                goto out;
        req = nfs_create_request(ctx, page, offset, bytes);
        if (IS_ERR(req))
                goto out;
        nfs_inode_add_request(inode, req);
out:
        return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        req = nfs_setup_write_request(ctx, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* Update file length */
        nfs_grow_file(page, offset, count);
        nfs_mark_uptodate(req);
        nfs_mark_request_dirty(req);
        nfs_unlock_and_release_request(req);
        return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct nfs_lock_context *l_ctx;
        struct file_lock_context *flctx = file_inode(file)->i_flctx;
        struct nfs_page *req;
        int do_flush, status;

        /*
         * Look for a request corresponding to this page. If there
         * is one, and it belongs to another file, we flush it out
         * before we try to copy anything into the page. Do this
         * due to the lack of an ACCESS-type call in NFSv2.
         * Also do the same if we find a request from an existing
         * dropped page.
         */
        do {
                req = nfs_page_find_head_request(page);
                if (req == NULL)
                        return 0;
                l_ctx = req->wb_lock_context;
                do_flush = req->wb_page != page ||
                        !nfs_match_open_context(nfs_req_openctx(req), ctx);
                if (l_ctx && flctx &&
                    !(list_empty_careful(&flctx->flc_posix) &&
                      list_empty_careful(&flctx->flc_flock))) {
                        do_flush |= l_ctx->lockowner != current->files;
                }
                nfs_release_request(req);
                if (!do_flush)
                        return 0;
                status = nfs_wb_page(page_file_mapping(page)->host, page);
        } while (status == 0);
        return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Returns 0 and sets a credential flag which triggers the inode to flush
 * and perform NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
        struct nfs_open_context *ctx = nfs_file_open_context(filp);

        if (nfs_ctx_key_to_expire(ctx, inode) &&
            !ctx->ll_cred)
                /* Already expired! */
                return -EACCES;
        return 0;
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
        struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
        struct rpc_cred *cred = ctx->ll_cred;
        struct auth_cred acred = {
                .cred = ctx->cred,
        };

        if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
                put_rpccred(cred);
                ctx->ll_cred = NULL;
                cred = NULL;
        }
        if (!cred)
                cred = auth->au_ops->lookup_cred(auth, &acred, 0);
        if (!cred || IS_ERR(cred))
                return true;
        ctx->ll_cred = cred;
        return !!(cred->cr_ops->crkey_timeout &&
                  cred->cr_ops->crkey_timeout(cred));
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);

        if (nfs_have_delegated_attributes(inode))
                goto out;
        if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
                return false;
        smp_rmb();
        if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
                return false;
out:
        if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
                return false;
        return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
        return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
                        fl->fl_type == F_WRLCK;
}

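/*
 * The effect of nfs_can_extend_write() below is visible in
 * nfs_updatepage(): when it returns 1, a sub-page write such as 512
 * bytes at offset 1024 is widened to cover the whole cached page
 * (offset = 0, count = nfs_page_length(page)), so the server sees one
 * full-page WRITE instead of a fragment. This is only safe when the
 * cached copy is known to be authoritative, hence the checks below.
 */
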
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
        int ret;
        struct file_lock_context *flctx = inode->i_flctx;
        struct file_lock *fl;

        if (file->f_flags & O_DSYNC)
                return 0;
        if (!nfs_write_pageuptodate(page, inode))
                return 0;
        if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                return 1;
        if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
                       list_empty_careful(&flctx->flc_posix)))
                return 1;

        /* Check to see if there are whole file write locks */
        ret = 0;
        spin_lock(&flctx->flc_lock);
        if (!list_empty(&flctx->flc_posix)) {
                fl = list_first_entry(&flctx->flc_posix, struct file_lock,
                                        fl_list);
                if (is_whole_file_wrlock(fl))
                        ret = 1;
        } else if (!list_empty(&flctx->flc_flock)) {
                fl = list_first_entry(&flctx->flc_flock, struct file_lock,
                                        fl_list);
                if (fl->fl_type == F_WRLCK)
                        ret = 1;
        }
        spin_unlock(&flctx->flc_lock);
        return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct address_space *mapping = page_file_mapping(page);
        struct inode *inode = mapping->host;
        int status = 0;

        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

        dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
                file, count, (long long)(page_file_offset(page) + offset));

        if (!count)
                goto out;

        if (nfs_can_extend_write(file, page, inode)) {
                count = max(count + offset, nfs_page_length(page));
                offset = 0;
        }

        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
                nfs_set_pageerror(mapping);
        else
                __set_page_dirty_nobuffers(page);
out:
        dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
                status, (long long)i_size_read(inode));
        return status;
}

static int flush_task_priority(int how)
{
        switch (how & (FLUSH_HIGHPRI | FLUSH_LOWPRI)) {
        case FLUSH_HIGHPRI:
                return RPC_PRIORITY_HIGH;
        case FLUSH_LOWPRI:
                return RPC_PRIORITY_LOW;
        }
        return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
                               struct rpc_message *msg,
                               const struct nfs_rpc_ops *rpc_ops,
                               struct rpc_task_setup *task_setup_data, int how)
{
        int priority = flush_task_priority(how);

        task_setup_data->priority = priority;
        rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
        trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes,
                                 hdr->args.stable);
}

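/*
 * wb_nio counts how many times a request has been transmitted: it is
 * bumped in nfs_redirty_request() below and reset to zero when a write
 * succeeds or when the request is refreshed with new data in
 * nfs_try_to_update_request(), so a persistently failing request can be
 * told apart from a fresh one.
 */
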
/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
        /* Bump the transmission count */
        req->wb_nio++;
        nfs_mark_request_dirty(req);
        set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
        nfs_end_page_writeback(req);
        nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                if (nfs_error_is_fatal(error))
                        nfs_write_error(req, error);
                else
                        nfs_redirty_request(req);
        }
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
        nfs_async_write_error(&hdr->pages, 0);
        filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
                                 hdr->args.offset + hdr->args.count - 1);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
        .init_hdr = nfs_async_write_init,
        .error_cleanup = nfs_async_write_error,
        .completion = nfs_write_completion,
        .reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
                           struct inode *inode, int ioflags, bool force_mds,
                           const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
                        server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
        struct nfs_pgio_mirror *mirror;

        if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
                pgio->pg_ops->pg_cleanup(pgio);

        pgio->pg_ops = &nfs_pgio_rw_ops;

        nfs_pageio_stop_mirroring(pgio);

        mirror = &pgio->pg_mirrors[0];
        mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_commit_data *data = calldata;

        NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
        umode_t mode = inode->i_mode;
        int kill = 0;

        /* suid always must be killed */
        if (unlikely(mode & S_ISUID))
                kill = ATTR_KILL_SUID;

        /*
         * sgid without any exec bits is just a mandatory locking mark; leave
         * it alone. If some exec bits are set, it's a real sgid; kill it.
         */
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;

        if (unlikely(kill && S_ISREG(mode)))
                return kill;

        return 0;
}

static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
                struct nfs_fattr *fattr)
{
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res *resp = &hdr->res;
        u64 size = argp->offset + resp->count;

        if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
                fattr->size = size;
        if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
                fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
                return;
        }
        if (size != fattr->size)
                return;
        /* Set attribute barrier */
        nfs_fattr_set_barrier(fattr);
        /* ...and update size */
        fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
        struct nfs_fattr *fattr = &hdr->fattr;
        struct inode *inode = hdr->inode;

        spin_lock(&inode->i_lock);
        nfs_writeback_check_extend(hdr, fattr);
        nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
                              struct nfs_pgio_header *hdr,
                              struct inode *inode)
{
        int status;

        /*
         * ->write_done will attempt to use post-op attributes to detect
         * conflicting writes by other clients. A strict interpretation
         * of close-to-open would allow us to continue caching even if
         * another writer had changed the file, but some applications
         * depend on tighter cache coherency when writing.
         */
        status = NFS_PROTO(inode)->write_done(task, hdr);
        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
        trace_nfs_writeback_done(inode, task->tk_status,
                                 hdr->args.offset, hdr->res.verf);

        if (hdr->res.verf->committed < hdr->args.stable &&
            task->tk_status >= 0) {
                /* We tried a write call, but the server did not
                 * commit data to stable storage even though we
                 * requested it.
                 * Note: There is a known bug in Tru64 < 5.0 in which
                 *       the server reports NFS_DATA_SYNC, but performs
                 *       NFS_FILE_SYNC. We therefore implement this checking
                 *       as a dprintk() in order to avoid filling syslog.
                 */
                static unsigned long complain;

                /* Note this will print the MDS for a DS write */
                if (time_before(complain, jiffies)) {
                        dprintk("NFS: faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
                                NFS_SERVER(inode)->nfs_client->cl_hostname,
                                hdr->res.verf->committed, hdr->args.stable);
                        complain = jiffies + 300 * HZ;
                }
        }

        /* Deal with the suid/sgid bit corner case */
        if (nfs_should_remove_suid(inode)) {
                spin_lock(&inode->i_lock);
                NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
                spin_unlock(&inode->i_lock);
        }
        return 0;
}

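/*
 * Short-write handling in nfs_writeback_result() below, by example
 * (illustrative values): if the server wrote only 4096 of 16384 bytes
 * and the data it did write was committed (NFS_DATA_SYNC or
 * NFS_FILE_SYNC), the write resumes where the server left off: offset
 * and pgbase advance by 4096 and count drops to 12288 before the RPC is
 * restarted. If the partial write was unstable, the whole range is
 * resent as NFS_FILE_SYNC instead, since unstable partial progress could
 * vanish in a server crash.
 */
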
/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
                                 struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res *resp = &hdr->res;

        if (resp->count < argp->count) {
                static unsigned long complain;

                /* This is a short write! */
                nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

                /* Has the server at least made some progress? */
                if (resp->count == 0) {
                        if (time_before(complain, jiffies)) {
                                printk(KERN_WARNING
                                       "NFS: Server wrote zero bytes, expected %u.\n",
                                       argp->count);
                                complain = jiffies + 300 * HZ;
                        }
                        nfs_set_pgio_error(hdr, -EIO, argp->offset);
                        task->tk_status = -EIO;
                        return;
                }

                /* For non rpc-based layout drivers, retry-through-MDS */
                if (!task->tk_ops) {
                        hdr->pnfs_error = -EAGAIN;
                        return;
                }

                /* Was this an NFSv2 write or an NFSv3 stable write? */
                if (resp->verf->committed != NFS_UNSTABLE) {
                        /* Resend from where the server left off */
                        hdr->mds_offset += resp->count;
                        argp->offset += resp->count;
                        argp->pgbase += resp->count;
                        argp->count -= resp->count;
                } else {
                        /* Resend as a stable write in order to avoid
                         * headaches in the case of a server crash.
                         */
                        argp->stable = NFS_FILE_SYNC;
                }
                rpc_restart_call_prepare(task);
        }
}

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
        return wait_var_event_killable(&cinfo->rpcs_out,
                                       !atomic_read(&cinfo->rpcs_out));
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
        atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
        if (atomic_dec_and_test(&cinfo->rpcs_out))
                wake_up_var(&cinfo->rpcs_out);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
        put_nfs_open_context(data->context);
        nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
                        const struct nfs_rpc_ops *nfs_ops,
                        const struct rpc_call_ops *call_ops,
                        int how, int flags)
{
        struct rpc_task *task;
        int priority = flush_task_priority(how);
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = clnt,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | flags,
                .priority = priority,
        };

        /* Set up the initial task struct. */
        nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
        trace_nfs_initiate_commit(data);

        dprintk("NFS: initiated commit call\n");

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        if (how & FLUSH_SYNC)
                rpc_wait_for_completion_task(task);
        rpc_put_task(task);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

static loff_t nfs_get_lwb(struct list_head *head)
{
        loff_t lwb = 0;
        struct nfs_page *req;

        list_for_each_entry(req, head, wb_list)
                if (lwb < (req_offset(req) + req->wb_bytes))
                        lwb = req_offset(req) + req->wb_bytes;

        return lwb;
}

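/*
 * The "last write byte" computed above is the end of the furthest-
 * reaching request on the list. It is only filled into data->lwb for
 * pNFS commits (see nfs_init_commit() below), presumably as an upper
 * bound on the file range the layout driver has to commit.
 */
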
/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
                     struct list_head *head,
                     struct pnfs_layout_segment *lseg,
                     struct nfs_commit_info *cinfo)
{
        struct nfs_page *first = nfs_list_entry(head->next);
        struct nfs_open_context *ctx = nfs_req_openctx(first);
        struct inode *inode = d_inode(ctx->dentry);

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        list_splice_init(head, &data->pages);

        data->inode = inode;
        data->cred = ctx->cred;
        data->lseg = lseg; /* reference transferred */
        /* only set lwb for pnfs commit */
        if (lseg)
                data->lwb = nfs_get_lwb(&data->pages);
        data->mds_ops = &nfs_commit_ops;
        data->completion_ops = cinfo->completion_ops;
        data->dreq = cinfo->dreq;

        data->args.fh = NFS_FH(data->inode);
        /* Note: we always request a commit of the entire inode */
        data->args.offset = 0;
        data->args.count = 0;
        data->context = get_nfs_open_context(ctx);
        data->res.fattr = &data->fattr;
        data->res.verf = &data->verf;
        nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
                      struct pnfs_layout_segment *lseg,
                      struct nfs_commit_info *cinfo,
                      u32 ds_commit_idx)
{
        struct nfs_page *req;

        while (!list_empty(page_list)) {
                req = nfs_list_entry(page_list->next);
                nfs_list_remove_request(req);
                nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
                if (!cinfo->dreq)
                        nfs_clear_page_commit(req->wb_page);
                nfs_unlock_and_release_request(req);
        }
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
                         struct nfs_page *req)
{
        __set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
                struct nfs_commit_info *cinfo)
{
        struct nfs_commit_data *data;

        /* another commit raced with us */
        if (list_empty(head))
                return 0;

        data = nfs_commitdata_alloc(true);

        /* Set up the argument struct */
        nfs_init_commit(data, head, NULL, cinfo);
        atomic_inc(&cinfo->mds->rpcs_out);
        return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
                                   data->mds_ops, how, 0);
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
        struct nfs_commit_data *data = calldata;

        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                task->tk_pid, task->tk_status);

        /* Call the NFS version-specific code */
        NFS_PROTO(data->inode)->commit_done(task, data);
        trace_nfs_commit_done(data);
}

static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
        struct nfs_page *req;
        int status = data->task.tk_status;
        struct nfs_commit_info cinfo;
        struct nfs_server *nfss;

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (req->wb_page)
                        nfs_clear_page_commit(req->wb_page);

                dprintk("NFS: commit (%s/%llu %d@%lld)",
                        nfs_req_openctx(req)->dentry->d_sb->s_id,
                        (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
                        req->wb_bytes,
                        (long long)req_offset(req));
                if (status < 0) {
                        if (req->wb_page) {
                                nfs_mapping_set_error(req->wb_page, status);
                                nfs_inode_remove_request(req);
                        }
                        dprintk_cont(", error = %d\n", status);
                        goto next;
                }

                /* Okay, COMMIT succeeded, apparently. Check the verifier
                 * returned by the server against all stored verfs. */
                if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
                        /* We have a match */
                        if (req->wb_page)
                                nfs_inode_remove_request(req);
                        dprintk_cont(" OK\n");
                        goto next;
                }
                /* We have a mismatch. Write the page again */
                dprintk_cont(" mismatch\n");
                nfs_mark_request_dirty(req);
                set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
next:
                nfs_unlock_and_release_request(req);
                /* Latency breaker */
                cond_resched();
        }
        nfss = NFS_SERVER(data->inode);
        if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);

        nfs_init_cinfo(&cinfo, data->inode, data->dreq);
        nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
        struct nfs_commit_data *data = calldata;

        data->completion_ops->completion(data);
        nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
        .rpc_call_prepare = nfs_commit_prepare,
        .rpc_call_done = nfs_commit_done,
        .rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
        .completion = nfs_commit_release_pages,
        .resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
                            int how, struct nfs_commit_info *cinfo)
{
        int status;

        status = pnfs_commit_list(inode, head, how, cinfo);
        if (status == PNFS_NOT_ATTEMPTED)
                status = nfs_commit_list(inode, head, how, cinfo);
        return status;
}

static int __nfs_commit_inode(struct inode *inode, int how,
                              struct writeback_control *wbc)
{
        LIST_HEAD(head);
        struct nfs_commit_info cinfo;
        int may_wait = how & FLUSH_SYNC;
        int ret, nscan;

        nfs_init_cinfo_from_inode(&cinfo, inode);
        nfs_commit_begin(cinfo.mds);
        for (;;) {
                ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
                if (ret <= 0)
                        break;
                ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
                if (ret < 0)
                        break;
                ret = 0;
                if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
                        if (nscan < wbc->nr_to_write)
                                wbc->nr_to_write -= nscan;
                        else
                                wbc->nr_to_write = 0;
                }
                if (nscan < INT_MAX)
                        break;
                cond_resched();
        }
        nfs_commit_end(cinfo.mds);
        if (ret || !may_wait)
                return ret;
        return wait_on_commit(cinfo.mds);
}

int nfs_commit_inode(struct inode *inode, int how)
{
        return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);

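/*
 * nfs_commit_inode(inode, 0) fires COMMITs asynchronously and returns as
 * soon as they are in flight; nfs_commit_inode(inode, FLUSH_SYNC) also
 * waits in wait_on_commit() until cinfo->mds->rpcs_out drops to zero,
 * i.e. until every outstanding COMMIT for the inode has completed, not
 * just the ones issued by this call.
 */
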
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* no commits means nothing needs to be done */
		if (!atomic_long_read(&nfsi->commit_info.ncommit))
			goto check_requests_outstanding;

		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = __nfs_commit_inode(inode, flags, wbc);
	if (!ret) {
		if (flags & FLUSH_SYNC)
			return 0;
	} else if (atomic_long_read(&nfsi->commit_info.ncommit))
		goto out_mark_dirty;

check_requests_outstanding:
	if (!atomic_read(&nfsi->commit_info.rpcs_out))
		return ret;
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, lstart, lend);
	if (ret == 0)
		ret = pnfs_sync_inode(mapping->host, true);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	wait_on_page_writeback(page);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(page);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}
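/*
 * The loop in nfs_wb_page() below makes progress on one of three states
 * each iteration: the page is dirty (write it), clean but still carrying
 * an uncommitted request (PagePrivate set -- commit it), or clean and
 * committed (done). A hypothetical minimal caller, roughly what a
 * pagecache launder callback would do (sketch only, not the actual
 * file.c implementation):
 *
 *	static int launder_page_sketch(struct page *page)
 *	{
 *		struct inode *inode = page_file_mapping(page)->host;
 *
 *		return nfs_wb_page(inode, page);
 *	}
 */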
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
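	/*
	 * Worked example for the formula below, assuming 4K pages
	 * (PAGE_SHIFT == 12): with 1GB of RAM, totalram_pages() is 262144,
	 * int_sqrt(262144) == 512, and (16 * 512) << (12 - 10) == 32768k,
	 * matching the table above. int_sqrt() truncates, so the
	 * non-power-of-two rows come out a kilobyte or so lower than the
	 * ideal values listed.
	 */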
	nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};
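/*
 * These rw_ops plug NFS writeback into the generic nfs_pageio machinery:
 * nfs_pageio_init_write() earlier in this file hands &nfs_rw_write_ops
 * to nfs_pageio_init() when a write pageio descriptor is set up (read.c
 * supplies the analogous read-side ops). The init call is paraphrased
 * here rather than quoted:
 *
 *	nfs_pageio_init(pgio, inode, pg_ops, compl_ops,
 *			&nfs_rw_write_ops, bsize, io_flags);
 */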