// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;
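/*
 * nfs_commitdata_alloc - allocate data for a COMMIT call
 * @never_fail: if true, block until the allocation succeeds
 *
 * With @never_fail set, allocate from the commit mempool using GFP_NOIO,
 * which may sleep but eventually succeeds. Otherwise make two cheaper
 * attempts - the mempool without waiting, then the slab cache with
 * reclaim but no retries - and return NULL if both fail.
 */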
struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
{
	struct nfs_commit_data *p;

	if (never_fail)
		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
	else {
		/* It is OK to do some reclaim, but not safe to wait
		 * for anything to be returned to the pool.
		 * mempool_alloc() cannot handle that particular combination,
		 * so we need two separate attempts.
		 */
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
					     __GFP_NOWARN | __GFP_NORETRY);
		if (!p)
			return NULL;
	}

	memset(p, 0, sizeof(*p));
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);

	memset(p, 0, sizeof(*p));
	p->rw_mode = FMODE_WRITE;
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
	return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
		void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	kref_init(&ioc->refcount);
}

static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc = container_of(kref,
			struct nfs_io_completion, refcount);
	ioc->complete(ioc->data);
	kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_put(&ioc->refcount, nfs_io_completion_release);
}

static struct nfs_page *
nfs_page_private_request(struct page *page)
{
	if (!PagePrivate(page))
		return NULL;
	return (struct nfs_page *)page_private(page);
}

/*
 * nfs_page_find_private_request - find head request associated with @page
 *
 * Looks up the head request attached to @page via page_private(), taking
 * mapping->private_lock to guard against the request being removed.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_private_request(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);
	struct nfs_page *req;

	if (!PagePrivate(page))
		return NULL;
	spin_lock(&mapping->private_lock);
	req = nfs_page_private_request(page);
	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}
	spin_unlock(&mapping->private_lock);
	return req;
}
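/*
 * nfs_page_find_swap_request - find head request for a swapcache page
 *
 * Swap-out pages carry no page_private() pointer, so search the inode's
 * commit lists for the head request instead.
 *
 * returns matching head request with reference held, or NULL if not found.
 */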
static struct nfs_page *
nfs_page_find_swap_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req = NULL;
	if (!PageSwapCache(page))
		return NULL;
	mutex_lock(&nfsi->commit_mutex);
	if (PageSwapCache(page)) {
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
			page);
		if (req) {
			WARN_ON_ONCE(req->wb_head != req);
			kref_get(&req->wb_kref);
		}
	}
	mutex_unlock(&nfsi->commit_mutex);
	return req;
}

/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_private_request(page);
	if (!req)
		req = nfs_page_find_swap_request(page);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (i_size > 0 && page_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	nfs_zap_mapping(mapping->host, mapping);
	/* Force file size revalidation */
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
					NFS_INO_REVAL_PAGECACHE |
					NFS_INO_INVALID_SIZE;
	spin_unlock(&inode->i_lock);
}

static void nfs_mapping_set_error(struct page *page, int error)
{
	struct address_space *mapping = page_file_mapping(page);

	SetPageError(page);
	mapping_set_error(mapping, error);
	nfs_set_pageerror(mapping);
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @req - head request of page group
 *
 * Return true if the page group with head @req covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	struct nfs_page *tmp;
	unsigned int pos = 0;
	unsigned int len = nfs_page_length(req->wb_page);

	nfs_page_group_lock(req);

	for (;;) {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (!tmp)
			break;
		pos = tmp->wb_pgbase + tmp->wb_bytes;
	}

	nfs_page_group_unlock(req);
	return pos >= len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	if (PageUptodate(req->wb_page))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	SetPageUptodate(req->wb_page);
}

static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
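/*
 * Writeback congestion accounting: every page under writeback is counted
 * in nfss->writeback. Crossing NFS_CONGESTION_ON_THRESH marks the bdi
 * congested, and nfs_end_page_writeback() clears the congested state
 * again once the count drops below NFS_CONGESTION_OFF_THRESH (25%
 * lower, to provide some hysteresis).
 */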
static void nfs_set_page_writeback(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH)
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

static void nfs_end_page_writeback(struct nfs_page *req)
{
	struct inode *inode = page_file_mapping(req->wb_page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	bool is_done;

	is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
	nfs_unlock_request(req);
	if (!is_done)
		return;

	end_page_writeback(req->wb_page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

/*
 * nfs_unroll_locks - unlock all newly locked requests up to @req
 *
 * this is a helper function for nfs_lock_and_join_requests
 *
 * @inode - inode associated with request page group
 * @head - head request of page group, must be holding head lock
 * @req - request that couldn't be locked; the walk stops here
 *
 * Releases the locks taken on this pass, from the request following
 * @head up to (but not including) @req.
 */
static void
nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
		 struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		/* Note: lock subreq in order to change subreq->wb_head */
		nfs_page_set_headlock(subreq);
		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_this_page = subreq;
		subreq->wb_head = subreq;

		clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
		if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
				nfs_page_clear_headlock(subreq);
				nfs_free_request(subreq);
			} else
				nfs_page_clear_headlock(subreq);
			continue;
		}
		nfs_page_clear_headlock(subreq);

		nfs_release_request(old_head);

		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
			nfs_release_request(subreq);
			atomic_long_dec(&NFS_I(inode)->nrequests);
		}

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_and_release_request(subreq);
	}
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int total_bytes;
	int ret;

try_again:
	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_page_find_head_request(page);
	if (!head)
		return NULL;

	/* lock the page head first in order to avoid an ABBA inefficiency */
	if (!nfs_lock_request(head)) {
		ret = nfs_wait_on_request(head);
		nfs_release_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
		goto try_again;
	}

	/* Ensure that nobody removed the request before we locked it */
	if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
		nfs_unlock_and_release_request(head);
		goto try_again;
	}

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		goto release_request;

	/* lock each request in the page group */
	total_bytes = head->wb_bytes;
	for (subreq = head->wb_this_page; subreq != head;
			subreq = subreq->wb_this_page) {

		if (!kref_get_unless_zero(&subreq->wb_kref)) {
			if (subreq->wb_offset == head->wb_offset + total_bytes)
				total_bytes += subreq->wb_bytes;
			continue;
		}

		while (!nfs_lock_request(subreq)) {
			/*
			 * Unlock page to allow nfs_page_group_sync_on_bit()
			 * to succeed
			 */
			nfs_page_group_unlock(head);
			ret = nfs_wait_on_request(subreq);
			if (!ret)
				ret = nfs_page_group_lock(head);
			if (ret < 0) {
				nfs_unroll_locks(inode, head, subreq);
				nfs_release_request(subreq);
				goto release_request;
			}
		}
		/*
		 * Subrequests are always contiguous, non overlapping
		 * and in order - but may be repeated (mirrored writes).
		 */
		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
			total_bytes += subreq->wb_bytes;
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_page_group_unlock(head);
			nfs_unroll_locks(inode, head, subreq);
			nfs_unlock_and_release_request(subreq);
			ret = -EIO;
			goto release_request;
		}
	}

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;

		/* change head request to cover whole range that
		 * the former page group covered */
		head->wb_bytes = total_bytes;
	}

	/* Postpone destruction of this request */
	if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
		set_bit(PG_INODE_REF, &head->wb_flags);
		kref_get(&head->wb_kref);
		atomic_long_inc(&NFS_I(inode)->nrequests);
	}

	nfs_page_group_unlock(head);

	nfs_destroy_unlinked_subrequests(destroy_list, head, inode);

	/* Did we lose a race with nfs_inode_remove_request()? */
	if (!(PagePrivate(page) || PageSwapCache(page))) {
		nfs_unlock_and_release_request(head);
		return NULL;
	}

	/* still holds ref on head from nfs_page_find_head_request
	 * and still has lock on head from lock loop */
	return head;

release_request:
	nfs_unlock_and_release_request(head);
	return ERR_PTR(ret);
}
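/*
 * nfs_write_error - report a fatal write error against a request
 *
 * Records the error on the mapping, removes the request from the inode,
 * ends writeback on the page and drops the caller's reference.
 */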
static void nfs_write_error(struct nfs_page *req, int error)
{
	trace_nfs_write_error(req, error);
	nfs_mapping_set_error(req->wb_page, error);
	nfs_inode_remove_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	/* If there is a fatal error that covers this write, just exit */
	ret = pgio->pg_error;
	if (nfs_error_is_fatal_on_server(ret))
		goto out_launder;

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
		if (nfs_error_is_fatal(ret)) {
			if (nfs_error_is_fatal_on_server(ret))
				goto out_launder;
		} else
			ret = -EAGAIN;
		nfs_redirty_request(req);
		pgio->pg_error = 0;
	} else
		nfs_add_stats(page_file_mapping(page)->host,
				NFSIOS_WRITEPAGES, 1);
out:
	return ret;
out_launder:
	nfs_write_error(req, ret);
	return 0;
}
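/*
 * nfs_do_writepage - flush one page through the pageio descriptor
 *
 * An -EAGAIN from nfs_page_async_flush() is translated into
 * AOP_WRITEPAGE_ACTIVATE after redirtying the page, which tells the VM
 * to skip the page for now and retry it on a later writeback pass.
 */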
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{
	int ret;

	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = AOP_WRITEPAGE_ACTIVATE;
	}
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
				struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = page_file_mapping(page)->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0,
				false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (nfs_error_is_fatal(pgio.pg_error))
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static void nfs_io_completion_commit(void *inode)
{
	nfs_commit_inode(inode, 0);
}
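/*
 * nfs_writepages - NFS implementation of ->writepages()
 *
 * Walks the dirty pages of @mapping via write_cache_pages(), feeding
 * each page to a pageio descriptor. The refcounted nfs_io_completion
 * triggers nfs_commit_inode() once every WRITE issued here has
 * completed; if its allocation fails, the writes simply proceed
 * without that batched commit.
 */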
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	struct nfs_io_completion *ioc;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	ioc = nfs_io_completion_alloc(GFP_KERNEL);
	if (ioc)
		nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
	pgio.pg_io_completion = ioc;
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	nfs_io_completion_put(ioc);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (nfs_error_is_fatal(err))
		goto out_err;
	return 0;
out_err:
	return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct nfs_inode *nfsi = NFS_I(inode);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	spin_lock(&mapping->private_lock);
	if (!nfs_have_writebacks(inode) &&
	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		inode_inc_iversion_raw(inode);
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	spin_unlock(&mapping->private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *head;

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		head = req->wb_head;

		spin_lock(&mapping->private_lock);
		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
			set_page_private(head->wb_page, 0);
			ClearPagePrivate(head->wb_page);
			clear_bit(PG_MAPPED, &head->wb_flags);
		}
		spin_unlock(&mapping->private_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
		nfs_release_request(req);
		atomic_long_dec(&nfsi->nrequests);
	}
}
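/*
 * Redirty the backing page so that the request gets picked up again by
 * a later writeback pass.
 */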
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	if (req->wb_page)
		__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding NFS_I(inode)->commit_mutex (the cinfo lock).
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (freq->wb_page == page)
			return freq->wb_head;
	}

	return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	if (req->wb_page)
		nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}
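/*
 * Undo the unstable-page accounting performed when the request was put
 * on a commit list: drop the NR_UNSTABLE_NFS node stat and the bdi's
 * WB_RECLAIMABLE count for this page.
 */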
static void
nfs_clear_page_commit(struct page *page)
{
	dec_node_page_state(page, NR_UNSTABLE_NFS);
	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
		    WB_RECLAIMABLE);
}

/* Called holding the request lock on @req */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct nfs_open_context *ctx = nfs_req_openctx(req);
		struct inode *inode = d_inode(ctx->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		mutex_lock(&NFS_I(inode)->commit_mutex);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		mutex_unlock(&NFS_I(inode)->commit_mutex);
		nfs_clear_page_commit(req->wb_page);
	}
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
	nfs_io_completion_get(hdr->io_completion);
}
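/*
 * nfs_write_completion - deal with the requests covered by a completed WRITE
 *
 * Requests past hdr->good_bytes of a failed WRITE have the error
 * recorded and are removed. Successful writes that still need a COMMIT
 * save the server's verifier and move to a commit list; fully stable
 * writes are removed outright. Writeback is ended on every page either
 * way.
 */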
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			trace_nfs_comp_error(req, hdr->error);
			nfs_mapping_set_error(req->wb_page, hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			/* Reset wb_nio, since the write was successful. */
			req->wb_nio = 0;
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_end_page_writeback(req);
		nfs_release_request(req);
	}
out:
	nfs_io_completion_put(hdr->io_completion);
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

restart:
	list_for_each_entry_safe(req, tmp, src, wb_list) {
		kref_get(&req->wb_kref);
		if (!nfs_lock_request(req)) {
			int status;

			/* Prevent deadlock with nfs_lock_and_join_requests */
			if (!list_empty(dst)) {
				nfs_release_request(req);
				continue;
			}
			/* Ensure we make progress to prevent livelock */
			mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
			status = nfs_wait_on_request(req);
			nfs_release_request(req);
			mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
			if (status < 0)
				break;
			goto restart;
		}
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(page);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	req->wb_nio = 0;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_page(inode, page);
	return (error < 0) ? ERR_PTR(error) : NULL;
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, page, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}
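/*
 * nfs_writepage_setup - set up a write request for a page update
 *
 * Creates (or extends) the request covering @offset/@count, grows the
 * file size if the write extends it, marks the page up to date once
 * the request group covers the whole page, and leaves the request
 * dirty for a later flush.
 */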
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = file_inode(file)->i_flctx;
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_head_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page ||
			!nfs_match_open_context(nfs_req_openctx(req), ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and perform NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);

	if (nfs_ctx_key_to_expire(ctx, inode) &&
	    !ctx->ll_cred)
		/* Already expired! */
		return -EACCES;
	return 0;
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
	struct rpc_cred *cred = ctx->ll_cred;
	struct auth_cred acred = {
		.cred = ctx->cred,
	};

	if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
		put_rpccred(cred);
		ctx->ll_cred = NULL;
		cred = NULL;
	}
	if (!cred)
		cred = auth->au_ops->lookup_cred(auth, &acred, 0);
	if (!cred || IS_ERR(cred))
		return true;
	ctx->ll_cred = cred;
	return !!(cred->cr_ops->crkey_timeout &&
		  cred->cr_ops->crkey_timeout(cred));
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return false;
	return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			fl->fl_type == F_WRLCK;
}
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	int ret;
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
					fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
					fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct address_space *mapping = page_file_mapping(page);
	struct inode *inode = mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(mapping);
	else
		__set_page_dirty_nobuffers(page);
out:
	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}

static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
	case FLUSH_HIGHPRI:
		return RPC_PRIORITY_HIGH;
	case FLUSH_LOWPRI:
		return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
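/*
 * nfs_initiate_write - prepare an RPC message for a WRITE
 *
 * Derives the RPC task priority from the FLUSH_* flags in @how, then
 * lets the protocol-specific ->write_setup() fill in the message and
 * choose the RPC client before the task is run.
 */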
static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
	trace_nfs_initiate_write(hdr);
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on the next
 * writeback using standard NFS.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	/* Bump the transmission count */
	req->wb_nio++;
	nfs_mark_request_dirty(req);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		if (nfs_error_is_fatal(error))
			nfs_write_error(req, error);
		else
			nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages, 0);
	filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
			hdr->args.offset + hdr->args.count - 1);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.init_hdr = nfs_async_write_init,
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
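/*
 * nfs_pageio_reset_write_mds - fall back to writing through the MDS
 *
 * Re-points the descriptor at the plain MDS write path, stops
 * mirroring and restores the MDS wsize; used when a pNFS layout
 * cannot (or should no longer) be used for this I/O.
 */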
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}
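/*
 * nfs_writeback_check_extend - decide whether post-op attributes may set
 * the file size
 *
 * If the server did not return a size, synthesize one from the end of
 * this write. A size below the current i_size is discarded so the file
 * is never shrunk by writeback attributes, and the attribute barrier is
 * only set when the size matches the end of this write exactly.
 */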
static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
	trace_nfs_writeback_done(task, hdr);

	if (hdr->res.verf->committed < hdr->args.stable &&
	    task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				hdr->res.verf->committed, hdr->args.stable);
			complain = jiffies + 300 * HZ;
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode)) {
		spin_lock(&inode->i_lock);
		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
		spin_unlock(&inode->i_lock);
	}
	return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		resp->count = 0;
		resp->verf->committed = 0;
		rpc_restart_call_prepare(task);
	}
}

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_var_event_killable(&cinfo->rpcs_out,
				       !atomic_read(&cinfo->rpcs_out));
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out))
		wake_up_var(&cinfo->rpcs_out);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);
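/*
 * nfs_initiate_commit - set up and run a COMMIT RPC task
 *
 * Fills in the RPC message via the protocol-specific ->commit_setup()
 * and starts an asynchronous rpc task. With FLUSH_SYNC set in @how,
 * also waits for the task to complete before returning.
 */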
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | flags,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
	trace_nfs_initiate_commit(data);

	dprintk("NFS: initiated commit call\n");

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first;
	struct nfs_open_context *ctx;
	struct inode *inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	if (head)
		list_splice_init(head, &data->pages);

	first = nfs_list_entry(data->pages.next);
	ctx = nfs_req_openctx(first);
	inode = d_inode(ctx->dentry);

	data->inode = inode;
	data->cred = ctx->cred;
	data->lseg = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq = cinfo->dreq;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->context = get_nfs_open_context(ctx);
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		if (!cinfo->dreq)
			nfs_clear_page_commit(req->wb_page);
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
		struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;

	/* another commit raced with us */
	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc(true);

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how, 0);
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
	trace_nfs_commit_done(task, data);
}
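/*
 * nfs_commit_release_pages - process the requests covered by a COMMIT
 *
 * On error the pages are marked bad and the requests removed. On
 * success, each request's stored write verifier is compared against the
 * verifier returned by the COMMIT: a match means the data made it to
 * stable storage and the request can be freed; a mismatch (usually a
 * server reboot) redirties the page so the data is resent.
 */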
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct nfs_server *nfss;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (req->wb_page)
			nfs_clear_page_commit(req->wb_page);

		dprintk("NFS: commit (%s/%llu %d@%lld)",
			nfs_req_openctx(req)->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			if (req->wb_page) {
				trace_nfs_commit_error(req, status);
				nfs_mapping_set_error(req->wb_page, status);
				nfs_inode_remove_request(req);
			}
			dprintk_cont(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (nfs_write_match_verf(verf, req)) {
			/* We have a match */
			if (req->wb_page)
				nfs_inode_remove_request(req);
			dprintk_cont(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk_cont(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
next:
		nfs_unlock_and_release_request(req);
		/* Latency breaker */
		cond_resched();
	}
	nfss = NFS_SERVER(data->inode);
	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

static int __nfs_commit_inode(struct inode *inode, int how,
		struct writeback_control *wbc)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int ret, nscan;

	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	for (;;) {
		ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
		if (ret <= 0)
			break;
		ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (ret < 0)
			break;
		ret = 0;
		if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
			if (nscan < wbc->nr_to_write)
				wbc->nr_to_write -= nscan;
			else
				wbc->nr_to_write = 0;
		}
		if (nscan < INT_MAX)
			break;
		cond_resched();
	}
	nfs_commit_end(cinfo.mds);
	if (ret || !may_wait)
		return ret;
	return wait_on_commit(cinfo.mds);
}

int nfs_commit_inode(struct inode *inode, int how)
{
	return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);
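/*
 * nfs_write_inode - the NFS ->write_inode() superblock operation
 *
 * For a non-blocking (WB_SYNC_NONE) flush: skip the COMMIT entirely if
 * nothing is waiting on a commit list, defer it while writeback is
 * still heavily in flight, and otherwise issue it without waiting for
 * the reply. The inode is re-marked dirty whenever commits or commit
 * RPCs remain outstanding, so the flusher thread will revisit it.
 */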
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* no commits means nothing needs to be done */
		if (!atomic_long_read(&nfsi->commit_info.ncommit))
			goto check_requests_outstanding;

		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = __nfs_commit_inode(inode, flags, wbc);
	if (!ret) {
		if (flags & FLUSH_SYNC)
			return 0;
	} else if (atomic_long_read(&nfsi->commit_info.ncommit))
		goto out_mark_dirty;

check_requests_outstanding:
	if (!atomic_read(&nfsi->commit_info.rpcs_out))
		return ret;
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
				     loff_t lstart, loff_t lend)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, lstart, lend);
	if (ret == 0)
		ret = pnfs_sync_inode(mapping->host, true);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * Flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	wait_on_page_writeback(page);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(page);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}
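/*
 * Note: nfs_wb_page_cancel() is used on the page invalidation path
 * (e.g. when a page is punched out by truncate), where the pending
 * requests are discarded rather than written back.
 */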
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif
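/*
 * Worked example of the congestion scaling applied below, assuming 4K
 * pages (PAGE_SHIFT == 12): on a 1GB machine totalram_pages() returns
 * 262144, int_sqrt(262144) == 512, and (16 * 512) << (12 - 10) == 32768k,
 * matching the table in the comment inside nfs_init_writepagecache().
 */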
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};
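/*
 * Note: nfs_rw_write_ops above is the write-side instance of the generic
 * NFS page I/O ops. The common pgio layer allocates and frees headers
 * through rw_alloc_header/rw_free_header and dispatches WRITE completion
 * through rw_done/rw_result (see nfs_pageio_init_write()).
 */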