1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/fs/nfs/write.c 4 * 5 * Write file data over NFS. 6 * 7 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> 8 */ 9 10 #include <linux/types.h> 11 #include <linux/slab.h> 12 #include <linux/mm.h> 13 #include <linux/pagemap.h> 14 #include <linux/file.h> 15 #include <linux/writeback.h> 16 #include <linux/swap.h> 17 #include <linux/migrate.h> 18 19 #include <linux/sunrpc/clnt.h> 20 #include <linux/nfs_fs.h> 21 #include <linux/nfs_mount.h> 22 #include <linux/nfs_page.h> 23 #include <linux/backing-dev.h> 24 #include <linux/export.h> 25 #include <linux/freezer.h> 26 #include <linux/wait.h> 27 #include <linux/iversion.h> 28 29 #include <linux/uaccess.h> 30 #include <linux/sched/mm.h> 31 32 #include "delegation.h" 33 #include "internal.h" 34 #include "iostat.h" 35 #include "nfs4_fs.h" 36 #include "fscache.h" 37 #include "pnfs.h" 38 39 #include "nfstrace.h" 40 41 #define NFSDBG_FACILITY NFSDBG_PAGECACHE 42 43 #define MIN_POOL_WRITE (32) 44 #define MIN_POOL_COMMIT (4) 45 46 struct nfs_io_completion { 47 void (*complete)(void *data); 48 void *data; 49 struct kref refcount; 50 }; 51 52 /* 53 * Local function declarations 54 */ 55 static void nfs_redirty_request(struct nfs_page *req); 56 static const struct rpc_call_ops nfs_commit_ops; 57 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 58 static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 59 static const struct nfs_rw_ops nfs_rw_write_ops; 60 static void nfs_inode_remove_request(struct nfs_page *req); 61 static void nfs_clear_request_commit(struct nfs_page *req); 62 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 63 struct inode *inode); 64 static struct nfs_page * 65 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, 66 struct page *page); 67 68 static struct kmem_cache *nfs_wdata_cachep; 69 static mempool_t *nfs_wdata_mempool; 70 static struct kmem_cache *nfs_cdata_cachep; 71 static mempool_t *nfs_commit_mempool; 72 73 struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail) 74 { 75 struct nfs_commit_data *p; 76 77 if (never_fail) 78 p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); 79 else { 80 /* It is OK to do some reclaim, not no safe to wait 81 * for anything to be returned to the pool. 82 * mempool_alloc() cannot handle that particular combination, 83 * so we need two separate attempts. 84 */ 85 p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); 86 if (!p) 87 p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO | 88 __GFP_NOWARN | __GFP_NORETRY); 89 if (!p) 90 return NULL; 91 } 92 93 memset(p, 0, sizeof(*p)); 94 INIT_LIST_HEAD(&p->pages); 95 return p; 96 } 97 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc); 98 99 void nfs_commit_free(struct nfs_commit_data *p) 100 { 101 mempool_free(p, nfs_commit_mempool); 102 } 103 EXPORT_SYMBOL_GPL(nfs_commit_free); 104 105 static struct nfs_pgio_header *nfs_writehdr_alloc(void) 106 { 107 struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL); 108 109 memset(p, 0, sizeof(*p)); 110 p->rw_mode = FMODE_WRITE; 111 return p; 112 } 113 114 static void nfs_writehdr_free(struct nfs_pgio_header *hdr) 115 { 116 mempool_free(hdr, nfs_wdata_mempool); 117 } 118 119 static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags) 120 { 121 return kmalloc(sizeof(struct nfs_io_completion), gfp_flags); 122 } 123 124 static void nfs_io_completion_init(struct nfs_io_completion *ioc, 125 void (*complete)(void *), void *data) 126 { 127 ioc->complete = complete; 128 ioc->data = data; 129 kref_init(&ioc->refcount); 130 } 131 132 static void nfs_io_completion_release(struct kref *kref) 133 { 134 struct nfs_io_completion *ioc = container_of(kref, 135 struct nfs_io_completion, refcount); 136 ioc->complete(ioc->data); 137 kfree(ioc); 138 } 139 140 static void nfs_io_completion_get(struct nfs_io_completion *ioc) 141 { 142 if (ioc != NULL) 143 kref_get(&ioc->refcount); 144 } 145 146 static void nfs_io_completion_put(struct nfs_io_completion *ioc) 147 { 148 if (ioc != NULL) 149 kref_put(&ioc->refcount, nfs_io_completion_release); 150 } 151 152 static void 153 nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode) 154 { 155 if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) { 156 kref_get(&req->wb_kref); 157 atomic_long_inc(&NFS_I(inode)->nrequests); 158 } 159 } 160 161 static int 162 nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) 163 { 164 int ret; 165 166 if (!test_bit(PG_REMOVE, &req->wb_flags)) 167 return 0; 168 ret = nfs_page_group_lock(req); 169 if (ret) 170 return ret; 171 if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) 172 nfs_page_set_inode_ref(req, inode); 173 nfs_page_group_unlock(req); 174 return 0; 175 } 176 177 static struct nfs_page * 178 nfs_page_private_request(struct page *page) 179 { 180 if (!PagePrivate(page)) 181 return NULL; 182 return (struct nfs_page *)page_private(page); 183 } 184 185 /* 186 * nfs_page_find_head_request_locked - find head request associated with @page 187 * 188 * must be called while holding the inode lock. 189 * 190 * returns matching head request with reference held, or NULL if not found. 191 */ 192 static struct nfs_page * 193 nfs_page_find_private_request(struct page *page) 194 { 195 struct address_space *mapping = page_file_mapping(page); 196 struct nfs_page *req; 197 198 if (!PagePrivate(page)) 199 return NULL; 200 spin_lock(&mapping->private_lock); 201 req = nfs_page_private_request(page); 202 if (req) { 203 WARN_ON_ONCE(req->wb_head != req); 204 kref_get(&req->wb_kref); 205 } 206 spin_unlock(&mapping->private_lock); 207 return req; 208 } 209 210 static struct nfs_page * 211 nfs_page_find_swap_request(struct page *page) 212 { 213 struct inode *inode = page_file_mapping(page)->host; 214 struct nfs_inode *nfsi = NFS_I(inode); 215 struct nfs_page *req = NULL; 216 if (!PageSwapCache(page)) 217 return NULL; 218 mutex_lock(&nfsi->commit_mutex); 219 if (PageSwapCache(page)) { 220 req = nfs_page_search_commits_for_head_request_locked(nfsi, 221 page); 222 if (req) { 223 WARN_ON_ONCE(req->wb_head != req); 224 kref_get(&req->wb_kref); 225 } 226 } 227 mutex_unlock(&nfsi->commit_mutex); 228 return req; 229 } 230 231 /* 232 * nfs_page_find_head_request - find head request associated with @page 233 * 234 * returns matching head request with reference held, or NULL if not found. 235 */ 236 static struct nfs_page *nfs_page_find_head_request(struct page *page) 237 { 238 struct nfs_page *req; 239 240 req = nfs_page_find_private_request(page); 241 if (!req) 242 req = nfs_page_find_swap_request(page); 243 return req; 244 } 245 246 static struct nfs_page *nfs_find_and_lock_page_request(struct page *page) 247 { 248 struct inode *inode = page_file_mapping(page)->host; 249 struct nfs_page *req, *head; 250 int ret; 251 252 for (;;) { 253 req = nfs_page_find_head_request(page); 254 if (!req) 255 return req; 256 head = nfs_page_group_lock_head(req); 257 if (head != req) 258 nfs_release_request(req); 259 if (IS_ERR(head)) 260 return head; 261 ret = nfs_cancel_remove_inode(head, inode); 262 if (ret < 0) { 263 nfs_unlock_and_release_request(head); 264 return ERR_PTR(ret); 265 } 266 /* Ensure that nobody removed the request before we locked it */ 267 if (head == nfs_page_private_request(page)) 268 break; 269 if (PageSwapCache(page)) 270 break; 271 nfs_unlock_and_release_request(head); 272 } 273 return head; 274 } 275 276 /* Adjust the file length if we're writing beyond the end */ 277 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) 278 { 279 struct inode *inode = page_file_mapping(page)->host; 280 loff_t end, i_size; 281 pgoff_t end_index; 282 283 spin_lock(&inode->i_lock); 284 i_size = i_size_read(inode); 285 end_index = (i_size - 1) >> PAGE_SHIFT; 286 if (i_size > 0 && page_index(page) < end_index) 287 goto out; 288 end = page_file_offset(page) + ((loff_t)offset+count); 289 if (i_size >= end) 290 goto out; 291 trace_nfs_size_grow(inode, end); 292 i_size_write(inode, end); 293 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE; 294 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); 295 out: 296 spin_unlock(&inode->i_lock); 297 nfs_fscache_invalidate(inode, 0); 298 } 299 300 /* A writeback failed: mark the page as bad, and invalidate the page cache */ 301 static void nfs_set_pageerror(struct address_space *mapping) 302 { 303 struct inode *inode = mapping->host; 304 305 nfs_zap_mapping(mapping->host, mapping); 306 /* Force file size revalidation */ 307 spin_lock(&inode->i_lock); 308 nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED | 309 NFS_INO_REVAL_PAGECACHE | 310 NFS_INO_INVALID_SIZE); 311 spin_unlock(&inode->i_lock); 312 } 313 314 static void nfs_mapping_set_error(struct page *page, int error) 315 { 316 struct address_space *mapping = page_file_mapping(page); 317 318 SetPageError(page); 319 mapping_set_error(mapping, error); 320 nfs_set_pageerror(mapping); 321 } 322 323 /* 324 * nfs_page_group_search_locked 325 * @head - head request of page group 326 * @page_offset - offset into page 327 * 328 * Search page group with head @head to find a request that contains the 329 * page offset @page_offset. 330 * 331 * Returns a pointer to the first matching nfs request, or NULL if no 332 * match is found. 333 * 334 * Must be called with the page group lock held 335 */ 336 static struct nfs_page * 337 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) 338 { 339 struct nfs_page *req; 340 341 req = head; 342 do { 343 if (page_offset >= req->wb_pgbase && 344 page_offset < (req->wb_pgbase + req->wb_bytes)) 345 return req; 346 347 req = req->wb_this_page; 348 } while (req != head); 349 350 return NULL; 351 } 352 353 /* 354 * nfs_page_group_covers_page 355 * @head - head request of page group 356 * 357 * Return true if the page group with head @head covers the whole page, 358 * returns false otherwise 359 */ 360 static bool nfs_page_group_covers_page(struct nfs_page *req) 361 { 362 struct nfs_page *tmp; 363 unsigned int pos = 0; 364 unsigned int len = nfs_page_length(req->wb_page); 365 366 nfs_page_group_lock(req); 367 368 for (;;) { 369 tmp = nfs_page_group_search_locked(req->wb_head, pos); 370 if (!tmp) 371 break; 372 pos = tmp->wb_pgbase + tmp->wb_bytes; 373 } 374 375 nfs_page_group_unlock(req); 376 return pos >= len; 377 } 378 379 /* We can set the PG_uptodate flag if we see that a write request 380 * covers the full page. 381 */ 382 static void nfs_mark_uptodate(struct nfs_page *req) 383 { 384 if (PageUptodate(req->wb_page)) 385 return; 386 if (!nfs_page_group_covers_page(req)) 387 return; 388 SetPageUptodate(req->wb_page); 389 } 390 391 static int wb_priority(struct writeback_control *wbc) 392 { 393 int ret = 0; 394 395 if (wbc->sync_mode == WB_SYNC_ALL) 396 ret = FLUSH_COND_STABLE; 397 return ret; 398 } 399 400 /* 401 * NFS congestion control 402 */ 403 404 int nfs_congestion_kb; 405 406 #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10)) 407 #define NFS_CONGESTION_OFF_THRESH \ 408 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) 409 410 static void nfs_set_page_writeback(struct page *page) 411 { 412 struct inode *inode = page_file_mapping(page)->host; 413 struct nfs_server *nfss = NFS_SERVER(inode); 414 int ret = test_set_page_writeback(page); 415 416 WARN_ON_ONCE(ret != 0); 417 418 if (atomic_long_inc_return(&nfss->writeback) > 419 NFS_CONGESTION_ON_THRESH) 420 nfss->write_congested = 1; 421 } 422 423 static void nfs_end_page_writeback(struct nfs_page *req) 424 { 425 struct inode *inode = page_file_mapping(req->wb_page)->host; 426 struct nfs_server *nfss = NFS_SERVER(inode); 427 bool is_done; 428 429 is_done = nfs_page_group_sync_on_bit(req, PG_WB_END); 430 nfs_unlock_request(req); 431 if (!is_done) 432 return; 433 434 end_page_writeback(req->wb_page); 435 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 436 nfss->write_congested = 0; 437 } 438 439 /* 440 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests 441 * 442 * @destroy_list - request list (using wb_this_page) terminated by @old_head 443 * @old_head - the old head of the list 444 * 445 * All subrequests must be locked and removed from all lists, so at this point 446 * they are only "active" in this function, and possibly in nfs_wait_on_request 447 * with a reference held by some other context. 448 */ 449 static void 450 nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, 451 struct nfs_page *old_head, 452 struct inode *inode) 453 { 454 while (destroy_list) { 455 struct nfs_page *subreq = destroy_list; 456 457 destroy_list = (subreq->wb_this_page == old_head) ? 458 NULL : subreq->wb_this_page; 459 460 /* Note: lock subreq in order to change subreq->wb_head */ 461 nfs_page_set_headlock(subreq); 462 WARN_ON_ONCE(old_head != subreq->wb_head); 463 464 /* make sure old group is not used */ 465 subreq->wb_this_page = subreq; 466 subreq->wb_head = subreq; 467 468 clear_bit(PG_REMOVE, &subreq->wb_flags); 469 470 /* Note: races with nfs_page_group_destroy() */ 471 if (!kref_read(&subreq->wb_kref)) { 472 /* Check if we raced with nfs_page_group_destroy() */ 473 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { 474 nfs_page_clear_headlock(subreq); 475 nfs_free_request(subreq); 476 } else 477 nfs_page_clear_headlock(subreq); 478 continue; 479 } 480 nfs_page_clear_headlock(subreq); 481 482 nfs_release_request(old_head); 483 484 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { 485 nfs_release_request(subreq); 486 atomic_long_dec(&NFS_I(inode)->nrequests); 487 } 488 489 /* subreq is now totally disconnected from page group or any 490 * write / commit lists. last chance to wake any waiters */ 491 nfs_unlock_and_release_request(subreq); 492 } 493 } 494 495 /* 496 * nfs_join_page_group - destroy subrequests of the head req 497 * @head: the page used to lookup the "page group" of nfs_page structures 498 * @inode: Inode to which the request belongs. 499 * 500 * This function joins all sub requests to the head request by first 501 * locking all requests in the group, cancelling any pending operations 502 * and finally updating the head request to cover the whole range covered by 503 * the (former) group. All subrequests are removed from any write or commit 504 * lists, unlinked from the group and destroyed. 505 */ 506 void 507 nfs_join_page_group(struct nfs_page *head, struct inode *inode) 508 { 509 struct nfs_page *subreq; 510 struct nfs_page *destroy_list = NULL; 511 unsigned int pgbase, off, bytes; 512 513 pgbase = head->wb_pgbase; 514 bytes = head->wb_bytes; 515 off = head->wb_offset; 516 for (subreq = head->wb_this_page; subreq != head; 517 subreq = subreq->wb_this_page) { 518 /* Subrequests should always form a contiguous range */ 519 if (pgbase > subreq->wb_pgbase) { 520 off -= pgbase - subreq->wb_pgbase; 521 bytes += pgbase - subreq->wb_pgbase; 522 pgbase = subreq->wb_pgbase; 523 } 524 bytes = max(subreq->wb_pgbase + subreq->wb_bytes 525 - pgbase, bytes); 526 } 527 528 /* Set the head request's range to cover the former page group */ 529 head->wb_pgbase = pgbase; 530 head->wb_bytes = bytes; 531 head->wb_offset = off; 532 533 /* Now that all requests are locked, make sure they aren't on any list. 534 * Commit list removal accounting is done after locks are dropped */ 535 subreq = head; 536 do { 537 nfs_clear_request_commit(subreq); 538 subreq = subreq->wb_this_page; 539 } while (subreq != head); 540 541 /* unlink subrequests from head, destroy them later */ 542 if (head->wb_this_page != head) { 543 /* destroy list will be terminated by head */ 544 destroy_list = head->wb_this_page; 545 head->wb_this_page = head; 546 } 547 548 nfs_destroy_unlinked_subrequests(destroy_list, head, inode); 549 } 550 551 /* 552 * nfs_lock_and_join_requests - join all subreqs to the head req 553 * @page: the page used to lookup the "page group" of nfs_page structures 554 * 555 * This function joins all sub requests to the head request by first 556 * locking all requests in the group, cancelling any pending operations 557 * and finally updating the head request to cover the whole range covered by 558 * the (former) group. All subrequests are removed from any write or commit 559 * lists, unlinked from the group and destroyed. 560 * 561 * Returns a locked, referenced pointer to the head request - which after 562 * this call is guaranteed to be the only request associated with the page. 563 * Returns NULL if no requests are found for @page, or a ERR_PTR if an 564 * error was encountered. 565 */ 566 static struct nfs_page * 567 nfs_lock_and_join_requests(struct page *page) 568 { 569 struct inode *inode = page_file_mapping(page)->host; 570 struct nfs_page *head; 571 int ret; 572 573 /* 574 * A reference is taken only on the head request which acts as a 575 * reference to the whole page group - the group will not be destroyed 576 * until the head reference is released. 577 */ 578 head = nfs_find_and_lock_page_request(page); 579 if (IS_ERR_OR_NULL(head)) 580 return head; 581 582 /* lock each request in the page group */ 583 ret = nfs_page_group_lock_subrequests(head); 584 if (ret < 0) { 585 nfs_unlock_and_release_request(head); 586 return ERR_PTR(ret); 587 } 588 589 nfs_join_page_group(head, inode); 590 591 return head; 592 } 593 594 static void nfs_write_error(struct nfs_page *req, int error) 595 { 596 trace_nfs_write_error(req, error); 597 nfs_mapping_set_error(req->wb_page, error); 598 nfs_inode_remove_request(req); 599 nfs_end_page_writeback(req); 600 nfs_release_request(req); 601 } 602 603 /* 604 * Find an associated nfs write request, and prepare to flush it out 605 * May return an error if the user signalled nfs_wait_on_request(). 606 */ 607 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 608 struct page *page) 609 { 610 struct nfs_page *req; 611 int ret = 0; 612 613 req = nfs_lock_and_join_requests(page); 614 if (!req) 615 goto out; 616 ret = PTR_ERR(req); 617 if (IS_ERR(req)) 618 goto out; 619 620 nfs_set_page_writeback(page); 621 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 622 623 /* If there is a fatal error that covers this write, just exit */ 624 ret = pgio->pg_error; 625 if (nfs_error_is_fatal_on_server(ret)) 626 goto out_launder; 627 628 ret = 0; 629 if (!nfs_pageio_add_request(pgio, req)) { 630 ret = pgio->pg_error; 631 /* 632 * Remove the problematic req upon fatal errors on the server 633 */ 634 if (nfs_error_is_fatal(ret)) { 635 if (nfs_error_is_fatal_on_server(ret)) 636 goto out_launder; 637 } else 638 ret = -EAGAIN; 639 nfs_redirty_request(req); 640 pgio->pg_error = 0; 641 } else 642 nfs_add_stats(page_file_mapping(page)->host, 643 NFSIOS_WRITEPAGES, 1); 644 out: 645 return ret; 646 out_launder: 647 nfs_write_error(req, ret); 648 return 0; 649 } 650 651 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, 652 struct nfs_pageio_descriptor *pgio) 653 { 654 int ret; 655 656 nfs_pageio_cond_complete(pgio, page_index(page)); 657 ret = nfs_page_async_flush(pgio, page); 658 if (ret == -EAGAIN) { 659 redirty_page_for_writepage(wbc, page); 660 ret = AOP_WRITEPAGE_ACTIVATE; 661 } 662 return ret; 663 } 664 665 /* 666 * Write an mmapped page to the server. 667 */ 668 static int nfs_writepage_locked(struct page *page, 669 struct writeback_control *wbc) 670 { 671 struct nfs_pageio_descriptor pgio; 672 struct inode *inode = page_file_mapping(page)->host; 673 int err; 674 675 if (wbc->sync_mode == WB_SYNC_NONE && 676 NFS_SERVER(inode)->write_congested) 677 return AOP_WRITEPAGE_ACTIVATE; 678 679 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); 680 nfs_pageio_init_write(&pgio, inode, 0, 681 false, &nfs_async_write_completion_ops); 682 err = nfs_do_writepage(page, wbc, &pgio); 683 pgio.pg_error = 0; 684 nfs_pageio_complete(&pgio); 685 if (err < 0) 686 return err; 687 if (nfs_error_is_fatal(pgio.pg_error)) 688 return pgio.pg_error; 689 return 0; 690 } 691 692 int nfs_writepage(struct page *page, struct writeback_control *wbc) 693 { 694 int ret; 695 696 ret = nfs_writepage_locked(page, wbc); 697 if (ret != AOP_WRITEPAGE_ACTIVATE) 698 unlock_page(page); 699 return ret; 700 } 701 702 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data) 703 { 704 int ret; 705 706 ret = nfs_do_writepage(page, wbc, data); 707 if (ret != AOP_WRITEPAGE_ACTIVATE) 708 unlock_page(page); 709 return ret; 710 } 711 712 static void nfs_io_completion_commit(void *inode) 713 { 714 nfs_commit_inode(inode, 0); 715 } 716 717 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) 718 { 719 struct inode *inode = mapping->host; 720 struct nfs_pageio_descriptor pgio; 721 struct nfs_io_completion *ioc = NULL; 722 unsigned int mntflags = NFS_SERVER(inode)->flags; 723 int priority = 0; 724 int err; 725 726 if (wbc->sync_mode == WB_SYNC_NONE && 727 NFS_SERVER(inode)->write_congested) 728 return 0; 729 730 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); 731 732 if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate || 733 wbc->for_background || wbc->for_sync || wbc->for_reclaim) { 734 ioc = nfs_io_completion_alloc(GFP_KERNEL); 735 if (ioc) 736 nfs_io_completion_init(ioc, nfs_io_completion_commit, 737 inode); 738 priority = wb_priority(wbc); 739 } 740 741 nfs_pageio_init_write(&pgio, inode, priority, false, 742 &nfs_async_write_completion_ops); 743 pgio.pg_io_completion = ioc; 744 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 745 pgio.pg_error = 0; 746 nfs_pageio_complete(&pgio); 747 nfs_io_completion_put(ioc); 748 749 if (err < 0) 750 goto out_err; 751 err = pgio.pg_error; 752 if (nfs_error_is_fatal(err)) 753 goto out_err; 754 return 0; 755 out_err: 756 return err; 757 } 758 759 /* 760 * Insert a write request into an inode 761 */ 762 static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) 763 { 764 struct address_space *mapping = page_file_mapping(req->wb_page); 765 struct nfs_inode *nfsi = NFS_I(inode); 766 767 WARN_ON_ONCE(req->wb_this_page != req); 768 769 /* Lock the request! */ 770 nfs_lock_request(req); 771 772 /* 773 * Swap-space should not get truncated. Hence no need to plug the race 774 * with invalidate/truncate. 775 */ 776 spin_lock(&mapping->private_lock); 777 if (likely(!PageSwapCache(req->wb_page))) { 778 set_bit(PG_MAPPED, &req->wb_flags); 779 SetPagePrivate(req->wb_page); 780 set_page_private(req->wb_page, (unsigned long)req); 781 } 782 spin_unlock(&mapping->private_lock); 783 atomic_long_inc(&nfsi->nrequests); 784 /* this a head request for a page group - mark it as having an 785 * extra reference so sub groups can follow suit. 786 * This flag also informs pgio layer when to bump nrequests when 787 * adding subrequests. */ 788 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); 789 kref_get(&req->wb_kref); 790 } 791 792 /* 793 * Remove a write request from an inode 794 */ 795 static void nfs_inode_remove_request(struct nfs_page *req) 796 { 797 struct address_space *mapping = page_file_mapping(req->wb_page); 798 struct inode *inode = mapping->host; 799 struct nfs_inode *nfsi = NFS_I(inode); 800 struct nfs_page *head; 801 802 if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { 803 head = req->wb_head; 804 805 spin_lock(&mapping->private_lock); 806 if (likely(head->wb_page && !PageSwapCache(head->wb_page))) { 807 set_page_private(head->wb_page, 0); 808 ClearPagePrivate(head->wb_page); 809 clear_bit(PG_MAPPED, &head->wb_flags); 810 } 811 spin_unlock(&mapping->private_lock); 812 } 813 814 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) { 815 nfs_release_request(req); 816 atomic_long_dec(&nfsi->nrequests); 817 } 818 } 819 820 static void 821 nfs_mark_request_dirty(struct nfs_page *req) 822 { 823 if (req->wb_page) 824 __set_page_dirty_nobuffers(req->wb_page); 825 } 826 827 /* 828 * nfs_page_search_commits_for_head_request_locked 829 * 830 * Search through commit lists on @inode for the head request for @page. 831 * Must be called while holding the inode (which is cinfo) lock. 832 * 833 * Returns the head request if found, or NULL if not found. 834 */ 835 static struct nfs_page * 836 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, 837 struct page *page) 838 { 839 struct nfs_page *freq, *t; 840 struct nfs_commit_info cinfo; 841 struct inode *inode = &nfsi->vfs_inode; 842 843 nfs_init_cinfo_from_inode(&cinfo, inode); 844 845 /* search through pnfs commit lists */ 846 freq = pnfs_search_commit_reqs(inode, &cinfo, page); 847 if (freq) 848 return freq->wb_head; 849 850 /* Linearly search the commit list for the correct request */ 851 list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) { 852 if (freq->wb_page == page) 853 return freq->wb_head; 854 } 855 856 return NULL; 857 } 858 859 /** 860 * nfs_request_add_commit_list_locked - add request to a commit list 861 * @req: pointer to a struct nfs_page 862 * @dst: commit list head 863 * @cinfo: holds list lock and accounting info 864 * 865 * This sets the PG_CLEAN bit, updates the cinfo count of 866 * number of outstanding requests requiring a commit as well as 867 * the MM page stats. 868 * 869 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the 870 * nfs_page lock. 871 */ 872 void 873 nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst, 874 struct nfs_commit_info *cinfo) 875 { 876 set_bit(PG_CLEAN, &req->wb_flags); 877 nfs_list_add_request(req, dst); 878 atomic_long_inc(&cinfo->mds->ncommit); 879 } 880 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); 881 882 /** 883 * nfs_request_add_commit_list - add request to a commit list 884 * @req: pointer to a struct nfs_page 885 * @cinfo: holds list lock and accounting info 886 * 887 * This sets the PG_CLEAN bit, updates the cinfo count of 888 * number of outstanding requests requiring a commit as well as 889 * the MM page stats. 890 * 891 * The caller must _not_ hold the cinfo->lock, but must be 892 * holding the nfs_page lock. 893 */ 894 void 895 nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo) 896 { 897 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 898 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo); 899 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 900 if (req->wb_page) 901 nfs_mark_page_unstable(req->wb_page, cinfo); 902 } 903 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); 904 905 /** 906 * nfs_request_remove_commit_list - Remove request from a commit list 907 * @req: pointer to a nfs_page 908 * @cinfo: holds list lock and accounting info 909 * 910 * This clears the PG_CLEAN bit, and updates the cinfo's count of 911 * number of outstanding requests requiring a commit 912 * It does not update the MM page stats. 913 * 914 * The caller _must_ hold the cinfo->lock and the nfs_page lock. 915 */ 916 void 917 nfs_request_remove_commit_list(struct nfs_page *req, 918 struct nfs_commit_info *cinfo) 919 { 920 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) 921 return; 922 nfs_list_remove_request(req); 923 atomic_long_dec(&cinfo->mds->ncommit); 924 } 925 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); 926 927 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 928 struct inode *inode) 929 { 930 cinfo->inode = inode; 931 cinfo->mds = &NFS_I(inode)->commit_info; 932 cinfo->ds = pnfs_get_ds_info(inode); 933 cinfo->dreq = NULL; 934 cinfo->completion_ops = &nfs_commit_completion_ops; 935 } 936 937 void nfs_init_cinfo(struct nfs_commit_info *cinfo, 938 struct inode *inode, 939 struct nfs_direct_req *dreq) 940 { 941 if (dreq) 942 nfs_init_cinfo_from_dreq(cinfo, dreq); 943 else 944 nfs_init_cinfo_from_inode(cinfo, inode); 945 } 946 EXPORT_SYMBOL_GPL(nfs_init_cinfo); 947 948 /* 949 * Add a request to the inode's commit list. 950 */ 951 void 952 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, 953 struct nfs_commit_info *cinfo, u32 ds_commit_idx) 954 { 955 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx)) 956 return; 957 nfs_request_add_commit_list(req, cinfo); 958 } 959 960 static void 961 nfs_clear_page_commit(struct page *page) 962 { 963 dec_node_page_state(page, NR_WRITEBACK); 964 dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb, 965 WB_WRITEBACK); 966 } 967 968 /* Called holding the request lock on @req */ 969 static void 970 nfs_clear_request_commit(struct nfs_page *req) 971 { 972 if (test_bit(PG_CLEAN, &req->wb_flags)) { 973 struct nfs_open_context *ctx = nfs_req_openctx(req); 974 struct inode *inode = d_inode(ctx->dentry); 975 struct nfs_commit_info cinfo; 976 977 nfs_init_cinfo_from_inode(&cinfo, inode); 978 mutex_lock(&NFS_I(inode)->commit_mutex); 979 if (!pnfs_clear_request_commit(req, &cinfo)) { 980 nfs_request_remove_commit_list(req, &cinfo); 981 } 982 mutex_unlock(&NFS_I(inode)->commit_mutex); 983 nfs_clear_page_commit(req->wb_page); 984 } 985 } 986 987 int nfs_write_need_commit(struct nfs_pgio_header *hdr) 988 { 989 if (hdr->verf.committed == NFS_DATA_SYNC) 990 return hdr->lseg == NULL; 991 return hdr->verf.committed != NFS_FILE_SYNC; 992 } 993 994 static void nfs_async_write_init(struct nfs_pgio_header *hdr) 995 { 996 nfs_io_completion_get(hdr->io_completion); 997 } 998 999 static void nfs_write_completion(struct nfs_pgio_header *hdr) 1000 { 1001 struct nfs_commit_info cinfo; 1002 unsigned long bytes = 0; 1003 1004 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 1005 goto out; 1006 nfs_init_cinfo_from_inode(&cinfo, hdr->inode); 1007 while (!list_empty(&hdr->pages)) { 1008 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 1009 1010 bytes += req->wb_bytes; 1011 nfs_list_remove_request(req); 1012 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && 1013 (hdr->good_bytes < bytes)) { 1014 trace_nfs_comp_error(req, hdr->error); 1015 nfs_mapping_set_error(req->wb_page, hdr->error); 1016 goto remove_req; 1017 } 1018 if (nfs_write_need_commit(hdr)) { 1019 /* Reset wb_nio, since the write was successful. */ 1020 req->wb_nio = 0; 1021 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); 1022 nfs_mark_request_commit(req, hdr->lseg, &cinfo, 1023 hdr->pgio_mirror_idx); 1024 goto next; 1025 } 1026 remove_req: 1027 nfs_inode_remove_request(req); 1028 next: 1029 nfs_end_page_writeback(req); 1030 nfs_release_request(req); 1031 } 1032 out: 1033 nfs_io_completion_put(hdr->io_completion); 1034 hdr->release(hdr); 1035 } 1036 1037 unsigned long 1038 nfs_reqs_to_commit(struct nfs_commit_info *cinfo) 1039 { 1040 return atomic_long_read(&cinfo->mds->ncommit); 1041 } 1042 1043 /* NFS_I(cinfo->inode)->commit_mutex held by caller */ 1044 int 1045 nfs_scan_commit_list(struct list_head *src, struct list_head *dst, 1046 struct nfs_commit_info *cinfo, int max) 1047 { 1048 struct nfs_page *req, *tmp; 1049 int ret = 0; 1050 1051 list_for_each_entry_safe(req, tmp, src, wb_list) { 1052 kref_get(&req->wb_kref); 1053 if (!nfs_lock_request(req)) { 1054 nfs_release_request(req); 1055 continue; 1056 } 1057 nfs_request_remove_commit_list(req, cinfo); 1058 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); 1059 nfs_list_add_request(req, dst); 1060 ret++; 1061 if ((ret == max) && !cinfo->dreq) 1062 break; 1063 cond_resched(); 1064 } 1065 return ret; 1066 } 1067 EXPORT_SYMBOL_GPL(nfs_scan_commit_list); 1068 1069 /* 1070 * nfs_scan_commit - Scan an inode for commit requests 1071 * @inode: NFS inode to scan 1072 * @dst: mds destination list 1073 * @cinfo: mds and ds lists of reqs ready to commit 1074 * 1075 * Moves requests from the inode's 'commit' request list. 1076 * The requests are *not* checked to ensure that they form a contiguous set. 1077 */ 1078 int 1079 nfs_scan_commit(struct inode *inode, struct list_head *dst, 1080 struct nfs_commit_info *cinfo) 1081 { 1082 int ret = 0; 1083 1084 if (!atomic_long_read(&cinfo->mds->ncommit)) 1085 return 0; 1086 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 1087 if (atomic_long_read(&cinfo->mds->ncommit) > 0) { 1088 const int max = INT_MAX; 1089 1090 ret = nfs_scan_commit_list(&cinfo->mds->list, dst, 1091 cinfo, max); 1092 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); 1093 } 1094 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 1095 return ret; 1096 } 1097 1098 /* 1099 * Search for an existing write request, and attempt to update 1100 * it to reflect a new dirty region on a given page. 1101 * 1102 * If the attempt fails, then the existing request is flushed out 1103 * to disk. 1104 */ 1105 static struct nfs_page *nfs_try_to_update_request(struct inode *inode, 1106 struct page *page, 1107 unsigned int offset, 1108 unsigned int bytes) 1109 { 1110 struct nfs_page *req; 1111 unsigned int rqend; 1112 unsigned int end; 1113 int error; 1114 1115 end = offset + bytes; 1116 1117 req = nfs_lock_and_join_requests(page); 1118 if (IS_ERR_OR_NULL(req)) 1119 return req; 1120 1121 rqend = req->wb_offset + req->wb_bytes; 1122 /* 1123 * Tell the caller to flush out the request if 1124 * the offsets are non-contiguous. 1125 * Note: nfs_flush_incompatible() will already 1126 * have flushed out requests having wrong owners. 1127 */ 1128 if (offset > rqend || end < req->wb_offset) 1129 goto out_flushme; 1130 1131 /* Okay, the request matches. Update the region */ 1132 if (offset < req->wb_offset) { 1133 req->wb_offset = offset; 1134 req->wb_pgbase = offset; 1135 } 1136 if (end > rqend) 1137 req->wb_bytes = end - req->wb_offset; 1138 else 1139 req->wb_bytes = rqend - req->wb_offset; 1140 req->wb_nio = 0; 1141 return req; 1142 out_flushme: 1143 /* 1144 * Note: we mark the request dirty here because 1145 * nfs_lock_and_join_requests() cannot preserve 1146 * commit flags, so we have to replay the write. 1147 */ 1148 nfs_mark_request_dirty(req); 1149 nfs_unlock_and_release_request(req); 1150 error = nfs_wb_page(inode, page); 1151 return (error < 0) ? ERR_PTR(error) : NULL; 1152 } 1153 1154 /* 1155 * Try to update an existing write request, or create one if there is none. 1156 * 1157 * Note: Should always be called with the Page Lock held to prevent races 1158 * if we have to add a new request. Also assumes that the caller has 1159 * already called nfs_flush_incompatible() if necessary. 1160 */ 1161 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx, 1162 struct page *page, unsigned int offset, unsigned int bytes) 1163 { 1164 struct inode *inode = page_file_mapping(page)->host; 1165 struct nfs_page *req; 1166 1167 req = nfs_try_to_update_request(inode, page, offset, bytes); 1168 if (req != NULL) 1169 goto out; 1170 req = nfs_create_request(ctx, page, offset, bytes); 1171 if (IS_ERR(req)) 1172 goto out; 1173 nfs_inode_add_request(inode, req); 1174 out: 1175 return req; 1176 } 1177 1178 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, 1179 unsigned int offset, unsigned int count) 1180 { 1181 struct nfs_page *req; 1182 1183 req = nfs_setup_write_request(ctx, page, offset, count); 1184 if (IS_ERR(req)) 1185 return PTR_ERR(req); 1186 /* Update file length */ 1187 nfs_grow_file(page, offset, count); 1188 nfs_mark_uptodate(req); 1189 nfs_mark_request_dirty(req); 1190 nfs_unlock_and_release_request(req); 1191 return 0; 1192 } 1193 1194 int nfs_flush_incompatible(struct file *file, struct page *page) 1195 { 1196 struct nfs_open_context *ctx = nfs_file_open_context(file); 1197 struct nfs_lock_context *l_ctx; 1198 struct file_lock_context *flctx = file_inode(file)->i_flctx; 1199 struct nfs_page *req; 1200 int do_flush, status; 1201 /* 1202 * Look for a request corresponding to this page. If there 1203 * is one, and it belongs to another file, we flush it out 1204 * before we try to copy anything into the page. Do this 1205 * due to the lack of an ACCESS-type call in NFSv2. 1206 * Also do the same if we find a request from an existing 1207 * dropped page. 1208 */ 1209 do { 1210 req = nfs_page_find_head_request(page); 1211 if (req == NULL) 1212 return 0; 1213 l_ctx = req->wb_lock_context; 1214 do_flush = req->wb_page != page || 1215 !nfs_match_open_context(nfs_req_openctx(req), ctx); 1216 if (l_ctx && flctx && 1217 !(list_empty_careful(&flctx->flc_posix) && 1218 list_empty_careful(&flctx->flc_flock))) { 1219 do_flush |= l_ctx->lockowner != current->files; 1220 } 1221 nfs_release_request(req); 1222 if (!do_flush) 1223 return 0; 1224 status = nfs_wb_page(page_file_mapping(page)->host, page); 1225 } while (status == 0); 1226 return status; 1227 } 1228 1229 /* 1230 * Avoid buffered writes when a open context credential's key would 1231 * expire soon. 1232 * 1233 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL. 1234 * 1235 * Return 0 and set a credential flag which triggers the inode to flush 1236 * and performs NFS_FILE_SYNC writes if the key will expired within 1237 * RPC_KEY_EXPIRE_TIMEO. 1238 */ 1239 int 1240 nfs_key_timeout_notify(struct file *filp, struct inode *inode) 1241 { 1242 struct nfs_open_context *ctx = nfs_file_open_context(filp); 1243 1244 if (nfs_ctx_key_to_expire(ctx, inode) && 1245 !rcu_access_pointer(ctx->ll_cred)) 1246 /* Already expired! */ 1247 return -EACCES; 1248 return 0; 1249 } 1250 1251 /* 1252 * Test if the open context credential key is marked to expire soon. 1253 */ 1254 bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode) 1255 { 1256 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; 1257 struct rpc_cred *cred, *new, *old = NULL; 1258 struct auth_cred acred = { 1259 .cred = ctx->cred, 1260 }; 1261 bool ret = false; 1262 1263 rcu_read_lock(); 1264 cred = rcu_dereference(ctx->ll_cred); 1265 if (cred && !(cred->cr_ops->crkey_timeout && 1266 cred->cr_ops->crkey_timeout(cred))) 1267 goto out; 1268 rcu_read_unlock(); 1269 1270 new = auth->au_ops->lookup_cred(auth, &acred, 0); 1271 if (new == cred) { 1272 put_rpccred(new); 1273 return true; 1274 } 1275 if (IS_ERR_OR_NULL(new)) { 1276 new = NULL; 1277 ret = true; 1278 } else if (new->cr_ops->crkey_timeout && 1279 new->cr_ops->crkey_timeout(new)) 1280 ret = true; 1281 1282 rcu_read_lock(); 1283 old = rcu_dereference_protected(xchg(&ctx->ll_cred, 1284 RCU_INITIALIZER(new)), 1); 1285 out: 1286 rcu_read_unlock(); 1287 put_rpccred(old); 1288 return ret; 1289 } 1290 1291 /* 1292 * If the page cache is marked as unsafe or invalid, then we can't rely on 1293 * the PageUptodate() flag. In this case, we will need to turn off 1294 * write optimisations that depend on the page contents being correct. 1295 */ 1296 static bool nfs_write_pageuptodate(struct page *page, struct inode *inode, 1297 unsigned int pagelen) 1298 { 1299 struct nfs_inode *nfsi = NFS_I(inode); 1300 1301 if (nfs_have_delegated_attributes(inode)) 1302 goto out; 1303 if (nfsi->cache_validity & 1304 (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE)) 1305 return false; 1306 smp_rmb(); 1307 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0) 1308 return false; 1309 out: 1310 if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0) 1311 return false; 1312 return PageUptodate(page) != 0; 1313 } 1314 1315 static bool 1316 is_whole_file_wrlock(struct file_lock *fl) 1317 { 1318 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && 1319 fl->fl_type == F_WRLCK; 1320 } 1321 1322 /* If we know the page is up to date, and we're not using byte range locks (or 1323 * if we have the whole file locked for writing), it may be more efficient to 1324 * extend the write to cover the entire page in order to avoid fragmentation 1325 * inefficiencies. 1326 * 1327 * If the file is opened for synchronous writes then we can just skip the rest 1328 * of the checks. 1329 */ 1330 static int nfs_can_extend_write(struct file *file, struct page *page, 1331 struct inode *inode, unsigned int pagelen) 1332 { 1333 int ret; 1334 struct file_lock_context *flctx = inode->i_flctx; 1335 struct file_lock *fl; 1336 1337 if (file->f_flags & O_DSYNC) 1338 return 0; 1339 if (!nfs_write_pageuptodate(page, inode, pagelen)) 1340 return 0; 1341 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) 1342 return 1; 1343 if (!flctx || (list_empty_careful(&flctx->flc_flock) && 1344 list_empty_careful(&flctx->flc_posix))) 1345 return 1; 1346 1347 /* Check to see if there are whole file write locks */ 1348 ret = 0; 1349 spin_lock(&flctx->flc_lock); 1350 if (!list_empty(&flctx->flc_posix)) { 1351 fl = list_first_entry(&flctx->flc_posix, struct file_lock, 1352 fl_list); 1353 if (is_whole_file_wrlock(fl)) 1354 ret = 1; 1355 } else if (!list_empty(&flctx->flc_flock)) { 1356 fl = list_first_entry(&flctx->flc_flock, struct file_lock, 1357 fl_list); 1358 if (fl->fl_type == F_WRLCK) 1359 ret = 1; 1360 } 1361 spin_unlock(&flctx->flc_lock); 1362 return ret; 1363 } 1364 1365 /* 1366 * Update and possibly write a cached page of an NFS file. 1367 * 1368 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad 1369 * things with a page scheduled for an RPC call (e.g. invalidate it). 1370 */ 1371 int nfs_updatepage(struct file *file, struct page *page, 1372 unsigned int offset, unsigned int count) 1373 { 1374 struct nfs_open_context *ctx = nfs_file_open_context(file); 1375 struct address_space *mapping = page_file_mapping(page); 1376 struct inode *inode = mapping->host; 1377 unsigned int pagelen = nfs_page_length(page); 1378 int status = 0; 1379 1380 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); 1381 1382 dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n", 1383 file, count, (long long)(page_file_offset(page) + offset)); 1384 1385 if (!count) 1386 goto out; 1387 1388 if (nfs_can_extend_write(file, page, inode, pagelen)) { 1389 count = max(count + offset, pagelen); 1390 offset = 0; 1391 } 1392 1393 status = nfs_writepage_setup(ctx, page, offset, count); 1394 if (status < 0) 1395 nfs_set_pageerror(mapping); 1396 out: 1397 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", 1398 status, (long long)i_size_read(inode)); 1399 return status; 1400 } 1401 1402 static int flush_task_priority(int how) 1403 { 1404 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { 1405 case FLUSH_HIGHPRI: 1406 return RPC_PRIORITY_HIGH; 1407 case FLUSH_LOWPRI: 1408 return RPC_PRIORITY_LOW; 1409 } 1410 return RPC_PRIORITY_NORMAL; 1411 } 1412 1413 static void nfs_initiate_write(struct nfs_pgio_header *hdr, 1414 struct rpc_message *msg, 1415 const struct nfs_rpc_ops *rpc_ops, 1416 struct rpc_task_setup *task_setup_data, int how) 1417 { 1418 int priority = flush_task_priority(how); 1419 1420 task_setup_data->priority = priority; 1421 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); 1422 trace_nfs_initiate_write(hdr); 1423 } 1424 1425 /* If a nfs_flush_* function fails, it should remove reqs from @head and 1426 * call this on each, which will prepare them to be retried on next 1427 * writeback using standard nfs. 1428 */ 1429 static void nfs_redirty_request(struct nfs_page *req) 1430 { 1431 /* Bump the transmission count */ 1432 req->wb_nio++; 1433 nfs_mark_request_dirty(req); 1434 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags); 1435 nfs_end_page_writeback(req); 1436 nfs_release_request(req); 1437 } 1438 1439 static void nfs_async_write_error(struct list_head *head, int error) 1440 { 1441 struct nfs_page *req; 1442 1443 while (!list_empty(head)) { 1444 req = nfs_list_entry(head->next); 1445 nfs_list_remove_request(req); 1446 if (nfs_error_is_fatal(error)) 1447 nfs_write_error(req, error); 1448 else 1449 nfs_redirty_request(req); 1450 } 1451 } 1452 1453 static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) 1454 { 1455 nfs_async_write_error(&hdr->pages, 0); 1456 filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset, 1457 hdr->args.offset + hdr->args.count - 1); 1458 } 1459 1460 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { 1461 .init_hdr = nfs_async_write_init, 1462 .error_cleanup = nfs_async_write_error, 1463 .completion = nfs_write_completion, 1464 .reschedule_io = nfs_async_write_reschedule_io, 1465 }; 1466 1467 void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, 1468 struct inode *inode, int ioflags, bool force_mds, 1469 const struct nfs_pgio_completion_ops *compl_ops) 1470 { 1471 struct nfs_server *server = NFS_SERVER(inode); 1472 const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops; 1473 1474 #ifdef CONFIG_NFS_V4_1 1475 if (server->pnfs_curr_ld && !force_mds) 1476 pg_ops = server->pnfs_curr_ld->pg_write_ops; 1477 #endif 1478 nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops, 1479 server->wsize, ioflags); 1480 } 1481 EXPORT_SYMBOL_GPL(nfs_pageio_init_write); 1482 1483 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) 1484 { 1485 struct nfs_pgio_mirror *mirror; 1486 1487 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) 1488 pgio->pg_ops->pg_cleanup(pgio); 1489 1490 pgio->pg_ops = &nfs_pgio_rw_ops; 1491 1492 nfs_pageio_stop_mirroring(pgio); 1493 1494 mirror = &pgio->pg_mirrors[0]; 1495 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; 1496 } 1497 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); 1498 1499 1500 void nfs_commit_prepare(struct rpc_task *task, void *calldata) 1501 { 1502 struct nfs_commit_data *data = calldata; 1503 1504 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); 1505 } 1506 1507 /* 1508 * Special version of should_remove_suid() that ignores capabilities. 1509 */ 1510 static int nfs_should_remove_suid(const struct inode *inode) 1511 { 1512 umode_t mode = inode->i_mode; 1513 int kill = 0; 1514 1515 /* suid always must be killed */ 1516 if (unlikely(mode & S_ISUID)) 1517 kill = ATTR_KILL_SUID; 1518 1519 /* 1520 * sgid without any exec bits is just a mandatory locking mark; leave 1521 * it alone. If some exec bits are set, it's a real sgid; kill it. 1522 */ 1523 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) 1524 kill |= ATTR_KILL_SGID; 1525 1526 if (unlikely(kill && S_ISREG(mode))) 1527 return kill; 1528 1529 return 0; 1530 } 1531 1532 static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, 1533 struct nfs_fattr *fattr) 1534 { 1535 struct nfs_pgio_args *argp = &hdr->args; 1536 struct nfs_pgio_res *resp = &hdr->res; 1537 u64 size = argp->offset + resp->count; 1538 1539 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) 1540 fattr->size = size; 1541 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { 1542 fattr->valid &= ~NFS_ATTR_FATTR_SIZE; 1543 return; 1544 } 1545 if (size != fattr->size) 1546 return; 1547 /* Set attribute barrier */ 1548 nfs_fattr_set_barrier(fattr); 1549 /* ...and update size */ 1550 fattr->valid |= NFS_ATTR_FATTR_SIZE; 1551 } 1552 1553 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) 1554 { 1555 struct nfs_fattr *fattr = &hdr->fattr; 1556 struct inode *inode = hdr->inode; 1557 1558 spin_lock(&inode->i_lock); 1559 nfs_writeback_check_extend(hdr, fattr); 1560 nfs_post_op_update_inode_force_wcc_locked(inode, fattr); 1561 spin_unlock(&inode->i_lock); 1562 } 1563 EXPORT_SYMBOL_GPL(nfs_writeback_update_inode); 1564 1565 /* 1566 * This function is called when the WRITE call is complete. 1567 */ 1568 static int nfs_writeback_done(struct rpc_task *task, 1569 struct nfs_pgio_header *hdr, 1570 struct inode *inode) 1571 { 1572 int status; 1573 1574 /* 1575 * ->write_done will attempt to use post-op attributes to detect 1576 * conflicting writes by other clients. A strict interpretation 1577 * of close-to-open would allow us to continue caching even if 1578 * another writer had changed the file, but some applications 1579 * depend on tighter cache coherency when writing. 1580 */ 1581 status = NFS_PROTO(inode)->write_done(task, hdr); 1582 if (status != 0) 1583 return status; 1584 1585 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); 1586 trace_nfs_writeback_done(task, hdr); 1587 1588 if (hdr->res.verf->committed < hdr->args.stable && 1589 task->tk_status >= 0) { 1590 /* We tried a write call, but the server did not 1591 * commit data to stable storage even though we 1592 * requested it. 1593 * Note: There is a known bug in Tru64 < 5.0 in which 1594 * the server reports NFS_DATA_SYNC, but performs 1595 * NFS_FILE_SYNC. We therefore implement this checking 1596 * as a dprintk() in order to avoid filling syslog. 1597 */ 1598 static unsigned long complain; 1599 1600 /* Note this will print the MDS for a DS write */ 1601 if (time_before(complain, jiffies)) { 1602 dprintk("NFS: faulty NFS server %s:" 1603 " (committed = %d) != (stable = %d)\n", 1604 NFS_SERVER(inode)->nfs_client->cl_hostname, 1605 hdr->res.verf->committed, hdr->args.stable); 1606 complain = jiffies + 300 * HZ; 1607 } 1608 } 1609 1610 /* Deal with the suid/sgid bit corner case */ 1611 if (nfs_should_remove_suid(inode)) { 1612 spin_lock(&inode->i_lock); 1613 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); 1614 spin_unlock(&inode->i_lock); 1615 } 1616 return 0; 1617 } 1618 1619 /* 1620 * This function is called when the WRITE call is complete. 1621 */ 1622 static void nfs_writeback_result(struct rpc_task *task, 1623 struct nfs_pgio_header *hdr) 1624 { 1625 struct nfs_pgio_args *argp = &hdr->args; 1626 struct nfs_pgio_res *resp = &hdr->res; 1627 1628 if (resp->count < argp->count) { 1629 static unsigned long complain; 1630 1631 /* This a short write! */ 1632 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE); 1633 1634 /* Has the server at least made some progress? */ 1635 if (resp->count == 0) { 1636 if (time_before(complain, jiffies)) { 1637 printk(KERN_WARNING 1638 "NFS: Server wrote zero bytes, expected %u.\n", 1639 argp->count); 1640 complain = jiffies + 300 * HZ; 1641 } 1642 nfs_set_pgio_error(hdr, -EIO, argp->offset); 1643 task->tk_status = -EIO; 1644 return; 1645 } 1646 1647 /* For non rpc-based layout drivers, retry-through-MDS */ 1648 if (!task->tk_ops) { 1649 hdr->pnfs_error = -EAGAIN; 1650 return; 1651 } 1652 1653 /* Was this an NFSv2 write or an NFSv3 stable write? */ 1654 if (resp->verf->committed != NFS_UNSTABLE) { 1655 /* Resend from where the server left off */ 1656 hdr->mds_offset += resp->count; 1657 argp->offset += resp->count; 1658 argp->pgbase += resp->count; 1659 argp->count -= resp->count; 1660 } else { 1661 /* Resend as a stable write in order to avoid 1662 * headaches in the case of a server crash. 1663 */ 1664 argp->stable = NFS_FILE_SYNC; 1665 } 1666 resp->count = 0; 1667 resp->verf->committed = 0; 1668 rpc_restart_call_prepare(task); 1669 } 1670 } 1671 1672 static int wait_on_commit(struct nfs_mds_commit_info *cinfo) 1673 { 1674 return wait_var_event_killable(&cinfo->rpcs_out, 1675 !atomic_read(&cinfo->rpcs_out)); 1676 } 1677 1678 static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) 1679 { 1680 atomic_inc(&cinfo->rpcs_out); 1681 } 1682 1683 bool nfs_commit_end(struct nfs_mds_commit_info *cinfo) 1684 { 1685 if (atomic_dec_and_test(&cinfo->rpcs_out)) { 1686 wake_up_var(&cinfo->rpcs_out); 1687 return true; 1688 } 1689 return false; 1690 } 1691 1692 void nfs_commitdata_release(struct nfs_commit_data *data) 1693 { 1694 put_nfs_open_context(data->context); 1695 nfs_commit_free(data); 1696 } 1697 EXPORT_SYMBOL_GPL(nfs_commitdata_release); 1698 1699 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, 1700 const struct nfs_rpc_ops *nfs_ops, 1701 const struct rpc_call_ops *call_ops, 1702 int how, int flags) 1703 { 1704 struct rpc_task *task; 1705 int priority = flush_task_priority(how); 1706 struct rpc_message msg = { 1707 .rpc_argp = &data->args, 1708 .rpc_resp = &data->res, 1709 .rpc_cred = data->cred, 1710 }; 1711 struct rpc_task_setup task_setup_data = { 1712 .task = &data->task, 1713 .rpc_client = clnt, 1714 .rpc_message = &msg, 1715 .callback_ops = call_ops, 1716 .callback_data = data, 1717 .workqueue = nfsiod_workqueue, 1718 .flags = RPC_TASK_ASYNC | flags, 1719 .priority = priority, 1720 }; 1721 /* Set up the initial task struct. */ 1722 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client); 1723 trace_nfs_initiate_commit(data); 1724 1725 dprintk("NFS: initiated commit call\n"); 1726 1727 task = rpc_run_task(&task_setup_data); 1728 if (IS_ERR(task)) 1729 return PTR_ERR(task); 1730 if (how & FLUSH_SYNC) 1731 rpc_wait_for_completion_task(task); 1732 rpc_put_task(task); 1733 return 0; 1734 } 1735 EXPORT_SYMBOL_GPL(nfs_initiate_commit); 1736 1737 static loff_t nfs_get_lwb(struct list_head *head) 1738 { 1739 loff_t lwb = 0; 1740 struct nfs_page *req; 1741 1742 list_for_each_entry(req, head, wb_list) 1743 if (lwb < (req_offset(req) + req->wb_bytes)) 1744 lwb = req_offset(req) + req->wb_bytes; 1745 1746 return lwb; 1747 } 1748 1749 /* 1750 * Set up the argument/result storage required for the RPC call. 1751 */ 1752 void nfs_init_commit(struct nfs_commit_data *data, 1753 struct list_head *head, 1754 struct pnfs_layout_segment *lseg, 1755 struct nfs_commit_info *cinfo) 1756 { 1757 struct nfs_page *first; 1758 struct nfs_open_context *ctx; 1759 struct inode *inode; 1760 1761 /* Set up the RPC argument and reply structs 1762 * NB: take care not to mess about with data->commit et al. */ 1763 1764 if (head) 1765 list_splice_init(head, &data->pages); 1766 1767 first = nfs_list_entry(data->pages.next); 1768 ctx = nfs_req_openctx(first); 1769 inode = d_inode(ctx->dentry); 1770 1771 data->inode = inode; 1772 data->cred = ctx->cred; 1773 data->lseg = lseg; /* reference transferred */ 1774 /* only set lwb for pnfs commit */ 1775 if (lseg) 1776 data->lwb = nfs_get_lwb(&data->pages); 1777 data->mds_ops = &nfs_commit_ops; 1778 data->completion_ops = cinfo->completion_ops; 1779 data->dreq = cinfo->dreq; 1780 1781 data->args.fh = NFS_FH(data->inode); 1782 /* Note: we always request a commit of the entire inode */ 1783 data->args.offset = 0; 1784 data->args.count = 0; 1785 data->context = get_nfs_open_context(ctx); 1786 data->res.fattr = &data->fattr; 1787 data->res.verf = &data->verf; 1788 nfs_fattr_init(&data->fattr); 1789 nfs_commit_begin(cinfo->mds); 1790 } 1791 EXPORT_SYMBOL_GPL(nfs_init_commit); 1792 1793 void nfs_retry_commit(struct list_head *page_list, 1794 struct pnfs_layout_segment *lseg, 1795 struct nfs_commit_info *cinfo, 1796 u32 ds_commit_idx) 1797 { 1798 struct nfs_page *req; 1799 1800 while (!list_empty(page_list)) { 1801 req = nfs_list_entry(page_list->next); 1802 nfs_list_remove_request(req); 1803 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); 1804 if (!cinfo->dreq) 1805 nfs_clear_page_commit(req->wb_page); 1806 nfs_unlock_and_release_request(req); 1807 } 1808 } 1809 EXPORT_SYMBOL_GPL(nfs_retry_commit); 1810 1811 static void 1812 nfs_commit_resched_write(struct nfs_commit_info *cinfo, 1813 struct nfs_page *req) 1814 { 1815 __set_page_dirty_nobuffers(req->wb_page); 1816 } 1817 1818 /* 1819 * Commit dirty pages 1820 */ 1821 static int 1822 nfs_commit_list(struct inode *inode, struct list_head *head, int how, 1823 struct nfs_commit_info *cinfo) 1824 { 1825 struct nfs_commit_data *data; 1826 unsigned short task_flags = 0; 1827 1828 /* another commit raced with us */ 1829 if (list_empty(head)) 1830 return 0; 1831 1832 data = nfs_commitdata_alloc(true); 1833 1834 /* Set up the argument struct */ 1835 nfs_init_commit(data, head, NULL, cinfo); 1836 if (NFS_SERVER(inode)->nfs_client->cl_minorversion) 1837 task_flags = RPC_TASK_MOVEABLE; 1838 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), 1839 data->mds_ops, how, 1840 RPC_TASK_CRED_NOREF | task_flags); 1841 } 1842 1843 /* 1844 * COMMIT call returned 1845 */ 1846 static void nfs_commit_done(struct rpc_task *task, void *calldata) 1847 { 1848 struct nfs_commit_data *data = calldata; 1849 1850 /* Call the NFS version-specific code */ 1851 NFS_PROTO(data->inode)->commit_done(task, data); 1852 trace_nfs_commit_done(task, data); 1853 } 1854 1855 static void nfs_commit_release_pages(struct nfs_commit_data *data) 1856 { 1857 const struct nfs_writeverf *verf = data->res.verf; 1858 struct nfs_page *req; 1859 int status = data->task.tk_status; 1860 struct nfs_commit_info cinfo; 1861 struct nfs_server *nfss; 1862 1863 while (!list_empty(&data->pages)) { 1864 req = nfs_list_entry(data->pages.next); 1865 nfs_list_remove_request(req); 1866 if (req->wb_page) 1867 nfs_clear_page_commit(req->wb_page); 1868 1869 dprintk("NFS: commit (%s/%llu %d@%lld)", 1870 nfs_req_openctx(req)->dentry->d_sb->s_id, 1871 (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)), 1872 req->wb_bytes, 1873 (long long)req_offset(req)); 1874 if (status < 0) { 1875 if (req->wb_page) { 1876 trace_nfs_commit_error(req, status); 1877 nfs_mapping_set_error(req->wb_page, status); 1878 nfs_inode_remove_request(req); 1879 } 1880 dprintk_cont(", error = %d\n", status); 1881 goto next; 1882 } 1883 1884 /* Okay, COMMIT succeeded, apparently. Check the verifier 1885 * returned by the server against all stored verfs. */ 1886 if (nfs_write_match_verf(verf, req)) { 1887 /* We have a match */ 1888 if (req->wb_page) 1889 nfs_inode_remove_request(req); 1890 dprintk_cont(" OK\n"); 1891 goto next; 1892 } 1893 /* We have a mismatch. Write the page again */ 1894 dprintk_cont(" mismatch\n"); 1895 nfs_mark_request_dirty(req); 1896 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags); 1897 next: 1898 nfs_unlock_and_release_request(req); 1899 /* Latency breaker */ 1900 cond_resched(); 1901 } 1902 nfss = NFS_SERVER(data->inode); 1903 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 1904 nfss->write_congested = 0; 1905 1906 nfs_init_cinfo(&cinfo, data->inode, data->dreq); 1907 nfs_commit_end(cinfo.mds); 1908 } 1909 1910 static void nfs_commit_release(void *calldata) 1911 { 1912 struct nfs_commit_data *data = calldata; 1913 1914 data->completion_ops->completion(data); 1915 nfs_commitdata_release(calldata); 1916 } 1917 1918 static const struct rpc_call_ops nfs_commit_ops = { 1919 .rpc_call_prepare = nfs_commit_prepare, 1920 .rpc_call_done = nfs_commit_done, 1921 .rpc_release = nfs_commit_release, 1922 }; 1923 1924 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = { 1925 .completion = nfs_commit_release_pages, 1926 .resched_write = nfs_commit_resched_write, 1927 }; 1928 1929 int nfs_generic_commit_list(struct inode *inode, struct list_head *head, 1930 int how, struct nfs_commit_info *cinfo) 1931 { 1932 int status; 1933 1934 status = pnfs_commit_list(inode, head, how, cinfo); 1935 if (status == PNFS_NOT_ATTEMPTED) 1936 status = nfs_commit_list(inode, head, how, cinfo); 1937 return status; 1938 } 1939 1940 static int __nfs_commit_inode(struct inode *inode, int how, 1941 struct writeback_control *wbc) 1942 { 1943 LIST_HEAD(head); 1944 struct nfs_commit_info cinfo; 1945 int may_wait = how & FLUSH_SYNC; 1946 int ret, nscan; 1947 1948 how &= ~FLUSH_SYNC; 1949 nfs_init_cinfo_from_inode(&cinfo, inode); 1950 nfs_commit_begin(cinfo.mds); 1951 for (;;) { 1952 ret = nscan = nfs_scan_commit(inode, &head, &cinfo); 1953 if (ret <= 0) 1954 break; 1955 ret = nfs_generic_commit_list(inode, &head, how, &cinfo); 1956 if (ret < 0) 1957 break; 1958 ret = 0; 1959 if (wbc && wbc->sync_mode == WB_SYNC_NONE) { 1960 if (nscan < wbc->nr_to_write) 1961 wbc->nr_to_write -= nscan; 1962 else 1963 wbc->nr_to_write = 0; 1964 } 1965 if (nscan < INT_MAX) 1966 break; 1967 cond_resched(); 1968 } 1969 nfs_commit_end(cinfo.mds); 1970 if (ret || !may_wait) 1971 return ret; 1972 return wait_on_commit(cinfo.mds); 1973 } 1974 1975 int nfs_commit_inode(struct inode *inode, int how) 1976 { 1977 return __nfs_commit_inode(inode, how, NULL); 1978 } 1979 EXPORT_SYMBOL_GPL(nfs_commit_inode); 1980 1981 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1982 { 1983 struct nfs_inode *nfsi = NFS_I(inode); 1984 int flags = FLUSH_SYNC; 1985 int ret = 0; 1986 1987 if (wbc->sync_mode == WB_SYNC_NONE) { 1988 /* no commits means nothing needs to be done */ 1989 if (!atomic_long_read(&nfsi->commit_info.ncommit)) 1990 goto check_requests_outstanding; 1991 1992 /* Don't commit yet if this is a non-blocking flush and there 1993 * are a lot of outstanding writes for this mapping. 1994 */ 1995 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) 1996 goto out_mark_dirty; 1997 1998 /* don't wait for the COMMIT response */ 1999 flags = 0; 2000 } 2001 2002 ret = __nfs_commit_inode(inode, flags, wbc); 2003 if (!ret) { 2004 if (flags & FLUSH_SYNC) 2005 return 0; 2006 } else if (atomic_long_read(&nfsi->commit_info.ncommit)) 2007 goto out_mark_dirty; 2008 2009 check_requests_outstanding: 2010 if (!atomic_read(&nfsi->commit_info.rpcs_out)) 2011 return ret; 2012 out_mark_dirty: 2013 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 2014 return ret; 2015 } 2016 EXPORT_SYMBOL_GPL(nfs_write_inode); 2017 2018 /* 2019 * Wrapper for filemap_write_and_wait_range() 2020 * 2021 * Needed for pNFS in order to ensure data becomes visible to the 2022 * client. 2023 */ 2024 int nfs_filemap_write_and_wait_range(struct address_space *mapping, 2025 loff_t lstart, loff_t lend) 2026 { 2027 int ret; 2028 2029 ret = filemap_write_and_wait_range(mapping, lstart, lend); 2030 if (ret == 0) 2031 ret = pnfs_sync_inode(mapping->host, true); 2032 return ret; 2033 } 2034 EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range); 2035 2036 /* 2037 * flush the inode to disk. 2038 */ 2039 int nfs_wb_all(struct inode *inode) 2040 { 2041 int ret; 2042 2043 trace_nfs_writeback_inode_enter(inode); 2044 2045 ret = filemap_write_and_wait(inode->i_mapping); 2046 if (ret) 2047 goto out; 2048 ret = nfs_commit_inode(inode, FLUSH_SYNC); 2049 if (ret < 0) 2050 goto out; 2051 pnfs_sync_inode(inode, true); 2052 ret = 0; 2053 2054 out: 2055 trace_nfs_writeback_inode_exit(inode, ret); 2056 return ret; 2057 } 2058 EXPORT_SYMBOL_GPL(nfs_wb_all); 2059 2060 int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio) 2061 { 2062 struct nfs_page *req; 2063 int ret = 0; 2064 2065 folio_wait_writeback(folio); 2066 2067 /* blocking call to cancel all requests and join to a single (head) 2068 * request */ 2069 req = nfs_lock_and_join_requests(&folio->page); 2070 2071 if (IS_ERR(req)) { 2072 ret = PTR_ERR(req); 2073 } else if (req) { 2074 /* all requests from this folio have been cancelled by 2075 * nfs_lock_and_join_requests, so just remove the head 2076 * request from the inode / page_private pointer and 2077 * release it */ 2078 nfs_inode_remove_request(req); 2079 nfs_unlock_and_release_request(req); 2080 } 2081 2082 return ret; 2083 } 2084 2085 /* 2086 * Write back all requests on one page - we do this before reading it. 2087 */ 2088 int nfs_wb_page(struct inode *inode, struct page *page) 2089 { 2090 loff_t range_start = page_file_offset(page); 2091 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1); 2092 struct writeback_control wbc = { 2093 .sync_mode = WB_SYNC_ALL, 2094 .nr_to_write = 0, 2095 .range_start = range_start, 2096 .range_end = range_end, 2097 }; 2098 int ret; 2099 2100 trace_nfs_writeback_page_enter(inode); 2101 2102 for (;;) { 2103 wait_on_page_writeback(page); 2104 if (clear_page_dirty_for_io(page)) { 2105 ret = nfs_writepage_locked(page, &wbc); 2106 if (ret < 0) 2107 goto out_error; 2108 continue; 2109 } 2110 ret = 0; 2111 if (!PagePrivate(page)) 2112 break; 2113 ret = nfs_commit_inode(inode, FLUSH_SYNC); 2114 if (ret < 0) 2115 goto out_error; 2116 } 2117 out_error: 2118 trace_nfs_writeback_page_exit(inode, ret); 2119 return ret; 2120 } 2121 2122 #ifdef CONFIG_MIGRATION 2123 int nfs_migrate_page(struct address_space *mapping, struct page *newpage, 2124 struct page *page, enum migrate_mode mode) 2125 { 2126 /* 2127 * If PagePrivate is set, then the page is currently associated with 2128 * an in-progress read or write request. Don't try to migrate it. 2129 * 2130 * FIXME: we could do this in principle, but we'll need a way to ensure 2131 * that we can safely release the inode reference while holding 2132 * the page lock. 2133 */ 2134 if (PagePrivate(page)) 2135 return -EBUSY; 2136 2137 if (PageFsCache(page)) { 2138 if (mode == MIGRATE_ASYNC) 2139 return -EBUSY; 2140 wait_on_page_fscache(page); 2141 } 2142 2143 return migrate_page(mapping, newpage, page, mode); 2144 } 2145 #endif 2146 2147 int __init nfs_init_writepagecache(void) 2148 { 2149 nfs_wdata_cachep = kmem_cache_create("nfs_write_data", 2150 sizeof(struct nfs_pgio_header), 2151 0, SLAB_HWCACHE_ALIGN, 2152 NULL); 2153 if (nfs_wdata_cachep == NULL) 2154 return -ENOMEM; 2155 2156 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, 2157 nfs_wdata_cachep); 2158 if (nfs_wdata_mempool == NULL) 2159 goto out_destroy_write_cache; 2160 2161 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", 2162 sizeof(struct nfs_commit_data), 2163 0, SLAB_HWCACHE_ALIGN, 2164 NULL); 2165 if (nfs_cdata_cachep == NULL) 2166 goto out_destroy_write_mempool; 2167 2168 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, 2169 nfs_cdata_cachep); 2170 if (nfs_commit_mempool == NULL) 2171 goto out_destroy_commit_cache; 2172 2173 /* 2174 * NFS congestion size, scale with available memory. 2175 * 2176 * 64MB: 8192k 2177 * 128MB: 11585k 2178 * 256MB: 16384k 2179 * 512MB: 23170k 2180 * 1GB: 32768k 2181 * 2GB: 46340k 2182 * 4GB: 65536k 2183 * 8GB: 92681k 2184 * 16GB: 131072k 2185 * 2186 * This allows larger machines to have larger/more transfers. 2187 * Limit the default to 256M 2188 */ 2189 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); 2190 if (nfs_congestion_kb > 256*1024) 2191 nfs_congestion_kb = 256*1024; 2192 2193 return 0; 2194 2195 out_destroy_commit_cache: 2196 kmem_cache_destroy(nfs_cdata_cachep); 2197 out_destroy_write_mempool: 2198 mempool_destroy(nfs_wdata_mempool); 2199 out_destroy_write_cache: 2200 kmem_cache_destroy(nfs_wdata_cachep); 2201 return -ENOMEM; 2202 } 2203 2204 void nfs_destroy_writepagecache(void) 2205 { 2206 mempool_destroy(nfs_commit_mempool); 2207 kmem_cache_destroy(nfs_cdata_cachep); 2208 mempool_destroy(nfs_wdata_mempool); 2209 kmem_cache_destroy(nfs_wdata_cachep); 2210 } 2211 2212 static const struct nfs_rw_ops nfs_rw_write_ops = { 2213 .rw_alloc_header = nfs_writehdr_alloc, 2214 .rw_free_header = nfs_writehdr_free, 2215 .rw_done = nfs_writeback_done, 2216 .rw_result = nfs_writeback_result, 2217 .rw_initiate = nfs_initiate_write, 2218 }; 2219