// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
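	/* The dirty extent is packed into page->private as
	 * (to << AFS_PRIV_SHIFT) | from, so f and t below recover the byte
	 * range [f, t) that a previous write already dirtied.
	 */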
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes. If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

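	/* Advance i_size if this write extends the file, rechecking under the
	 * wb_lock so that a concurrent extension is never moved backwards.
	 */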
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server. The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode,
				   pgoff_t first, pgoff_t last)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
	if (!scb)
		return -ENOMEM;

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
		afs_dataversion_t data_version = vnode->status.data_version + 1;

		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		if (fc.ac.error == 0)
			afs_pages_written_back(vnode, first, last);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
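	/* Pages are gathered in batches of up to ARRAY_SIZE(pages), and the
	 * run is capped at 65536 pages per store; count tracks the length of
	 * the run, including the primary page.
	 */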
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

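	/* Key and quota errors leave the pages dirty so that the store can be
	 * retried later; hard errors discard the pages; on success the number
	 * of pages stored is returned so that the callers can adjust
	 * wbc->nr_to_write.
	 */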
	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

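	/* Cyclic writeback resumes from where the previous pass stopped and
	 * then wraps around to cover the pages before that point.
	 */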
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified. We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback. This takes vnode->wb_lock itself, so
 * the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}