1 /* handling of writes to regular files and writing back to the server 2 * 3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 * Written by David Howells (dhowells@redhat.com) 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 */ 11 12 #include <linux/backing-dev.h> 13 #include <linux/slab.h> 14 #include <linux/fs.h> 15 #include <linux/pagemap.h> 16 #include <linux/writeback.h> 17 #include <linux/pagevec.h> 18 #include "internal.h" 19 20 /* 21 * mark a page as having been made dirty and thus needing writeback 22 */ 23 int afs_set_page_dirty(struct page *page) 24 { 25 _enter(""); 26 return __set_page_dirty_nobuffers(page); 27 } 28 29 /* 30 * partly or wholly fill a page that's under preparation for writing 31 */ 32 static int afs_fill_page(struct afs_vnode *vnode, struct key *key, 33 loff_t pos, unsigned int len, struct page *page) 34 { 35 struct afs_read *req; 36 int ret; 37 38 _enter(",,%llu", (unsigned long long)pos); 39 40 req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *), 41 GFP_KERNEL); 42 if (!req) 43 return -ENOMEM; 44 45 refcount_set(&req->usage, 1); 46 req->pos = pos; 47 req->len = len; 48 req->nr_pages = 1; 49 req->pages = req->array; 50 req->pages[0] = page; 51 get_page(page); 52 53 ret = afs_fetch_data(vnode, key, req); 54 afs_put_read(req); 55 if (ret < 0) { 56 if (ret == -ENOENT) { 57 _debug("got NOENT from server" 58 " - marking file deleted and stale"); 59 set_bit(AFS_VNODE_DELETED, &vnode->flags); 60 ret = -ESTALE; 61 } 62 } 63 64 _leave(" = %d", ret); 65 return ret; 66 } 67 68 /* 69 * prepare to perform part of a write to a page 70 */ 71 int afs_write_begin(struct file *file, struct address_space *mapping, 72 loff_t pos, unsigned len, unsigned flags, 73 struct page **pagep, void **fsdata) 74 { 75 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 76 struct page *page; 77 struct key *key = afs_file_key(file); 78 unsigned long priv; 79 unsigned f, from = pos & (PAGE_SIZE - 1); 80 unsigned t, to = from + len; 81 pgoff_t index = pos >> PAGE_SHIFT; 82 int ret; 83 84 _enter("{%x:%u},{%lx},%u,%u", 85 vnode->fid.vid, vnode->fid.vnode, index, from, to); 86 87 /* We want to store information about how much of a page is altered in 88 * page->private. 89 */ 90 BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8); 91 92 page = grab_cache_page_write_begin(mapping, index, flags); 93 if (!page) 94 return -ENOMEM; 95 96 if (!PageUptodate(page) && len != PAGE_SIZE) { 97 ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page); 98 if (ret < 0) { 99 unlock_page(page); 100 put_page(page); 101 _leave(" = %d [prep]", ret); 102 return ret; 103 } 104 SetPageUptodate(page); 105 } 106 107 /* page won't leak in error case: it eventually gets cleaned off LRU */ 108 *pagep = page; 109 110 try_again: 111 /* See if this page is already partially written in a way that we can 112 * merge the new write with. 113 */ 114 t = f = 0; 115 if (PagePrivate(page)) { 116 priv = page_private(page); 117 f = priv & AFS_PRIV_MAX; 118 t = priv >> AFS_PRIV_SHIFT; 119 ASSERTCMP(f, <=, t); 120 } 121 122 if (f != t) { 123 if (PageWriteback(page)) { 124 trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), 125 page->index, priv); 126 goto flush_conflicting_write; 127 } 128 if (to < f || from > t) 129 goto flush_conflicting_write; 130 if (from < f) 131 f = from; 132 if (to > t) 133 t = to; 134 } else { 135 f = from; 136 t = to; 137 } 138 139 priv = (unsigned long)t << AFS_PRIV_SHIFT; 140 priv |= f; 141 trace_afs_page_dirty(vnode, tracepoint_string("begin"), 142 page->index, priv); 143 SetPagePrivate(page); 144 set_page_private(page, priv); 145 _leave(" = 0"); 146 return 0; 147 148 /* The previous write and this write aren't adjacent or overlapping, so 149 * flush the page out. 150 */ 151 flush_conflicting_write: 152 _debug("flush conflict"); 153 ret = write_one_page(page); 154 if (ret < 0) { 155 _leave(" = %d", ret); 156 return ret; 157 } 158 159 ret = lock_page_killable(page); 160 if (ret < 0) { 161 _leave(" = %d", ret); 162 return ret; 163 } 164 goto try_again; 165 } 166 167 /* 168 * finalise part of a write to a page 169 */ 170 int afs_write_end(struct file *file, struct address_space *mapping, 171 loff_t pos, unsigned len, unsigned copied, 172 struct page *page, void *fsdata) 173 { 174 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 175 struct key *key = afs_file_key(file); 176 loff_t i_size, maybe_i_size; 177 int ret; 178 179 _enter("{%x:%u},{%lx}", 180 vnode->fid.vid, vnode->fid.vnode, page->index); 181 182 maybe_i_size = pos + copied; 183 184 i_size = i_size_read(&vnode->vfs_inode); 185 if (maybe_i_size > i_size) { 186 spin_lock(&vnode->wb_lock); 187 i_size = i_size_read(&vnode->vfs_inode); 188 if (maybe_i_size > i_size) 189 i_size_write(&vnode->vfs_inode, maybe_i_size); 190 spin_unlock(&vnode->wb_lock); 191 } 192 193 if (!PageUptodate(page)) { 194 if (copied < len) { 195 /* Try and load any missing data from the server. The 196 * unmarshalling routine will take care of clearing any 197 * bits that are beyond the EOF. 198 */ 199 ret = afs_fill_page(vnode, key, pos + copied, 200 len - copied, page); 201 if (ret < 0) 202 goto out; 203 } 204 SetPageUptodate(page); 205 } 206 207 set_page_dirty(page); 208 if (PageDirty(page)) 209 _debug("dirtied"); 210 ret = copied; 211 212 out: 213 unlock_page(page); 214 put_page(page); 215 return ret; 216 } 217 218 /* 219 * kill all the pages in the given range 220 */ 221 static void afs_kill_pages(struct address_space *mapping, 222 pgoff_t first, pgoff_t last) 223 { 224 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 225 struct pagevec pv; 226 unsigned count, loop; 227 228 _enter("{%x:%u},%lx-%lx", 229 vnode->fid.vid, vnode->fid.vnode, first, last); 230 231 pagevec_init(&pv); 232 233 do { 234 _debug("kill %lx-%lx", first, last); 235 236 count = last - first + 1; 237 if (count > PAGEVEC_SIZE) 238 count = PAGEVEC_SIZE; 239 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); 240 ASSERTCMP(pv.nr, ==, count); 241 242 for (loop = 0; loop < count; loop++) { 243 struct page *page = pv.pages[loop]; 244 ClearPageUptodate(page); 245 SetPageError(page); 246 end_page_writeback(page); 247 if (page->index >= first) 248 first = page->index + 1; 249 lock_page(page); 250 generic_error_remove_page(mapping, page); 251 } 252 253 __pagevec_release(&pv); 254 } while (first <= last); 255 256 _leave(""); 257 } 258 259 /* 260 * Redirty all the pages in a given range. 261 */ 262 static void afs_redirty_pages(struct writeback_control *wbc, 263 struct address_space *mapping, 264 pgoff_t first, pgoff_t last) 265 { 266 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 267 struct pagevec pv; 268 unsigned count, loop; 269 270 _enter("{%x:%u},%lx-%lx", 271 vnode->fid.vid, vnode->fid.vnode, first, last); 272 273 pagevec_init(&pv); 274 275 do { 276 _debug("redirty %lx-%lx", first, last); 277 278 count = last - first + 1; 279 if (count > PAGEVEC_SIZE) 280 count = PAGEVEC_SIZE; 281 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); 282 ASSERTCMP(pv.nr, ==, count); 283 284 for (loop = 0; loop < count; loop++) { 285 struct page *page = pv.pages[loop]; 286 287 redirty_page_for_writepage(wbc, page); 288 end_page_writeback(page); 289 if (page->index >= first) 290 first = page->index + 1; 291 } 292 293 __pagevec_release(&pv); 294 } while (first <= last); 295 296 _leave(""); 297 } 298 299 /* 300 * write to a file 301 */ 302 static int afs_store_data(struct address_space *mapping, 303 pgoff_t first, pgoff_t last, 304 unsigned offset, unsigned to) 305 { 306 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 307 struct afs_fs_cursor fc; 308 struct afs_wb_key *wbk = NULL; 309 struct list_head *p; 310 int ret = -ENOKEY, ret2; 311 312 _enter("%s{%x:%u.%u},%lx,%lx,%x,%x", 313 vnode->volume->name, 314 vnode->fid.vid, 315 vnode->fid.vnode, 316 vnode->fid.unique, 317 first, last, offset, to); 318 319 spin_lock(&vnode->wb_lock); 320 p = vnode->wb_keys.next; 321 322 /* Iterate through the list looking for a valid key to use. */ 323 try_next_key: 324 while (p != &vnode->wb_keys) { 325 wbk = list_entry(p, struct afs_wb_key, vnode_link); 326 _debug("wbk %u", key_serial(wbk->key)); 327 ret2 = key_validate(wbk->key); 328 if (ret2 == 0) 329 goto found_key; 330 if (ret == -ENOKEY) 331 ret = ret2; 332 p = p->next; 333 } 334 335 spin_unlock(&vnode->wb_lock); 336 afs_put_wb_key(wbk); 337 _leave(" = %d [no keys]", ret); 338 return ret; 339 340 found_key: 341 refcount_inc(&wbk->usage); 342 spin_unlock(&vnode->wb_lock); 343 344 _debug("USE WB KEY %u", key_serial(wbk->key)); 345 346 ret = -ERESTARTSYS; 347 if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) { 348 while (afs_select_fileserver(&fc)) { 349 fc.cb_break = vnode->cb_break + vnode->cb_s_break; 350 afs_fs_store_data(&fc, mapping, first, last, offset, to); 351 } 352 353 afs_check_for_remote_deletion(&fc, fc.vnode); 354 afs_vnode_commit_status(&fc, vnode, fc.cb_break); 355 ret = afs_end_vnode_operation(&fc); 356 } 357 358 switch (ret) { 359 case 0: 360 afs_stat_v(vnode, n_stores); 361 atomic_long_add((last * PAGE_SIZE + to) - 362 (first * PAGE_SIZE + offset), 363 &afs_v2net(vnode)->n_store_bytes); 364 break; 365 case -EACCES: 366 case -EPERM: 367 case -ENOKEY: 368 case -EKEYEXPIRED: 369 case -EKEYREJECTED: 370 case -EKEYREVOKED: 371 _debug("next"); 372 spin_lock(&vnode->wb_lock); 373 p = wbk->vnode_link.next; 374 afs_put_wb_key(wbk); 375 goto try_next_key; 376 } 377 378 afs_put_wb_key(wbk); 379 _leave(" = %d", ret); 380 return ret; 381 } 382 383 /* 384 * Synchronously write back the locked page and any subsequent non-locked dirty 385 * pages. 386 */ 387 static int afs_write_back_from_locked_page(struct address_space *mapping, 388 struct writeback_control *wbc, 389 struct page *primary_page, 390 pgoff_t final_page) 391 { 392 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 393 struct page *pages[8], *page; 394 unsigned long count, priv; 395 unsigned n, offset, to, f, t; 396 pgoff_t start, first, last; 397 int loop, ret; 398 399 _enter(",%lx", primary_page->index); 400 401 count = 1; 402 if (test_set_page_writeback(primary_page)) 403 BUG(); 404 405 /* Find all consecutive lockable dirty pages that have contiguous 406 * written regions, stopping when we find a page that is not 407 * immediately lockable, is not dirty or is missing, or we reach the 408 * end of the range. 409 */ 410 start = primary_page->index; 411 priv = page_private(primary_page); 412 offset = priv & AFS_PRIV_MAX; 413 to = priv >> AFS_PRIV_SHIFT; 414 trace_afs_page_dirty(vnode, tracepoint_string("store"), 415 primary_page->index, priv); 416 417 WARN_ON(offset == to); 418 if (offset == to) 419 trace_afs_page_dirty(vnode, tracepoint_string("WARN"), 420 primary_page->index, priv); 421 422 if (start >= final_page || to < PAGE_SIZE) 423 goto no_more; 424 425 start++; 426 do { 427 _debug("more %lx [%lx]", start, count); 428 n = final_page - start + 1; 429 if (n > ARRAY_SIZE(pages)) 430 n = ARRAY_SIZE(pages); 431 n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages); 432 _debug("fgpc %u", n); 433 if (n == 0) 434 goto no_more; 435 if (pages[0]->index != start) { 436 do { 437 put_page(pages[--n]); 438 } while (n > 0); 439 goto no_more; 440 } 441 442 for (loop = 0; loop < n; loop++) { 443 if (to != PAGE_SIZE) 444 break; 445 page = pages[loop]; 446 if (page->index > final_page) 447 break; 448 if (!trylock_page(page)) 449 break; 450 if (!PageDirty(page) || PageWriteback(page)) { 451 unlock_page(page); 452 break; 453 } 454 455 priv = page_private(page); 456 f = priv & AFS_PRIV_MAX; 457 t = priv >> AFS_PRIV_SHIFT; 458 if (f != 0) { 459 unlock_page(page); 460 break; 461 } 462 to = t; 463 464 trace_afs_page_dirty(vnode, tracepoint_string("store+"), 465 page->index, priv); 466 467 if (!clear_page_dirty_for_io(page)) 468 BUG(); 469 if (test_set_page_writeback(page)) 470 BUG(); 471 unlock_page(page); 472 put_page(page); 473 } 474 count += loop; 475 if (loop < n) { 476 for (; loop < n; loop++) 477 put_page(pages[loop]); 478 goto no_more; 479 } 480 481 start += loop; 482 } while (start <= final_page && count < 65536); 483 484 no_more: 485 /* We now have a contiguous set of dirty pages, each with writeback 486 * set; the first page is still locked at this point, but all the rest 487 * have been unlocked. 488 */ 489 unlock_page(primary_page); 490 491 first = primary_page->index; 492 last = first + count - 1; 493 494 _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to); 495 496 ret = afs_store_data(mapping, first, last, offset, to); 497 switch (ret) { 498 case 0: 499 ret = count; 500 break; 501 502 default: 503 pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret); 504 /* Fall through */ 505 case -EACCES: 506 case -EPERM: 507 case -ENOKEY: 508 case -EKEYEXPIRED: 509 case -EKEYREJECTED: 510 case -EKEYREVOKED: 511 afs_redirty_pages(wbc, mapping, first, last); 512 mapping_set_error(mapping, ret); 513 break; 514 515 case -EDQUOT: 516 case -ENOSPC: 517 afs_redirty_pages(wbc, mapping, first, last); 518 mapping_set_error(mapping, -ENOSPC); 519 break; 520 521 case -EROFS: 522 case -EIO: 523 case -EREMOTEIO: 524 case -EFBIG: 525 case -ENOENT: 526 case -ENOMEDIUM: 527 case -ENXIO: 528 afs_kill_pages(mapping, first, last); 529 mapping_set_error(mapping, ret); 530 break; 531 } 532 533 _leave(" = %d", ret); 534 return ret; 535 } 536 537 /* 538 * write a page back to the server 539 * - the caller locked the page for us 540 */ 541 int afs_writepage(struct page *page, struct writeback_control *wbc) 542 { 543 int ret; 544 545 _enter("{%lx},", page->index); 546 547 ret = afs_write_back_from_locked_page(page->mapping, wbc, page, 548 wbc->range_end >> PAGE_SHIFT); 549 if (ret < 0) { 550 _leave(" = %d", ret); 551 return 0; 552 } 553 554 wbc->nr_to_write -= ret; 555 556 _leave(" = 0"); 557 return 0; 558 } 559 560 /* 561 * write a region of pages back to the server 562 */ 563 static int afs_writepages_region(struct address_space *mapping, 564 struct writeback_control *wbc, 565 pgoff_t index, pgoff_t end, pgoff_t *_next) 566 { 567 struct page *page; 568 int ret, n; 569 570 _enter(",,%lx,%lx,", index, end); 571 572 do { 573 n = find_get_pages_range_tag(mapping, &index, end, 574 PAGECACHE_TAG_DIRTY, 1, &page); 575 if (!n) 576 break; 577 578 _debug("wback %lx", page->index); 579 580 /* at this point we hold neither mapping->tree_lock nor lock on 581 * the page itself: the page may be truncated or invalidated 582 * (changing page->mapping to NULL), or even swizzled back from 583 * swapper_space to tmpfs file mapping 584 */ 585 ret = lock_page_killable(page); 586 if (ret < 0) { 587 put_page(page); 588 _leave(" = %d", ret); 589 return ret; 590 } 591 592 if (page->mapping != mapping || !PageDirty(page)) { 593 unlock_page(page); 594 put_page(page); 595 continue; 596 } 597 598 if (PageWriteback(page)) { 599 unlock_page(page); 600 if (wbc->sync_mode != WB_SYNC_NONE) 601 wait_on_page_writeback(page); 602 put_page(page); 603 continue; 604 } 605 606 if (!clear_page_dirty_for_io(page)) 607 BUG(); 608 ret = afs_write_back_from_locked_page(mapping, wbc, page, end); 609 put_page(page); 610 if (ret < 0) { 611 _leave(" = %d", ret); 612 return ret; 613 } 614 615 wbc->nr_to_write -= ret; 616 617 cond_resched(); 618 } while (index < end && wbc->nr_to_write > 0); 619 620 *_next = index; 621 _leave(" = 0 [%lx]", *_next); 622 return 0; 623 } 624 625 /* 626 * write some of the pending data back to the server 627 */ 628 int afs_writepages(struct address_space *mapping, 629 struct writeback_control *wbc) 630 { 631 pgoff_t start, end, next; 632 int ret; 633 634 _enter(""); 635 636 if (wbc->range_cyclic) { 637 start = mapping->writeback_index; 638 end = -1; 639 ret = afs_writepages_region(mapping, wbc, start, end, &next); 640 if (start > 0 && wbc->nr_to_write > 0 && ret == 0) 641 ret = afs_writepages_region(mapping, wbc, 0, start, 642 &next); 643 mapping->writeback_index = next; 644 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { 645 end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT); 646 ret = afs_writepages_region(mapping, wbc, 0, end, &next); 647 if (wbc->nr_to_write > 0) 648 mapping->writeback_index = next; 649 } else { 650 start = wbc->range_start >> PAGE_SHIFT; 651 end = wbc->range_end >> PAGE_SHIFT; 652 ret = afs_writepages_region(mapping, wbc, start, end, &next); 653 } 654 655 _leave(" = %d", ret); 656 return ret; 657 } 658 659 /* 660 * completion of write to server 661 */ 662 void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call) 663 { 664 struct pagevec pv; 665 unsigned long priv; 666 unsigned count, loop; 667 pgoff_t first = call->first, last = call->last; 668 669 _enter("{%x:%u},{%lx-%lx}", 670 vnode->fid.vid, vnode->fid.vnode, first, last); 671 672 pagevec_init(&pv); 673 674 do { 675 _debug("done %lx-%lx", first, last); 676 677 count = last - first + 1; 678 if (count > PAGEVEC_SIZE) 679 count = PAGEVEC_SIZE; 680 pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping, 681 first, count, pv.pages); 682 ASSERTCMP(pv.nr, ==, count); 683 684 for (loop = 0; loop < count; loop++) { 685 priv = page_private(pv.pages[loop]); 686 trace_afs_page_dirty(vnode, tracepoint_string("clear"), 687 pv.pages[loop]->index, priv); 688 set_page_private(pv.pages[loop], 0); 689 end_page_writeback(pv.pages[loop]); 690 } 691 first += count; 692 __pagevec_release(&pv); 693 } while (first <= last); 694 695 afs_prune_wb_keys(vnode); 696 _leave(""); 697 } 698 699 /* 700 * write to an AFS file 701 */ 702 ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) 703 { 704 struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); 705 ssize_t result; 706 size_t count = iov_iter_count(from); 707 708 _enter("{%x.%u},{%zu},", 709 vnode->fid.vid, vnode->fid.vnode, count); 710 711 if (IS_SWAPFILE(&vnode->vfs_inode)) { 712 printk(KERN_INFO 713 "AFS: Attempt to write to active swap file!\n"); 714 return -EBUSY; 715 } 716 717 if (!count) 718 return 0; 719 720 result = generic_file_write_iter(iocb, from); 721 722 _leave(" = %zd", result); 723 return result; 724 } 725 726 /* 727 * flush any dirty pages for this process, and check for write errors. 728 * - the return status from this call provides a reliable indication of 729 * whether any write errors occurred for this process. 730 */ 731 int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 732 { 733 struct inode *inode = file_inode(file); 734 struct afs_vnode *vnode = AFS_FS_I(inode); 735 736 _enter("{%x:%u},{n=%pD},%d", 737 vnode->fid.vid, vnode->fid.vnode, file, 738 datasync); 739 740 return file_write_and_wait_range(file, start, end); 741 } 742 743 /* 744 * Flush out all outstanding writes on a file opened for writing when it is 745 * closed. 746 */ 747 int afs_flush(struct file *file, fl_owner_t id) 748 { 749 _enter(""); 750 751 if ((file->f_mode & FMODE_WRITE) == 0) 752 return 0; 753 754 return vfs_fsync(file, 0); 755 } 756 757 /* 758 * notification that a previously read-only page is about to become writable 759 * - if it returns an error, the caller will deliver a bus error signal 760 */ 761 int afs_page_mkwrite(struct vm_fault *vmf) 762 { 763 struct file *file = vmf->vma->vm_file; 764 struct inode *inode = file_inode(file); 765 struct afs_vnode *vnode = AFS_FS_I(inode); 766 unsigned long priv; 767 768 _enter("{{%x:%u}},{%lx}", 769 vnode->fid.vid, vnode->fid.vnode, vmf->page->index); 770 771 sb_start_pagefault(inode->i_sb); 772 773 /* Wait for the page to be written to the cache before we allow it to 774 * be modified. We then assume the entire page will need writing back. 775 */ 776 #ifdef CONFIG_AFS_FSCACHE 777 fscache_wait_on_page_write(vnode->cache, vmf->page); 778 #endif 779 780 if (PageWriteback(vmf->page) && 781 wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) 782 return VM_FAULT_RETRY; 783 784 if (lock_page_killable(vmf->page) < 0) 785 return VM_FAULT_RETRY; 786 787 /* We mustn't change page->private until writeback is complete as that 788 * details the portion of the page we need to write back and we might 789 * need to redirty the page if there's a problem. 790 */ 791 wait_on_page_writeback(vmf->page); 792 793 priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */ 794 priv |= 0; /* From */ 795 trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), 796 vmf->page->index, priv); 797 SetPagePrivate(vmf->page); 798 set_page_private(vmf->page, priv); 799 800 sb_end_pagefault(inode->i_sb); 801 return VM_FAULT_LOCKED; 802 } 803 804 /* 805 * Prune the keys cached for writeback. The caller must hold vnode->wb_lock. 806 */ 807 void afs_prune_wb_keys(struct afs_vnode *vnode) 808 { 809 LIST_HEAD(graveyard); 810 struct afs_wb_key *wbk, *tmp; 811 812 /* Discard unused keys */ 813 spin_lock(&vnode->wb_lock); 814 815 if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) && 816 !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) { 817 list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) { 818 if (refcount_read(&wbk->usage) == 1) 819 list_move(&wbk->vnode_link, &graveyard); 820 } 821 } 822 823 spin_unlock(&vnode->wb_lock); 824 825 while (!list_empty(&graveyard)) { 826 wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link); 827 list_del(&wbk->vnode_link); 828 afs_put_wb_key(wbk); 829 } 830 } 831 832 /* 833 * Clean up a page during invalidation. 834 */ 835 int afs_launder_page(struct page *page) 836 { 837 struct address_space *mapping = page->mapping; 838 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 839 unsigned long priv; 840 unsigned int f, t; 841 int ret = 0; 842 843 _enter("{%lx}", page->index); 844 845 priv = page_private(page); 846 if (clear_page_dirty_for_io(page)) { 847 f = 0; 848 t = PAGE_SIZE; 849 if (PagePrivate(page)) { 850 f = priv & AFS_PRIV_MAX; 851 t = priv >> AFS_PRIV_SHIFT; 852 } 853 854 trace_afs_page_dirty(vnode, tracepoint_string("launder"), 855 page->index, priv); 856 ret = afs_store_data(mapping, page->index, page->index, t, f); 857 } 858 859 trace_afs_page_dirty(vnode, tracepoint_string("laundered"), 860 page->index, priv); 861 set_page_private(page, 0); 862 ClearPagePrivate(page); 863 864 #ifdef CONFIG_AFS_FSCACHE 865 if (PageFsCache(page)) { 866 fscache_wait_on_page_write(vnode->cache, page); 867 fscache_uncache_page(vnode->cache, page); 868 } 869 #endif 870 return ret; 871 } 872