1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* handling of writes to regular files and writing back to the server 3 * 4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 5 * Written by David Howells (dhowells@redhat.com) 6 */ 7 8 #include <linux/backing-dev.h> 9 #include <linux/slab.h> 10 #include <linux/fs.h> 11 #include <linux/pagemap.h> 12 #include <linux/writeback.h> 13 #include <linux/pagevec.h> 14 #include "internal.h" 15 16 /* 17 * mark a page as having been made dirty and thus needing writeback 18 */ 19 int afs_set_page_dirty(struct page *page) 20 { 21 _enter(""); 22 return __set_page_dirty_nobuffers(page); 23 } 24 25 /* 26 * Handle completion of a read operation to fill a page. 27 */ 28 static void afs_fill_hole(struct afs_read *req) 29 { 30 if (iov_iter_count(req->iter) > 0) 31 /* The read was short - clear the excess buffer. */ 32 iov_iter_zero(iov_iter_count(req->iter), req->iter); 33 } 34 35 /* 36 * partly or wholly fill a page that's under preparation for writing 37 */ 38 static int afs_fill_page(struct file *file, 39 loff_t pos, unsigned int len, struct page *page) 40 { 41 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 42 struct afs_read *req; 43 size_t p; 44 void *data; 45 int ret; 46 47 _enter(",,%llu", (unsigned long long)pos); 48 49 if (pos >= vnode->vfs_inode.i_size) { 50 p = pos & ~PAGE_MASK; 51 ASSERTCMP(p + len, <=, PAGE_SIZE); 52 data = kmap(page); 53 memset(data + p, 0, len); 54 kunmap(page); 55 return 0; 56 } 57 58 req = kzalloc(sizeof(struct afs_read), GFP_KERNEL); 59 if (!req) 60 return -ENOMEM; 61 62 refcount_set(&req->usage, 1); 63 req->vnode = vnode; 64 req->done = afs_fill_hole; 65 req->key = key_get(afs_file_key(file)); 66 req->pos = pos; 67 req->len = len; 68 req->nr_pages = 1; 69 req->iter = &req->def_iter; 70 iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len); 71 72 ret = afs_fetch_data(vnode, req); 73 afs_put_read(req); 74 if (ret < 0) { 75 if (ret == -ENOENT) { 76 _debug("got NOENT from server" 77 " - marking file deleted and stale"); 78 set_bit(AFS_VNODE_DELETED, &vnode->flags); 79 ret = -ESTALE; 80 } 81 } 82 83 _leave(" = %d", ret); 84 return ret; 85 } 86 87 /* 88 * prepare to perform part of a write to a page 89 */ 90 int afs_write_begin(struct file *file, struct address_space *mapping, 91 loff_t pos, unsigned len, unsigned flags, 92 struct page **_page, void **fsdata) 93 { 94 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 95 struct page *page; 96 unsigned long priv; 97 unsigned f, from = pos & (PAGE_SIZE - 1); 98 unsigned t, to = from + len; 99 pgoff_t index = pos >> PAGE_SHIFT; 100 int ret; 101 102 _enter("{%llx:%llu},{%lx},%u,%u", 103 vnode->fid.vid, vnode->fid.vnode, index, from, to); 104 105 page = grab_cache_page_write_begin(mapping, index, flags); 106 if (!page) 107 return -ENOMEM; 108 109 if (!PageUptodate(page) && len != PAGE_SIZE) { 110 ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page); 111 if (ret < 0) { 112 unlock_page(page); 113 put_page(page); 114 _leave(" = %d [prep]", ret); 115 return ret; 116 } 117 SetPageUptodate(page); 118 } 119 120 #ifdef CONFIG_AFS_FSCACHE 121 wait_on_page_fscache(page); 122 #endif 123 124 try_again: 125 /* See if this page is already partially written in a way that we can 126 * merge the new write with. 127 */ 128 t = f = 0; 129 if (PagePrivate(page)) { 130 priv = page_private(page); 131 f = afs_page_dirty_from(page, priv); 132 t = afs_page_dirty_to(page, priv); 133 ASSERTCMP(f, <=, t); 134 } 135 136 if (f != t) { 137 if (PageWriteback(page)) { 138 trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page); 139 goto flush_conflicting_write; 140 } 141 /* If the file is being filled locally, allow inter-write 142 * spaces to be merged into writes. If it's not, only write 143 * back what the user gives us. 144 */ 145 if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) && 146 (to < f || from > t)) 147 goto flush_conflicting_write; 148 } 149 150 *_page = page; 151 _leave(" = 0"); 152 return 0; 153 154 /* The previous write and this write aren't adjacent or overlapping, so 155 * flush the page out. 156 */ 157 flush_conflicting_write: 158 _debug("flush conflict"); 159 ret = write_one_page(page); 160 if (ret < 0) 161 goto error; 162 163 ret = lock_page_killable(page); 164 if (ret < 0) 165 goto error; 166 goto try_again; 167 168 error: 169 put_page(page); 170 _leave(" = %d", ret); 171 return ret; 172 } 173 174 /* 175 * finalise part of a write to a page 176 */ 177 int afs_write_end(struct file *file, struct address_space *mapping, 178 loff_t pos, unsigned len, unsigned copied, 179 struct page *page, void *fsdata) 180 { 181 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 182 unsigned long priv; 183 unsigned int f, from = pos & (PAGE_SIZE - 1); 184 unsigned int t, to = from + copied; 185 loff_t i_size, maybe_i_size; 186 int ret = 0; 187 188 _enter("{%llx:%llu},{%lx}", 189 vnode->fid.vid, vnode->fid.vnode, page->index); 190 191 if (copied == 0) 192 goto out; 193 194 maybe_i_size = pos + copied; 195 196 i_size = i_size_read(&vnode->vfs_inode); 197 if (maybe_i_size > i_size) { 198 write_seqlock(&vnode->cb_lock); 199 i_size = i_size_read(&vnode->vfs_inode); 200 if (maybe_i_size > i_size) 201 i_size_write(&vnode->vfs_inode, maybe_i_size); 202 write_sequnlock(&vnode->cb_lock); 203 } 204 205 if (!PageUptodate(page)) { 206 if (copied < len) { 207 /* Try and load any missing data from the server. The 208 * unmarshalling routine will take care of clearing any 209 * bits that are beyond the EOF. 210 */ 211 ret = afs_fill_page(file, pos + copied, 212 len - copied, page); 213 if (ret < 0) 214 goto out; 215 } 216 SetPageUptodate(page); 217 } 218 219 if (PagePrivate(page)) { 220 priv = page_private(page); 221 f = afs_page_dirty_from(page, priv); 222 t = afs_page_dirty_to(page, priv); 223 if (from < f) 224 f = from; 225 if (to > t) 226 t = to; 227 priv = afs_page_dirty(page, f, t); 228 set_page_private(page, priv); 229 trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page); 230 } else { 231 priv = afs_page_dirty(page, from, to); 232 attach_page_private(page, (void *)priv); 233 trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page); 234 } 235 236 set_page_dirty(page); 237 if (PageDirty(page)) 238 _debug("dirtied"); 239 ret = copied; 240 241 out: 242 unlock_page(page); 243 put_page(page); 244 return ret; 245 } 246 247 /* 248 * kill all the pages in the given range 249 */ 250 static void afs_kill_pages(struct address_space *mapping, 251 pgoff_t first, pgoff_t last) 252 { 253 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 254 struct pagevec pv; 255 unsigned count, loop; 256 257 _enter("{%llx:%llu},%lx-%lx", 258 vnode->fid.vid, vnode->fid.vnode, first, last); 259 260 pagevec_init(&pv); 261 262 do { 263 _debug("kill %lx-%lx", first, last); 264 265 count = last - first + 1; 266 if (count > PAGEVEC_SIZE) 267 count = PAGEVEC_SIZE; 268 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); 269 ASSERTCMP(pv.nr, ==, count); 270 271 for (loop = 0; loop < count; loop++) { 272 struct page *page = pv.pages[loop]; 273 ClearPageUptodate(page); 274 SetPageError(page); 275 end_page_writeback(page); 276 if (page->index >= first) 277 first = page->index + 1; 278 lock_page(page); 279 generic_error_remove_page(mapping, page); 280 unlock_page(page); 281 } 282 283 __pagevec_release(&pv); 284 } while (first <= last); 285 286 _leave(""); 287 } 288 289 /* 290 * Redirty all the pages in a given range. 291 */ 292 static void afs_redirty_pages(struct writeback_control *wbc, 293 struct address_space *mapping, 294 pgoff_t first, pgoff_t last) 295 { 296 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 297 struct pagevec pv; 298 unsigned count, loop; 299 300 _enter("{%llx:%llu},%lx-%lx", 301 vnode->fid.vid, vnode->fid.vnode, first, last); 302 303 pagevec_init(&pv); 304 305 do { 306 _debug("redirty %lx-%lx", first, last); 307 308 count = last - first + 1; 309 if (count > PAGEVEC_SIZE) 310 count = PAGEVEC_SIZE; 311 pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); 312 ASSERTCMP(pv.nr, ==, count); 313 314 for (loop = 0; loop < count; loop++) { 315 struct page *page = pv.pages[loop]; 316 317 redirty_page_for_writepage(wbc, page); 318 end_page_writeback(page); 319 if (page->index >= first) 320 first = page->index + 1; 321 } 322 323 __pagevec_release(&pv); 324 } while (first <= last); 325 326 _leave(""); 327 } 328 329 /* 330 * completion of write to server 331 */ 332 static void afs_pages_written_back(struct afs_vnode *vnode, pgoff_t start, pgoff_t last) 333 { 334 struct address_space *mapping = vnode->vfs_inode.i_mapping; 335 struct page *page; 336 337 XA_STATE(xas, &mapping->i_pages, start); 338 339 _enter("{%llx:%llu},{%lx-%lx}", 340 vnode->fid.vid, vnode->fid.vnode, start, last); 341 342 rcu_read_lock(); 343 344 xas_for_each(&xas, page, last) { 345 ASSERT(PageWriteback(page)); 346 347 detach_page_private(page); 348 trace_afs_page_dirty(vnode, tracepoint_string("clear"), page); 349 page_endio(page, true, 0); 350 } 351 352 rcu_read_unlock(); 353 354 afs_prune_wb_keys(vnode); 355 _leave(""); 356 } 357 358 /* 359 * Find a key to use for the writeback. We cached the keys used to author the 360 * writes on the vnode. *_wbk will contain the last writeback key used or NULL 361 * and we need to start from there if it's set. 362 */ 363 static int afs_get_writeback_key(struct afs_vnode *vnode, 364 struct afs_wb_key **_wbk) 365 { 366 struct afs_wb_key *wbk = NULL; 367 struct list_head *p; 368 int ret = -ENOKEY, ret2; 369 370 spin_lock(&vnode->wb_lock); 371 if (*_wbk) 372 p = (*_wbk)->vnode_link.next; 373 else 374 p = vnode->wb_keys.next; 375 376 while (p != &vnode->wb_keys) { 377 wbk = list_entry(p, struct afs_wb_key, vnode_link); 378 _debug("wbk %u", key_serial(wbk->key)); 379 ret2 = key_validate(wbk->key); 380 if (ret2 == 0) { 381 refcount_inc(&wbk->usage); 382 _debug("USE WB KEY %u", key_serial(wbk->key)); 383 break; 384 } 385 386 wbk = NULL; 387 if (ret == -ENOKEY) 388 ret = ret2; 389 p = p->next; 390 } 391 392 spin_unlock(&vnode->wb_lock); 393 if (*_wbk) 394 afs_put_wb_key(*_wbk); 395 *_wbk = wbk; 396 return 0; 397 } 398 399 static void afs_store_data_success(struct afs_operation *op) 400 { 401 struct afs_vnode *vnode = op->file[0].vnode; 402 403 op->ctime = op->file[0].scb.status.mtime_client; 404 afs_vnode_commit_status(op, &op->file[0]); 405 if (op->error == 0) { 406 if (!op->store.laundering) 407 afs_pages_written_back(vnode, op->store.first, op->store.last); 408 afs_stat_v(vnode, n_stores); 409 atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes); 410 } 411 } 412 413 static const struct afs_operation_ops afs_store_data_operation = { 414 .issue_afs_rpc = afs_fs_store_data, 415 .issue_yfs_rpc = yfs_fs_store_data, 416 .success = afs_store_data_success, 417 }; 418 419 /* 420 * write to a file 421 */ 422 static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, 423 loff_t pos, pgoff_t first, pgoff_t last, 424 bool laundering) 425 { 426 struct afs_operation *op; 427 struct afs_wb_key *wbk = NULL; 428 loff_t size = iov_iter_count(iter), i_size; 429 int ret = -ENOKEY; 430 431 _enter("%s{%llx:%llu.%u},%llx,%llx", 432 vnode->volume->name, 433 vnode->fid.vid, 434 vnode->fid.vnode, 435 vnode->fid.unique, 436 size, pos); 437 438 ret = afs_get_writeback_key(vnode, &wbk); 439 if (ret) { 440 _leave(" = %d [no keys]", ret); 441 return ret; 442 } 443 444 op = afs_alloc_operation(wbk->key, vnode->volume); 445 if (IS_ERR(op)) { 446 afs_put_wb_key(wbk); 447 return -ENOMEM; 448 } 449 450 i_size = i_size_read(&vnode->vfs_inode); 451 452 afs_op_set_vnode(op, 0, vnode); 453 op->file[0].dv_delta = 1; 454 op->store.write_iter = iter; 455 op->store.pos = pos; 456 op->store.first = first; 457 op->store.last = last; 458 op->store.size = size; 459 op->store.i_size = max(pos + size, i_size); 460 op->store.laundering = laundering; 461 op->mtime = vnode->vfs_inode.i_mtime; 462 op->flags |= AFS_OPERATION_UNINTR; 463 op->ops = &afs_store_data_operation; 464 465 try_next_key: 466 afs_begin_vnode_operation(op); 467 afs_wait_for_operation(op); 468 469 switch (op->error) { 470 case -EACCES: 471 case -EPERM: 472 case -ENOKEY: 473 case -EKEYEXPIRED: 474 case -EKEYREJECTED: 475 case -EKEYREVOKED: 476 _debug("next"); 477 478 ret = afs_get_writeback_key(vnode, &wbk); 479 if (ret == 0) { 480 key_put(op->key); 481 op->key = key_get(wbk->key); 482 goto try_next_key; 483 } 484 break; 485 } 486 487 afs_put_wb_key(wbk); 488 _leave(" = %d", op->error); 489 return afs_put_operation(op); 490 } 491 492 /* 493 * Synchronously write back the locked page and any subsequent non-locked dirty 494 * pages. 495 */ 496 static int afs_write_back_from_locked_page(struct address_space *mapping, 497 struct writeback_control *wbc, 498 struct page *primary_page, 499 pgoff_t final_page) 500 { 501 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 502 struct iov_iter iter; 503 struct page *pages[8], *page; 504 unsigned long count, priv; 505 unsigned n, offset, to, f, t; 506 pgoff_t start, first, last; 507 loff_t i_size, pos, end; 508 int loop, ret; 509 510 _enter(",%lx", primary_page->index); 511 512 count = 1; 513 if (test_set_page_writeback(primary_page)) 514 BUG(); 515 516 /* Find all consecutive lockable dirty pages that have contiguous 517 * written regions, stopping when we find a page that is not 518 * immediately lockable, is not dirty or is missing, or we reach the 519 * end of the range. 520 */ 521 start = primary_page->index; 522 priv = page_private(primary_page); 523 offset = afs_page_dirty_from(primary_page, priv); 524 to = afs_page_dirty_to(primary_page, priv); 525 trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page); 526 527 WARN_ON(offset == to); 528 if (offset == to) 529 trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page); 530 531 if (start >= final_page || 532 (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))) 533 goto no_more; 534 535 start++; 536 do { 537 _debug("more %lx [%lx]", start, count); 538 n = final_page - start + 1; 539 if (n > ARRAY_SIZE(pages)) 540 n = ARRAY_SIZE(pages); 541 n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages); 542 _debug("fgpc %u", n); 543 if (n == 0) 544 goto no_more; 545 if (pages[0]->index != start) { 546 do { 547 put_page(pages[--n]); 548 } while (n > 0); 549 goto no_more; 550 } 551 552 for (loop = 0; loop < n; loop++) { 553 page = pages[loop]; 554 if (to != PAGE_SIZE && 555 !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) 556 break; 557 if (page->index > final_page) 558 break; 559 if (!trylock_page(page)) 560 break; 561 if (!PageDirty(page) || PageWriteback(page)) { 562 unlock_page(page); 563 break; 564 } 565 566 priv = page_private(page); 567 f = afs_page_dirty_from(page, priv); 568 t = afs_page_dirty_to(page, priv); 569 if (f != 0 && 570 !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) { 571 unlock_page(page); 572 break; 573 } 574 to = t; 575 576 trace_afs_page_dirty(vnode, tracepoint_string("store+"), page); 577 578 if (!clear_page_dirty_for_io(page)) 579 BUG(); 580 if (test_set_page_writeback(page)) 581 BUG(); 582 unlock_page(page); 583 put_page(page); 584 } 585 count += loop; 586 if (loop < n) { 587 for (; loop < n; loop++) 588 put_page(pages[loop]); 589 goto no_more; 590 } 591 592 start += loop; 593 } while (start <= final_page && count < 65536); 594 595 no_more: 596 /* We now have a contiguous set of dirty pages, each with writeback 597 * set; the first page is still locked at this point, but all the rest 598 * have been unlocked. 599 */ 600 unlock_page(primary_page); 601 602 first = primary_page->index; 603 last = first + count - 1; 604 _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to); 605 606 pos = first; 607 pos <<= PAGE_SHIFT; 608 pos += offset; 609 end = last; 610 end <<= PAGE_SHIFT; 611 end += to; 612 613 /* Trim the actual write down to the EOF */ 614 i_size = i_size_read(&vnode->vfs_inode); 615 if (end > i_size) 616 end = i_size; 617 618 if (pos < i_size) { 619 iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, end - pos); 620 ret = afs_store_data(vnode, &iter, pos, first, last, false); 621 } else { 622 /* The dirty region was entirely beyond the EOF. */ 623 ret = 0; 624 } 625 626 switch (ret) { 627 case 0: 628 ret = count; 629 break; 630 631 default: 632 pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret); 633 fallthrough; 634 case -EACCES: 635 case -EPERM: 636 case -ENOKEY: 637 case -EKEYEXPIRED: 638 case -EKEYREJECTED: 639 case -EKEYREVOKED: 640 afs_redirty_pages(wbc, mapping, first, last); 641 mapping_set_error(mapping, ret); 642 break; 643 644 case -EDQUOT: 645 case -ENOSPC: 646 afs_redirty_pages(wbc, mapping, first, last); 647 mapping_set_error(mapping, -ENOSPC); 648 break; 649 650 case -EROFS: 651 case -EIO: 652 case -EREMOTEIO: 653 case -EFBIG: 654 case -ENOENT: 655 case -ENOMEDIUM: 656 case -ENXIO: 657 trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail); 658 afs_kill_pages(mapping, first, last); 659 mapping_set_error(mapping, ret); 660 break; 661 } 662 663 _leave(" = %d", ret); 664 return ret; 665 } 666 667 /* 668 * write a page back to the server 669 * - the caller locked the page for us 670 */ 671 int afs_writepage(struct page *page, struct writeback_control *wbc) 672 { 673 int ret; 674 675 _enter("{%lx},", page->index); 676 677 ret = afs_write_back_from_locked_page(page->mapping, wbc, page, 678 wbc->range_end >> PAGE_SHIFT); 679 if (ret < 0) { 680 _leave(" = %d", ret); 681 return 0; 682 } 683 684 wbc->nr_to_write -= ret; 685 686 _leave(" = 0"); 687 return 0; 688 } 689 690 /* 691 * write a region of pages back to the server 692 */ 693 static int afs_writepages_region(struct address_space *mapping, 694 struct writeback_control *wbc, 695 pgoff_t index, pgoff_t end, pgoff_t *_next) 696 { 697 struct page *page; 698 int ret, n; 699 700 _enter(",,%lx,%lx,", index, end); 701 702 do { 703 n = find_get_pages_range_tag(mapping, &index, end, 704 PAGECACHE_TAG_DIRTY, 1, &page); 705 if (!n) 706 break; 707 708 _debug("wback %lx", page->index); 709 710 /* 711 * at this point we hold neither the i_pages lock nor the 712 * page lock: the page may be truncated or invalidated 713 * (changing page->mapping to NULL), or even swizzled 714 * back from swapper_space to tmpfs file mapping 715 */ 716 ret = lock_page_killable(page); 717 if (ret < 0) { 718 put_page(page); 719 _leave(" = %d", ret); 720 return ret; 721 } 722 723 if (page->mapping != mapping || !PageDirty(page)) { 724 unlock_page(page); 725 put_page(page); 726 continue; 727 } 728 729 if (PageWriteback(page)) { 730 unlock_page(page); 731 if (wbc->sync_mode != WB_SYNC_NONE) 732 wait_on_page_writeback(page); 733 put_page(page); 734 continue; 735 } 736 737 if (!clear_page_dirty_for_io(page)) 738 BUG(); 739 ret = afs_write_back_from_locked_page(mapping, wbc, page, end); 740 put_page(page); 741 if (ret < 0) { 742 _leave(" = %d", ret); 743 return ret; 744 } 745 746 wbc->nr_to_write -= ret; 747 748 cond_resched(); 749 } while (index < end && wbc->nr_to_write > 0); 750 751 *_next = index; 752 _leave(" = 0 [%lx]", *_next); 753 return 0; 754 } 755 756 /* 757 * write some of the pending data back to the server 758 */ 759 int afs_writepages(struct address_space *mapping, 760 struct writeback_control *wbc) 761 { 762 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 763 pgoff_t start, end, next; 764 int ret; 765 766 _enter(""); 767 768 /* We have to be careful as we can end up racing with setattr() 769 * truncating the pagecache since the caller doesn't take a lock here 770 * to prevent it. 771 */ 772 if (wbc->sync_mode == WB_SYNC_ALL) 773 down_read(&vnode->validate_lock); 774 else if (!down_read_trylock(&vnode->validate_lock)) 775 return 0; 776 777 if (wbc->range_cyclic) { 778 start = mapping->writeback_index; 779 end = -1; 780 ret = afs_writepages_region(mapping, wbc, start, end, &next); 781 if (start > 0 && wbc->nr_to_write > 0 && ret == 0) 782 ret = afs_writepages_region(mapping, wbc, 0, start, 783 &next); 784 mapping->writeback_index = next; 785 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { 786 end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT); 787 ret = afs_writepages_region(mapping, wbc, 0, end, &next); 788 if (wbc->nr_to_write > 0) 789 mapping->writeback_index = next; 790 } else { 791 start = wbc->range_start >> PAGE_SHIFT; 792 end = wbc->range_end >> PAGE_SHIFT; 793 ret = afs_writepages_region(mapping, wbc, start, end, &next); 794 } 795 796 up_read(&vnode->validate_lock); 797 _leave(" = %d", ret); 798 return ret; 799 } 800 801 /* 802 * write to an AFS file 803 */ 804 ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) 805 { 806 struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); 807 ssize_t result; 808 size_t count = iov_iter_count(from); 809 810 _enter("{%llx:%llu},{%zu},", 811 vnode->fid.vid, vnode->fid.vnode, count); 812 813 if (IS_SWAPFILE(&vnode->vfs_inode)) { 814 printk(KERN_INFO 815 "AFS: Attempt to write to active swap file!\n"); 816 return -EBUSY; 817 } 818 819 if (!count) 820 return 0; 821 822 result = generic_file_write_iter(iocb, from); 823 824 _leave(" = %zd", result); 825 return result; 826 } 827 828 /* 829 * flush any dirty pages for this process, and check for write errors. 830 * - the return status from this call provides a reliable indication of 831 * whether any write errors occurred for this process. 832 */ 833 int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 834 { 835 struct inode *inode = file_inode(file); 836 struct afs_vnode *vnode = AFS_FS_I(inode); 837 838 _enter("{%llx:%llu},{n=%pD},%d", 839 vnode->fid.vid, vnode->fid.vnode, file, 840 datasync); 841 842 return file_write_and_wait_range(file, start, end); 843 } 844 845 /* 846 * notification that a previously read-only page is about to become writable 847 * - if it returns an error, the caller will deliver a bus error signal 848 */ 849 vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) 850 { 851 struct file *file = vmf->vma->vm_file; 852 struct inode *inode = file_inode(file); 853 struct afs_vnode *vnode = AFS_FS_I(inode); 854 unsigned long priv; 855 856 _enter("{{%llx:%llu}},{%lx}", 857 vnode->fid.vid, vnode->fid.vnode, vmf->page->index); 858 859 sb_start_pagefault(inode->i_sb); 860 861 /* Wait for the page to be written to the cache before we allow it to 862 * be modified. We then assume the entire page will need writing back. 863 */ 864 #ifdef CONFIG_AFS_FSCACHE 865 if (PageFsCache(vmf->page) && 866 wait_on_page_bit_killable(vmf->page, PG_fscache) < 0) 867 return VM_FAULT_RETRY; 868 #endif 869 870 if (wait_on_page_writeback_killable(vmf->page)) 871 return VM_FAULT_RETRY; 872 873 if (lock_page_killable(vmf->page) < 0) 874 return VM_FAULT_RETRY; 875 876 /* We mustn't change page->private until writeback is complete as that 877 * details the portion of the page we need to write back and we might 878 * need to redirty the page if there's a problem. 879 */ 880 wait_on_page_writeback(vmf->page); 881 882 priv = afs_page_dirty(vmf->page, 0, PAGE_SIZE); 883 priv = afs_page_dirty_mmapped(priv); 884 if (PagePrivate(vmf->page)) 885 set_page_private(vmf->page, priv); 886 else 887 attach_page_private(vmf->page, (void *)priv); 888 trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), vmf->page); 889 file_update_time(file); 890 891 sb_end_pagefault(inode->i_sb); 892 return VM_FAULT_LOCKED; 893 } 894 895 /* 896 * Prune the keys cached for writeback. The caller must hold vnode->wb_lock. 897 */ 898 void afs_prune_wb_keys(struct afs_vnode *vnode) 899 { 900 LIST_HEAD(graveyard); 901 struct afs_wb_key *wbk, *tmp; 902 903 /* Discard unused keys */ 904 spin_lock(&vnode->wb_lock); 905 906 if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) && 907 !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) { 908 list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) { 909 if (refcount_read(&wbk->usage) == 1) 910 list_move(&wbk->vnode_link, &graveyard); 911 } 912 } 913 914 spin_unlock(&vnode->wb_lock); 915 916 while (!list_empty(&graveyard)) { 917 wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link); 918 list_del(&wbk->vnode_link); 919 afs_put_wb_key(wbk); 920 } 921 } 922 923 /* 924 * Clean up a page during invalidation. 925 */ 926 int afs_launder_page(struct page *page) 927 { 928 struct address_space *mapping = page->mapping; 929 struct afs_vnode *vnode = AFS_FS_I(mapping->host); 930 struct iov_iter iter; 931 struct bio_vec bv[1]; 932 unsigned long priv; 933 unsigned int f, t; 934 int ret = 0; 935 936 _enter("{%lx}", page->index); 937 938 priv = page_private(page); 939 if (clear_page_dirty_for_io(page)) { 940 f = 0; 941 t = PAGE_SIZE; 942 if (PagePrivate(page)) { 943 f = afs_page_dirty_from(page, priv); 944 t = afs_page_dirty_to(page, priv); 945 } 946 947 bv[0].bv_page = page; 948 bv[0].bv_offset = f; 949 bv[0].bv_len = t - f; 950 iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len); 951 952 trace_afs_page_dirty(vnode, tracepoint_string("launder"), page); 953 ret = afs_store_data(vnode, &iter, (loff_t)page->index << PAGE_SHIFT, 954 page->index, page->index, true); 955 } 956 957 detach_page_private(page); 958 trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page); 959 wait_on_page_fscache(page); 960 return ret; 961 } 962