// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * Handle completion of a read operation to fill a page.
 */
static void afs_fill_hole(struct afs_read *req)
{
	if (iov_iter_count(req->iter) > 0)
		/* The read was short - clear the excess buffer. */
		iov_iter_zero(iov_iter_count(req->iter), req->iter);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct file *file,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(sizeof(struct afs_read), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->vnode = vnode;
	req->done = afs_fill_hole;
	req->key = key_get(afs_file_key(file));
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->iter = &req->def_iter;
	iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len);

	ret = afs_fetch_data(vnode, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%llx:%llu},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}
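	/* Note on dirty-region tracking: the byte range within a page that
	 * has been written is packed into page->private.  A minimal sketch of
	 * how the helpers (defined in internal.h) are used - the exact bit
	 * encoding is not reproduced here:
	 *
	 *	priv = afs_page_dirty(page, from, to);	// record range [from, to)
	 *	f = afs_page_dirty_from(page, priv);	// first dirty byte
	 *	t = afs_page_dirty_to(page, priv);	// one past the last dirty byte
	 *
	 * The code below consults this record to decide whether the new write
	 * can be merged with an existing dirty region or must flush it first.
	 */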
try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = pos & (PAGE_SIZE - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret = 0;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;
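	/* Check-lock-recheck: i_size is tested once without the lock, and
	 * only if this write appears to extend the file is cb_lock taken and
	 * the test repeated, so that a racing extending write cannot have its
	 * larger size overwritten by our smaller one.
	 */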
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(file, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(page, f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
	} else {
		priv = afs_page_dirty(page, from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
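/* afs_kill_pages() above is the terminal failure path: the pages are marked
 * in error and removed from the pagecache.  afs_redirty_pages() below is the
 * retryable path: the pages are simply marked dirty again so that a later
 * writeback pass can have another go.  afs_write_back_from_locked_page()
 * chooses between them according to the error returned by the server.
 */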
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%llx:%llu},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, pgoff_t start, pgoff_t last)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct page *page;

	XA_STATE(xas, &mapping->i_pages, start);

	_enter("{%llx:%llu},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, start, last);

	rcu_read_lock();

	xas_for_each(&xas, page, last) {
		ASSERT(PageWriteback(page));

		detach_page_private(page);
		trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
		page_endio(page, true, 0);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}
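/* Writeback credentials: each key used to author a write is cached on the
 * vnode's wb_keys list (populated at open time - see afs_cache_wb_key() in
 * file.c), so that writeback, which may run long after the writing process
 * has exited, can still authenticate to the server as the original writer.
 */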
/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.first, op->store.last);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter,
			  loff_t pos, pgoff_t first, pgoff_t last,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.first = first;
	op->store.last = last;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}
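/* On a permission or key error, afs_store_data() above rotates to the next
 * cached writeback key and reissues the operation; afs_get_writeback_key()
 * resumes its walk of the wb_keys list from the key recorded in *_wbk, so
 * each key is tried at most once per store.
 */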
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	loff_t i_size, pos, end;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = afs_page_dirty_from(primary_page, priv);
	to = afs_page_dirty_to(primary_page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;
	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
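	/* Convert the page run to a byte range: the region to be stored runs
	 * from @offset within the first page to @to within the last, i.e.
	 * roughly:
	 *
	 *	pos = (first << PAGE_SHIFT) + offset;
	 *	end = (last << PAGE_SHIFT) + to;
	 */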
	pos = first;
	pos <<= PAGE_SHIFT;
	pos += offset;
	end = last;
	end <<= PAGE_SHIFT;
	end += to;

	/* Trim the actual write down to the EOF */
	i_size = i_size_read(&vnode->vfs_inode);
	if (end > i_size)
		end = i_size;

	if (pos < i_size) {
		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, end - pos);
		ret = afs_store_data(vnode, &iter, pos, first, last, false);
	} else {
		/* The dirty region was entirely beyond the EOF. */
		ret = 0;
	}

	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}
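/* afs_writepages() below is the entry point used by the VM flusher threads.
 * In the range_cyclic case it resumes from wherever the previous pass left
 * off (mapping->writeback_index) and then wraps around to cover the start of
 * the file, so repeated background passes cycle over the whole mapping rather
 * than restarting at page 0 every time.
 */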
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	pgoff_t start, end, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
	if (wait_on_page_writeback_killable(vmf->page)) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	if (lock_page_killable(vmf->page) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}
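	/* A store through a shared mapping may touch any byte of the page
	 * without further notification, so the whole page is recorded as
	 * dirty below and afs_page_dirty_mmapped() additionally flags it as
	 * mmapped; the byte-range tracking used for write() cannot apply
	 * here.
	 */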
	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = afs_page_dirty(vmf->page, 0, PAGE_SIZE);
	priv = afs_page_dirty_mmapped(priv);
	if (PagePrivate(vmf->page))
		set_page_private(vmf->page, priv);
	else
		attach_page_private(vmf->page, (void *)priv);
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), vmf->page);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  Unused keys are moved off the list
 * under vnode->wb_lock and then released outside of it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
		}

		bv[0].bv_page = page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
		/* The iterator starts at byte f of the page, so the store
		 * position must include that offset too.
		 */
		ret = afs_store_data(vnode, &iter,
				     ((loff_t)page->index << PAGE_SHIFT) + f,
				     page->index, page->index, true);
	}

	detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
	return ret;
}