// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		ASSERTCMP(f, <=, t);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = &folio->page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = folio_write_one(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}

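/*
 * Note on the dirty-range bookkeeping used above: AFS packs the byte range
 * [from, to) that is dirty within a folio into the folio's private word via
 * the afs_folio_dirty*() helpers.  A small worked example of the merge test
 * in afs_write_begin(), assuming an existing dirty range [f, t) of
 * [0x100, 0x300) in a 4KiB folio:
 *
 *  - a new write of [0x280, 0x400) touches the existing range, so
 *    afs_write_end() merges the two into [0x100, 0x400);
 *  - a new write of [0x800, 0x900) satisfies (to < f || from > t), so unless
 *    AFS_VNODE_NEW_CONTENT is set the old range is flushed first
 *    (flush_conflicting_write) and the new range then stands on its own.
 *
 * This keeps folio->private describing a single contiguous dirty span.
 */
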
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			afs_set_i_size(vnode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

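/*
 * afs_kill_pages() above and afs_redirty_pages() below are the two failure
 * paths taken by afs_write_back_from_locked_folio() once a store has been
 * attempted: pages are killed (uptodate cleared and the page removed) for
 * errors that will not go away, such as -EIO or -ENOENT, and merely
 * redirtied for errors that a later writeback pass might get past, such as
 * -ENOSPC or an expired key.
 */
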
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

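/*
 * A note on the walk above: it runs under the RCU read lock only, without
 * taking folio references.  Every folio in the stored span is expected to
 * still be under writeback (the ASSERT checks this), which is what keeps it
 * from being reclaimed or truncated away while its private word is cleared
 * and writeback is ended.
 */
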
/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return 0;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

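/*
 * Key cycling: a store starts out using the first cached writeback key on the
 * vnode.  If the server refuses it with a permission or key error (-EACCES,
 * -EPERM, -ENOKEY, -EKEYEXPIRED, -EKEYREJECTED or -EKEYREVOKED),
 * afs_get_writeback_key() is asked for the next key that still validates and
 * the operation is retried, so every key that authored dirty data gets a
 * chance before the write is failed.  The laundering flag suppresses the
 * afs_pages_written_back() call on success because afs_launder_page() clears
 * the folio state itself.
 */
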
/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

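/*
 * How the length accumulates above, assuming 4KiB folios: each folio that is
 * accepted contributes "filler + t" bytes, where t is the end of its dirty
 * range and filler is the clean tail left over from the previous folio.  A
 * fully dirty folio (t == psize) chains on with no filler and the scan keeps
 * going; a folio whose dirty range stops short of the end is still included
 * but terminates the extension, and one whose dirty range does not start at
 * offset 0 is rejected outright - unless new_content is set, in which case
 * the zero-filled gaps may be written back too.  Candidates are gathered
 * under the RCU read lock and only flipped from dirty to writeback once the
 * batch is complete.
 */
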
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

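/*
 * Sizing of a single store, in other words: the write is clamped to the
 * caller's range, to the current EOF (anything wholly beyond it is simply
 * discarded above rather than sent), and to 65536 * 4096 bytes, i.e. 256MiB
 * per FS.StoreData RPC.
 */
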
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(subpage);
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", folio_index(folio));

	start = folio_index(folio) * PAGE_SIZE;
	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
					       folio, start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;

		folio = page_folio(head_page);
		start = folio_pos(folio); /* May regress with THPs */

		_debug("wback %lx", folio_index(folio));

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			folio_put(folio);
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

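/*
 * For cyclic writeback (wbc->range_cyclic), afs_writepages() below resumes
 * scanning at mapping->writeback_index, writes towards the end of the file,
 * and, if there is still nr_to_write budget left, wraps around to cover the
 * range it skipped at the front; writeback_index is updated so the next pass
 * carries on from where this one stopped.  The whole thing runs under a read
 * lock on vnode->validate_lock to avoid racing with truncation by setattr.
 */
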
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* We mustn't change folio->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio_index(folio));

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

		bv[0].bv_page = &folio->page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}