// SPDX-License-Identifier: GPL-2.0-or-later
/* Handling of writes to regular files and writing back to the server.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
int afs_set_page_dirty(struct page *page)
{
	return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

/*
 * Prepare to perform part of a write to a page.
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch the area to be written into the cache if we're caching
	 * this file.  We need to do this before we get a lock on the page in
	 * case there's more than one writer competing for the same cache
	 * block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		ASSERTCMP(f, <=, t);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = &folio->page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping,
	 * so flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = folio_write_one(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}
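
/*
 * Note: afs_folio_dirty(), afs_folio_dirty_from() and afs_folio_dirty_to()
 * are helpers defined in internal.h that pack the dirtied byte range of a
 * folio into the folio->private word.  Roughly (the exact bit layout and
 * the mmapped flag live in internal.h), the idea is:
 *
 *	priv = (to << shift) | from;
 *	from = priv & mask;
 *	to   = (priv >> shift) & mask;
 *
 * Keeping both offsets in one word is what lets afs_write_begin() above
 * decide with a single folio_get_private() whether a new write can be
 * merged with an earlier, still-dirty one.
 */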

/*
 * Finalise part of a write to a page.
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}

/*
 * Kill all the pages in the given range.
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}
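
/*
 * Note that afs_kill_pages() above and afs_redirty_pages() below are the
 * two failure paths for a store operation: pages are discarded outright for
 * errors deemed unrecoverable (-EIO, -ENOENT and the like) and merely
 * redirtied for transient ones (-ENOSPC and key/permission errors), as
 * selected by the switch at the end of afs_write_back_from_locked_folio().
 */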

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}

/*
 * Completion of a write to the server.
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}
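
/*
 * Note that the xas_for_each() walk in afs_pages_written_back() runs under
 * the RCU read lock only.  That should be sufficient here: every folio in
 * the range still has its writeback flag set (asserted above), and
 * truncation waits for writeback to finish, so the folios cannot be freed
 * until folio_end_writeback() has been called on them.
 */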

/*
 * Find a key to use for the writeback.  We cached the keys used to author
 * the writes on the vnode.  *_wbk will contain the last writeback key used
 * or NULL, and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * Write data to a file on the server.
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct netfs_i_context *ictx = &vnode->netfs_ctx;
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter);
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta	= 1;
	op->file[0].modification = true;
	op->store.write_iter	= iter;
	op->store.pos		= pos;
	op->store.size		= size;
	op->store.i_size	= max(pos + size, ictx->remote_i_size);
	op->store.laundering	= laundering;
	op->mtime		= vnode->vfs_inode.i_mtime;
	op->flags		|= AFS_OPERATION_UNINTR;
	op->ops			= &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}
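
/*
 * In outline, the key-rotation loop in afs_store_data() behaves as:
 *
 *	issue the RPC with the current key;
 *	while (the server rejected the key) {
 *		wbk = next cached writeback key that still validates;
 *		if (!wbk)
 *			break;
 *		reissue the RPC with wbk->key;
 *	}
 *
 * i.e. the store is retried with each key that was used to write to this
 * vnode until one is accepted or the list is exhausted, whereupon the last
 * error seen is returned.
 */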

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}
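
/*
 * Design note: afs_extend_writeback() gathers folios under RCU in
 * pagevec-sized batches (PAGEVEC_SIZE entries at a time) and only then
 * shifts the whole batch into the writeback state, dropping the RCU read
 * lock between batches so that it can reschedule.  The dirty and writeback
 * flags can't be changed during the RCU walk itself as the folios may be
 * mapped, hence the trylock-and-recheck dance inside the loop.
 */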

/*
 * Synchronously write back the locked page and any subsequent non-locked
 * dirty pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */
		afs_write_to_cache(vnode, start, len, i_size, caching);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(afs_vnode_cache(vnode),
					mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
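
/*
 * Note the cap applied in afs_write_back_from_locked_folio() above:
 * 65536 * 4096 bytes is 256MiB, so a single FS.StoreData RPC never
 * transfers more than 256MiB however much contiguous dirty data was found.
 */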

/*
 * Write a page back to the server.
 * - the caller locked the page for us
 */
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(subpage);
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", folio_index(folio));

#ifdef CONFIG_AFS_FSCACHE
	folio_wait_fscache(folio);
#endif

	start = folio_index(folio) * PAGE_SIZE;
	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
					       folio, start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * Write a region of pages back to the server.
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;

		folio = page_folio(head_page);
		start = folio_pos(folio); /* May regress with THPs */

		_debug("wback %lx", folio_index(folio));

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio) ||
		    folio_test_fscache(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE) {
				folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
				folio_wait_fscache(folio);
#endif
			}
			folio_put(folio);
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}
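
/*
 * Note that afs_writepages_region() may briefly step "start" backwards:
 * folio_pos() of the folio returned by the dirty-tag lookup can precede the
 * requested position when that folio is a multi-page folio (hence the "May
 * regress with THPs" comment), but the scan still advances because the
 * folio is written back and cleaned before the next dirty-tag lookup.
 */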

/*
 * Write some of the pending data back to the server.
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Write to an AFS file.
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}
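
/*
 * Note the range_cyclic handling in afs_writepages() above: the sweep runs
 * from writeback_index to the end of the file and then, if write quota
 * remains (wbc->nr_to_write > 0), wraps around to cover [0, start) as well,
 * so every dirty page is eventually visited even though a pass may begin
 * mid-file.
 */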

/*
 * Flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * Notification that a previously read-only page is about to become writable.
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* We mustn't change folio->private until writeback is complete as
	 * that details the portion of the page we need to write back and we
	 * might need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  vnode->wb_lock is taken here, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
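
/*
 * A cached writeback key is only reaped once its refcount has dropped to 1
 * (the list's own reference) and the mapping carries no dirty or writeback
 * pages, so a key cannot vanish while a store authored with it may still
 * need to be issued or retried.
 */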

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio_index(folio));

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

		bv[0].bv_page = &folio->page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}

/*
 * Deal with the completion of writing the data to the cache.
 */
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				    bool was_async)
{
	struct afs_vnode *vnode = priv;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		afs_invalidate_cache(vnode, 0);
}

/*
 * Save the write to the cache also.
 */
static void afs_write_to_cache(struct afs_vnode *vnode,
			       loff_t start, size_t len, loff_t i_size,
			       bool caching)
{
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->vfs_inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
}
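
/*
 * Note that on any cache write failure other than -ENOBUFS the cache
 * contents for the vnode can no longer be assumed to match the server, so
 * afs_write_to_cache_done() invalidates the whole cache object rather than
 * trying to track which extents went stale.
 */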