// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback. We also
 * need to pin the cache object to write back to.
 */
int afs_set_page_dirty(struct page *page)
{
	return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file. We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		ASSERTCMP(f, <=, t);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes. If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = &folio->page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
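	 * folio_write_one() writes the folio out and waits for it, returning
	 * with the folio unlocked, so it has to be relocked before retrying.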
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = folio_write_one(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
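 * This is used when a store to the server fails with a retryable error (such
 * as a quota, space or key problem) so that the data isn't lost and can be
 * written back again later.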
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback. We cached the keys used to author the
 * writes on the vnode. *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
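 *
 * Keys that fail validation are skipped; if no usable key is found, an error
 * is returned so that the caller can give up.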
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
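 *
 * Candidate folios are gathered in batches under the RCU read lock and only
 * switched from dirty to writeback afterwards, as the dirty flags can't be
 * cleared under that lock if any of the folios are mapped.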
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
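 *
 * The first folio is passed in locked; contiguous dirty folios following it
 * are picked up, marked for writeback and unlocked by afs_extend_writeback().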
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored. Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache. We have to fix this up
		 * later if the store fails.
		 */
		afs_write_to_cache(vnode, start, len, i_size, caching);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF.
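		 * There is nothing to send to the server, so just clean up
		 * the cache and writeback state locally.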
		 */
		fscache_clear_page_bits(afs_vnode_cache(vnode),
					mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(subpage);
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", folio_index(folio));

#ifdef CONFIG_AFS_FSCACHE
	folio_wait_fscache(folio);
#endif

	start = folio_index(folio) * PAGE_SIZE;
	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
					       folio, start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;

		folio = page_folio(head_page);
		start = folio_pos(folio); /* May regress with THPs */

		_debug("wback %lx", folio_index(folio));

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio) ||
		    folio_test_fscache(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE) {
				folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
				folio_wait_fscache(folio);
#endif
			}
			folio_put(folio);
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
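 * - the vnode is revalidated with the file's key before the flush is started.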
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified. We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* We mustn't change folio->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback. This takes vnode->wb_lock itself, so
 * the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
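 * Any remaining dirty region is stored back to the server synchronously
 * before the folio's private state is discarded.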
 */
int afs_launder_page(struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio_index(folio));

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

		bv[0].bv_page = &folio->page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}

/*
 * Deal with the completion of writing the data to the cache.
 */
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				    bool was_async)
{
	struct afs_vnode *vnode = priv;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		afs_invalidate_cache(vnode, 0);
}

/*
 * Save the write to the cache also.
 */
static void afs_write_to_cache(struct afs_vnode *vnode,
			       loff_t start, size_t len, loff_t i_size,
			       bool caching)
{
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->vfs_inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
}