Lines Matching +full:write +full:-back (fs/afs/write.c, the Linux kernel AFS client)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* handling of writes to regular files and writing back to the server
8 #include <linux/backing-dev.h>
28 * We also need to pin the cache object to write back to.
33 afs_vnode_cache(AFS_FS_I(mapping->host))); in afs_dirty_folio()
47 * Flush out a conflicting write. This may extend the write to the surrounding pages if they are also dirty and contiguous.
66 * prepare to perform part of a write to a page
81 vnode->fid.vid, vnode->fid.vnode, pos, len); in afs_write_begin()
87 ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata); in afs_write_begin()
92 from = pos - index * PAGE_SIZE; in afs_write_begin()
97 * merge the new write with. in afs_write_begin()
110 /* If the file is being filled locally, allow inter-write in afs_write_begin()
111 * spaces to be merged into writes. If it's not, only write in afs_write_begin()
112 * back what the user gives us. in afs_write_begin()
114 if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) && in afs_write_begin()
123 /* The previous write and this write aren't adjacent or overlapping, so in afs_write_begin()
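The AFS_VNODE_NEW_CONTENT test above decides whether a write that does not touch the folio's existing dirty range may still be merged into it. A minimal user-space sketch of that decision, with an illustrative struct standing in for the kernel's folio-private dirty record (names here are not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	struct dirty_range { unsigned int from, to; }; /* byte offsets in the page */

	static bool can_merge(const struct dirty_range *d,
			      unsigned int from, unsigned int to, bool new_content)
	{
		if (new_content)	/* file being filled locally: gaps may merge */
			return true;
		return from <= d->to && to >= d->from;	/* must overlap or touch */
	}

	int main(void)
	{
		struct dirty_range d = { .from = 0, .to = 512 };

		printf("%d\n", can_merge(&d, 1024, 2048, false)); /* 0: disjoint */
		printf("%d\n", can_merge(&d, 512, 1024, false));  /* 1: adjacent */
		return 0;
	}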
151 * finalise part of a write to a page
165 vnode->fid.vid, vnode->fid.vnode, folio_index(folio)); in afs_write_end()
181 i_size = i_size_read(&vnode->netfs.inode); in afs_write_end()
183 write_seqlock(&vnode->cb_lock); in afs_write_end()
184 i_size = i_size_read(&vnode->netfs.inode); in afs_write_end()
187 write_sequnlock(&vnode->cb_lock); in afs_write_end()
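afs_write_end() re-reads i_size under vnode->cb_lock before extending it, since the cheap lockless first check can race with a concurrent writer. A user-space sketch of that check/recheck pattern, with a pthread mutex standing in for the kernel's seqlock:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t size_lock = PTHREAD_MUTEX_INITIALIZER;
	static long long i_size;	/* stand-in for i_size_read()/i_size_write() */

	static void maybe_extend(long long end_of_write)
	{
		if (end_of_write <= i_size)	/* cheap lockless check */
			return;
		pthread_mutex_lock(&size_lock);
		if (end_of_write > i_size)	/* re-check under the lock */
			i_size = end_of_write;
		pthread_mutex_unlock(&size_lock);
	}

	int main(void)
	{
		maybe_extend(8192);
		printf("i_size=%lld\n", i_size);	/* i_size=8192 */
		return 0;
	}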
223 struct afs_vnode *vnode = AFS_FS_I(mapping->host); in afs_kill_pages()
226 pgoff_t last = (start + len - 1) / PAGE_SIZE, next; in afs_kill_pages()
229 vnode->fid.vid, vnode->fid.vnode, len, start); in afs_kill_pages()
245 generic_error_remove_page(mapping, &folio->page); in afs_kill_pages()
261 struct afs_vnode *vnode = AFS_FS_I(mapping->host); in afs_redirty_pages()
264 pgoff_t last = (start + len - 1) / PAGE_SIZE, next; in afs_redirty_pages()
267 vnode->fid.vid, vnode->fid.vnode, len, start); in afs_redirty_pages()
288 * completion of write to server
292 struct address_space *mapping = vnode->netfs.inode.i_mapping; in afs_pages_written_back()
296 XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); in afs_pages_written_back()
299 vnode->fid.vid, vnode->fid.vnode, len, start); in afs_pages_written_back()
303 end = (start + len - 1) / PAGE_SIZE; in afs_pages_written_back()
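afs_pages_written_back() converts the stored byte range back to an inclusive page-index range before walking the xarray, as the divisions above show. The same arithmetic in a self-contained form, assuming 4 KiB pages for illustration:

	#include <stdio.h>

	#define PAGE_SIZE 4096ULL

	int main(void)
	{
		unsigned long long start = 5000, len = 9000;
		unsigned long long first = start / PAGE_SIZE;
		unsigned long long last  = (start + len - 1) / PAGE_SIZE;

		printf("pages %llu..%llu\n", first, last);	/* pages 1..3 */
		return 0;
	}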
332 int ret = -ENOKEY, ret2; in afs_get_writeback_key()
334 spin_lock(&vnode->wb_lock); in afs_get_writeback_key()
336 p = (*_wbk)->vnode_link.next; in afs_get_writeback_key()
338 p = vnode->wb_keys.next; in afs_get_writeback_key()
340 while (p != &vnode->wb_keys) { in afs_get_writeback_key()
342 _debug("wbk %u", key_serial(wbk->key)); in afs_get_writeback_key()
343 ret2 = key_validate(wbk->key); in afs_get_writeback_key()
345 refcount_inc(&wbk->usage); in afs_get_writeback_key()
346 _debug("USE WB KEY %u", key_serial(wbk->key)); in afs_get_writeback_key()
351 if (ret == -ENOKEY) in afs_get_writeback_key()
353 p = p->next; in afs_get_writeback_key()
356 spin_unlock(&vnode->wb_lock); in afs_get_writeback_key()
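afs_get_writeback_key() resumes the walk of vnode->wb_keys after the previously used key and returns the first one that still validates. A sketch of that cycling, with an array and an is_valid() stub standing in for the kernel's list and key_validate() (ENOKEY is the Linux errno used when nothing is left):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool is_valid(int key) { return key >= 0; }	/* key_validate() stub */

	static int next_writeback_key(const int *keys, int nr, int *pos)
	{
		for (int i = *pos + 1; i < nr; i++) {	/* resume after last attempt */
			if (is_valid(keys[i])) {
				*pos = i;
				return keys[i];
			}
		}
		return -ENOKEY;				/* no usable key left */
	}

	int main(void)
	{
		int keys[] = { -1, 7, 9 }, pos = -1;
		int k = next_writeback_key(keys, 3, &pos);

		printf("key=%d pos=%d\n", k, pos);	/* key=7 pos=1 */
		return 0;
	}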
365 struct afs_vnode *vnode = op->file[0].vnode; in afs_store_data_success()
367 op->ctime = op->file[0].scb.status.mtime_client; in afs_store_data_success()
368 afs_vnode_commit_status(op, &op->file[0]); in afs_store_data_success()
369 if (op->error == 0) { in afs_store_data_success()
370 if (!op->store.laundering) in afs_store_data_success()
371 afs_pages_written_back(vnode, op->store.pos, op->store.size); in afs_store_data_success()
373 atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes); in afs_store_data_success()
384 * write to a file
392 int ret = -ENOKEY; in afs_store_data()
395 vnode->volume->name, in afs_store_data()
396 vnode->fid.vid, in afs_store_data()
397 vnode->fid.vnode, in afs_store_data()
398 vnode->fid.unique, in afs_store_data()
407 op = afs_alloc_operation(wbk->key, vnode->volume); in afs_store_data()
410 return -ENOMEM; in afs_store_data()
414 op->file[0].dv_delta = 1; in afs_store_data()
415 op->file[0].modification = true; in afs_store_data()
416 op->store.pos = pos; in afs_store_data()
417 op->store.size = size; in afs_store_data()
418 op->store.laundering = laundering; in afs_store_data()
419 op->flags |= AFS_OPERATION_UNINTR; in afs_store_data()
420 op->ops = &afs_store_data_operation; in afs_store_data()
425 op->store.write_iter = iter; in afs_store_data()
426 op->store.i_size = max(pos + size, vnode->netfs.remote_i_size); in afs_store_data()
427 op->mtime = vnode->netfs.inode.i_mtime; in afs_store_data()
431 switch (op->error) { in afs_store_data()
432 case -EACCES: in afs_store_data()
433 case -EPERM: in afs_store_data()
434 case -ENOKEY: in afs_store_data()
435 case -EKEYEXPIRED: in afs_store_data()
436 case -EKEYREJECTED: in afs_store_data()
437 case -EKEYREVOKED: in afs_store_data()
442 key_put(op->key); in afs_store_data()
443 op->key = key_get(wbk->key); in afs_store_data()
450 _leave(" = %d", op->error); in afs_store_data()
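The switch above retries the store with the next cached key only for permission and key errors; any other result is final. A condensed user-space sketch of that policy (store_once() is a stub for the fileserver operation; the errno values are Linux's):

	#include <errno.h>
	#include <stdio.h>

	static int store_once(int key) { return key == 2 ? 0 : -EKEYEXPIRED; }

	static int store_data(const int *keys, int nr)
	{
		int err = -ENOKEY;

		for (int i = 0; i < nr; i++) {
			err = store_once(keys[i]);
			switch (err) {
			case -EACCES:
			case -EPERM:
			case -ENOKEY:
			case -EKEYEXPIRED:
			case -EKEYREJECTED:
			case -EKEYREVOKED:
				continue;	/* try the next key */
			default:
				return err;	/* success or a hard failure */
			}
		}
		return err;
	}

	int main(void)
	{
		int keys[] = { 1, 2 };

		printf("ret=%d\n", store_data(keys, 2));	/* ret=0 */
		return 0;
	}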
455 * Extend the region to be written back to include subsequent contiguously dirty pages if possible, but don't sleep while doing so.
480 XA_STATE(xas, &mapping->i_pages, index); in afs_extend_writeback()
485 * under the RCU read lock - but we can't clear the dirty flags there if any of those pages are mapped. in afs_extend_writeback()
533 filler = psize - t; in afs_extend_writeback()
566 *_count -= folio_nr_pages(folio); in afs_extend_writeback()
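afs_extend_writeback() grows the write forwards over immediately-following dirty pages, debiting the nr_to_write budget for each one absorbed (the *_count decrement above). A stand-in sketch with a plain bool array in place of the xarray of folios:

	#include <stdbool.h>
	#include <stdio.h>

	static long extend(const bool *dirty, long nr_pages, long next, long *budget)
	{
		long len = 0;	/* extra pages absorbed into this write */

		while (next + len < nr_pages && dirty[next + len] && *budget > 0) {
			len++;
			(*budget)--;	/* each absorbed page consumes budget */
		}
		return len;
	}

	int main(void)
	{
		bool dirty[] = { true, true, true, false, true };
		long budget = 8;

		printf("absorbed=%ld budget=%ld\n", extend(dirty, 5, 1, &budget), budget);
		/* absorbed=2 budget=6: pages 1 and 2 join the write, page 3 is clean */
		return 0;
	}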
578 * Synchronously write back the locked page and any subsequent non-locked dirty pages.
586 struct afs_vnode *vnode = AFS_FS_I(mapping->host); in afs_write_back_from_locked_folio()
590 loff_t i_size = i_size_read(&vnode->netfs.inode); in afs_write_back_from_locked_folio()
591 bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); in afs_write_back_from_locked_folio()
593 long count = wbc->nr_to_write; in afs_write_back_from_locked_folio()
596 _enter(",%lx,%llx-%llx", folio_index(folio), start, end); in afs_write_back_from_locked_folio()
602 count -= folio_nr_pages(folio); in afs_write_back_from_locked_folio()
614 len = to - offset; in afs_write_back_from_locked_folio()
617 /* Trim the write to the EOF; the extra data is ignored, and the size of the write is capped. in afs_write_back_from_locked_folio()
621 max_len = min_t(unsigned long long, max_len, end - start + 1); in afs_write_back_from_locked_folio()
622 max_len = min_t(unsigned long long, max_len, i_size - start); in afs_write_back_from_locked_folio()
639 _debug("write back %x @%llx [%llx]", len, start, i_size); in afs_write_back_from_locked_folio()
641 /* Speculatively write to the cache. We have to fix this up later if the store fails. in afs_write_back_from_locked_folio()
646 iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len); in afs_write_back_from_locked_folio()
649 _debug("write discard %x @%llx [%llx]", len, start, i_size); in afs_write_back_from_locked_folio()
659 wbc->nr_to_write = count; in afs_write_back_from_locked_folio()
666 case -EACCES: in afs_write_back_from_locked_folio()
667 case -EPERM: in afs_write_back_from_locked_folio()
668 case -ENOKEY: in afs_write_back_from_locked_folio()
669 case -EKEYEXPIRED: in afs_write_back_from_locked_folio()
670 case -EKEYREJECTED: in afs_write_back_from_locked_folio()
671 case -EKEYREVOKED: in afs_write_back_from_locked_folio()
672 case -ENETRESET: in afs_write_back_from_locked_folio()
677 case -EDQUOT: in afs_write_back_from_locked_folio()
678 case -ENOSPC: in afs_write_back_from_locked_folio()
680 mapping_set_error(mapping, -ENOSPC); in afs_write_back_from_locked_folio()
683 case -EROFS: in afs_write_back_from_locked_folio()
684 case -EIO: in afs_write_back_from_locked_folio()
685 case -EREMOTEIO: in afs_write_back_from_locked_folio()
686 case -EFBIG: in afs_write_back_from_locked_folio()
687 case -ENOENT: in afs_write_back_from_locked_folio()
688 case -ENOMEDIUM: in afs_write_back_from_locked_folio()
689 case -ENXIO: in afs_write_back_from_locked_folio()
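The two min_t() clamps shown earlier (file lines 621-622) bound the write both by the requested range and by the file size, so nothing past the EOF is sent. The same arithmetic in a self-contained form, with an assumed 256 KiB per-operation cap:

	#include <stdio.h>

	static unsigned long long min_u64(unsigned long long a, unsigned long long b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned long long start = 4096, end = 65535, i_size = 10000;
		unsigned long long max_len = 256ULL * 1024;	/* assumed per-op cap */

		max_len = min_u64(max_len, end - start + 1);	/* stay in the range */
		max_len = min_u64(max_len, i_size - start);	/* don't pass the EOF */
		printf("max_len=%llu\n", max_len);		/* max_len=5904 */
		return 0;
	}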
701 * write a region of pages back to the server
733 * the folio may be truncated or invalidated (changing page->mapping to NULL), or even swizzled in afs_writepages_region()
734 * back from swapper_space to tmpfs file mapping in afs_writepages_region()
737 if (wbc->sync_mode != WB_SYNC_NONE) { in afs_writepages_region()
748 if (folio->mapping != mapping || in afs_writepages_region()
758 if (wbc->sync_mode != WB_SYNC_NONE) { in afs_writepages_region()
767 if (wbc->sync_mode == WB_SYNC_NONE) { in afs_writepages_region()
794 } while (wbc->nr_to_write > 0); in afs_writepages_region()
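afs_writepages_region() treats a busy folio differently by sync mode: data-integrity writeback (WB_SYNC_ALL) must wait for it, while background writeback (WB_SYNC_NONE) may simply skip it. A sketch of that policy with user-space stand-ins for the enum and the folio state:

	#include <stdbool.h>
	#include <stdio.h>

	enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

	static bool should_wait(enum sync_mode mode, bool folio_busy)
	{
		if (!folio_busy)
			return false;
		return mode == WB_SYNC_ALL;	/* integrity writeback cannot skip */
	}

	int main(void)
	{
		printf("%d %d\n", should_wait(WB_SYNC_ALL, true),
		       should_wait(WB_SYNC_NONE, true));	/* 1 0 */
		return 0;
	}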
802 * write some of the pending data back to the server
807 struct afs_vnode *vnode = AFS_FS_I(mapping->host); in afs_writepages()
817 if (wbc->sync_mode == WB_SYNC_ALL) in afs_writepages()
818 down_read(&vnode->validate_lock); in afs_writepages()
819 else if (!down_read_trylock(&vnode->validate_lock)) in afs_writepages()
822 if (wbc->range_cyclic) { in afs_writepages()
823 start = mapping->writeback_index * PAGE_SIZE; in afs_writepages()
827 mapping->writeback_index = next / PAGE_SIZE; in afs_writepages()
828 if (start > 0 && wbc->nr_to_write > 0) { in afs_writepages()
832 mapping->writeback_index = in afs_writepages()
836 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { in afs_writepages()
839 if (wbc->nr_to_write > 0 && ret == 0) in afs_writepages()
840 mapping->writeback_index = next / PAGE_SIZE; in afs_writepages()
843 wbc->range_start, wbc->range_end, in afs_writepages()
847 up_read(&vnode->validate_lock); in afs_writepages()
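For range_cyclic writeback, afs_writepages() starts at the remembered writeback_index and, if the budget is not exhausted at EOF, wraps around to cover [0, start) before saving the index for next time. A sketch of that wraparound with a stub write_region() that just consumes budget:

	#include <stdio.h>

	#define PAGE_SIZE 4096ULL

	static long long write_region(unsigned long long start, unsigned long long end,
				      long *budget, unsigned long long *next)
	{
		(void)end;
		*budget -= 4;		/* pretend four pages were written */
		*next = start + 4 * PAGE_SIZE;
		return 0;
	}

	int main(void)
	{
		unsigned long long writeback_index = 5, next = 0;
		unsigned long long start = writeback_index * PAGE_SIZE;
		long budget = 16;

		write_region(start, ~0ULL, &budget, &next);	/* [start, EOF] first */
		if (start > 0 && budget > 0)
			write_region(0, start - 1, &budget, &next); /* wrap to [0, start) */
		printf("next index %llu, budget %ld\n", next / PAGE_SIZE, budget);
		return 0;
	}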
853 * write to an AFS file
857 struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); in afs_file_write()
858 struct afs_file *af = iocb->ki_filp->private_data; in afs_file_write()
863 vnode->fid.vid, vnode->fid.vnode, count); in afs_file_write()
865 if (IS_SWAPFILE(&vnode->netfs.inode)) { in afs_file_write()
867 "AFS: Attempt to write to active swap file!\n"); in afs_file_write()
868 return -EBUSY; in afs_file_write()
874 result = afs_validate(vnode, af->key); in afs_file_write()
885 * flush any dirty pages for this process, and check for write errors.
886 * - the return status from this call provides a reliable indication of
887 * whether any write errors occurred for this process.
892 struct afs_file *af = file->private_data; in afs_fsync()
896 vnode->fid.vid, vnode->fid.vnode, file, in afs_fsync()
899 ret = afs_validate(vnode, af->key); in afs_fsync()
907 * notification that a previously read-only page is about to become writable
908 * - if it returns an error, the caller will deliver a bus error signal
912 struct folio *folio = page_folio(vmf->page); in afs_page_mkwrite()
913 struct file *file = vmf->vma->vm_file; in afs_page_mkwrite()
916 struct afs_file *af = file->private_data; in afs_page_mkwrite()
920 _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio)); in afs_page_mkwrite()
922 afs_validate(vnode, af->key); in afs_page_mkwrite()
924 sb_start_pagefault(inode->i_sb); in afs_page_mkwrite()
927 * Wait for the page to be written to the cache before it may be modified. We then assume the entire page will need writing back. in afs_page_mkwrite()
941 /* We mustn't change folio->private until writeback is complete as that in afs_page_mkwrite()
942 * details the portion of the page we need to write back and we might in afs_page_mkwrite()
963 sb_end_pagefault(inode->i_sb); in afs_page_mkwrite()
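afs_page_mkwrite() must let any writeback of the folio finish before the private dirty-range record can be replaced, which is why the fault handler waits first and only then redirties the whole page. A single-threaded stand-in sketch of that ordering (wait_on_writeback() here simply completes the simulated writeback):

	#include <stdbool.h>
	#include <stdio.h>

	struct folio_sim {
		bool writeback;			/* under writeback? */
		unsigned int priv_from, priv_to; /* dirty range kept in ->private */
	};

	static void wait_on_writeback(struct folio_sim *f)
	{
		/* stand-in for folio_wait_writeback(); writeback completes here */
		f->writeback = false;
	}

	static void mkwrite(struct folio_sim *f)
	{
		if (f->writeback)
			wait_on_writeback(f);	/* don't touch ->private before this */
		f->priv_from = 0;
		f->priv_to = 4096;	/* assume the whole page will be redirtied */
		printf("dirty %u..%u\n", f->priv_from, f->priv_to);
	}

	int main(void)
	{
		struct folio_sim f = { .writeback = true };

		mkwrite(&f);
		return 0;
	}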
968 * Prune the keys cached for writeback. This takes vnode->wb_lock itself; callers must not already hold it.
976 spin_lock(&vnode->wb_lock); in afs_prune_wb_keys()
978 if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) && in afs_prune_wb_keys()
979 !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) { in afs_prune_wb_keys()
980 list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) { in afs_prune_wb_keys()
981 if (refcount_read(&wbk->usage) == 1) in afs_prune_wb_keys()
982 list_move(&wbk->vnode_link, &graveyard); in afs_prune_wb_keys()
986 spin_unlock(&vnode->wb_lock); in afs_prune_wb_keys()
990 list_del(&wbk->vnode_link); in afs_prune_wb_keys()
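afs_prune_wb_keys() uses the common graveyard pattern: entries to be destroyed are moved onto a private list while the lock is held, then freed only after it is dropped, keeping the critical section short. A user-space rendition with a singly linked list and a pthread mutex in place of list_head and the spinlock:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct wb_key { int refs; struct wb_key *next; };

	static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;

	static void prune(struct wb_key **list)
	{
		struct wb_key *graveyard = NULL, **pp, *p;

		pthread_mutex_lock(&wb_lock);
		for (pp = list; (p = *pp); ) {
			if (p->refs == 1) {	/* unused: move to graveyard */
				*pp = p->next;
				p->next = graveyard;
				graveyard = p;
			} else {
				pp = &p->next;
			}
		}
		pthread_mutex_unlock(&wb_lock);

		while ((p = graveyard)) {	/* free outside the lock */
			graveyard = p->next;
			free(p);
		}
	}

	int main(void)
	{
		struct wb_key *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

		a->refs = 1; b->refs = 2; a->next = b;
		prune(&a);
		printf("head refs=%d\n", a->refs);	/* head refs=2: only b remains */
		free(b);
		return 0;
	}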
1007 _enter("{%lx}", folio->index); in afs_launder_folio()
1018 bvec_set_folio(&bv, folio, t - f, f); in afs_launder_folio()
1040 transferred_or_error != -ENOBUFS) in afs_write_to_cache_done()
1045 * Save the write to the cache also.
1052 vnode->netfs.inode.i_mapping, start, len, i_size, in afs_write_to_cache()