// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = page->index;
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		ASSERTCMP(f, <=, t);

		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = pos & (thp_size(page) - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	ASSERT(PageUptodate(page));

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(page, f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
	} else {
		priv = afs_page_dirty(page, from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	}

	if (set_page_dirty(page))
		_debug("dirtied %lx", page->index);

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("kill %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			ClearPageUptodate(page);
			end_page_writeback(page);
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("redirty %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct page *page;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, page, end) {
		if (!PageWriteback(page)) {
			kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
			ASSERT(PageWriteback(page));
		}

		trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
		detach_page_private(page);
		page_endio(page, true, 0);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return 0;
}

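/*
 * Process the result of storing data back to the server: commit the returned
 * file status and, unless the store was done to launder a page, clear the
 * writeback state on the pages that were just stored.
 */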
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

_enter("%s{%llx:%llu.%u},%llx,%llx", 358d2ddc776SDavid Howells vnode->volume->name, 359d2ddc776SDavid Howells vnode->fid.vid, 360d2ddc776SDavid Howells vnode->fid.vnode, 361d2ddc776SDavid Howells vnode->fid.unique, 362bd80d8a8SDavid Howells size, pos); 363d2ddc776SDavid Howells 364e49c7b2fSDavid Howells ret = afs_get_writeback_key(vnode, &wbk); 365e49c7b2fSDavid Howells if (ret) { 3664343d008SDavid Howells _leave(" = %d [no keys]", ret); 3674343d008SDavid Howells return ret; 368d2ddc776SDavid Howells } 369d2ddc776SDavid Howells 370e49c7b2fSDavid Howells op = afs_alloc_operation(wbk->key, vnode->volume); 371e49c7b2fSDavid Howells if (IS_ERR(op)) { 372e49c7b2fSDavid Howells afs_put_wb_key(wbk); 373e49c7b2fSDavid Howells return -ENOMEM; 374d2ddc776SDavid Howells } 375d2ddc776SDavid Howells 376bd80d8a8SDavid Howells i_size = i_size_read(&vnode->vfs_inode); 377bd80d8a8SDavid Howells 378e49c7b2fSDavid Howells afs_op_set_vnode(op, 0, vnode); 379e49c7b2fSDavid Howells op->file[0].dv_delta = 1; 38022650f14SDavid Howells op->file[0].modification = true; 381bd80d8a8SDavid Howells op->store.write_iter = iter; 382bd80d8a8SDavid Howells op->store.pos = pos; 383bd80d8a8SDavid Howells op->store.size = size; 384bd80d8a8SDavid Howells op->store.i_size = max(pos + size, i_size); 385d383e346SDavid Howells op->store.laundering = laundering; 386b3597945SDavid Howells op->mtime = vnode->vfs_inode.i_mtime; 387811f04baSDavid Howells op->flags |= AFS_OPERATION_UNINTR; 388e49c7b2fSDavid Howells op->ops = &afs_store_data_operation; 389e49c7b2fSDavid Howells 390e49c7b2fSDavid Howells try_next_key: 391e49c7b2fSDavid Howells afs_begin_vnode_operation(op); 392e49c7b2fSDavid Howells afs_wait_for_operation(op); 393e49c7b2fSDavid Howells 394e49c7b2fSDavid Howells switch (op->error) { 3954343d008SDavid Howells case -EACCES: 3964343d008SDavid Howells case -EPERM: 3974343d008SDavid Howells case -ENOKEY: 3984343d008SDavid Howells case -EKEYEXPIRED: 3994343d008SDavid Howells case -EKEYREJECTED: 4004343d008SDavid Howells case -EKEYREVOKED: 4014343d008SDavid Howells _debug("next"); 402e49c7b2fSDavid Howells 403e49c7b2fSDavid Howells ret = afs_get_writeback_key(vnode, &wbk); 404e49c7b2fSDavid Howells if (ret == 0) { 405e49c7b2fSDavid Howells key_put(op->key); 406e49c7b2fSDavid Howells op->key = key_get(wbk->key); 4074343d008SDavid Howells goto try_next_key; 4084343d008SDavid Howells } 409e49c7b2fSDavid Howells break; 410e49c7b2fSDavid Howells } 4114343d008SDavid Howells 4124343d008SDavid Howells afs_put_wb_key(wbk); 413e49c7b2fSDavid Howells _leave(" = %d", op->error); 414e49c7b2fSDavid Howells return afs_put_operation(op); 415d2ddc776SDavid Howells } 416d2ddc776SDavid Howells 417d2ddc776SDavid Howells /* 418810caa3eSDavid Howells * Extend the region to be written back to include subsequent contiguously 419810caa3eSDavid Howells * dirty pages if possible, but don't sleep while doing so. 420810caa3eSDavid Howells * 421810caa3eSDavid Howells * If this page holds new content, then we can include filler zeros in the 422810caa3eSDavid Howells * writeback. 
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct page *page;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, page, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, page))
				continue;
			if (xa_is_value(page))
				break;
			if (page->index != index)
				break;

			if (!page_cache_get_speculative(page)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(page != xas_reload(&xas)))
				break;

			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			psize = thp_size(page);
			priv = page_private(page);
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
			if (f != 0 && !new_content) {
				unlock_page(page);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += thp_nr_pages(page);
			if (!pagevec_add(&pvec, page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			page = pvec.pages[i];
			trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();

			*_count -= thp_nr_pages(page);
			unlock_page(page);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
					       struct writeback_control *wbc,
					       struct page *page,
					       loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", page->index, start, end);

	if (test_set_page_writeback(page))
		BUG();

	count -= thp_nr_pages(page);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = page_private(page);
	offset = afs_page_dirty_from(page, priv);
	to = afs_page_dirty_to(page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"), page);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == thp_size(page) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(page);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", page->index);

	start = page->index * PAGE_SIZE;
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct page *page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */

		_debug("wback %lx", page->index);

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = lock_page_killable(page);
			if (ret < 0) {
				put_page(page);
				return ret;
			}
		} else {
			if (!trylock_page(page)) {
				put_page(page);
				return 0;
			}
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			start += thp_size(page);
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next / PAGE_SIZE;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = thp_head(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    wait_on_page_fscache_killable(page) < 0)
		goto out;
#endif

	if (wait_on_page_writeback_killable(page))
		goto out;

	if (lock_page_killable(page) < 0)
		goto out;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (wait_on_page_writeback_killable(page) < 0) {
		unlock_page(page);
		goto out;
	}

	priv = afs_page_dirty(page, 0, thp_size(page));
	priv = afs_page_dirty_mmapped(priv);
	if (PagePrivate(page)) {
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
	} else {
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = thp_size(page);
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
		}

		bv[0].bv_page = page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
		ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
				     true);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
	detach_page_private(page);
	wait_on_page_fscache(page);
	return ret;
}