// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = page->index;
	from = pos - index * PAGE_SIZE;
	to = from + len;

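	/* from/to are byte offsets within the page delimiting the region this
	 * write will modify; the dirty range recorded in page->private uses
	 * the same byte-offset encoding.
	 */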
try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		ASSERTCMP(f, <=, t);

		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = pos & (thp_size(page) - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (!PageUptodate(page)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		SetPageUptodate(page);
	}

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
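		/* Reread i_size under the callback lock: another writer may
		 * have extended the file since the unlocked read above.
		 */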
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(page, f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
	} else {
		priv = afs_page_dirty(page, from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	}

	if (set_page_dirty(page))
		_debug("dirtied %lx", page->index);

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("kill %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			ClearPageUptodate(page);
			end_page_writeback(page);
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("redirty %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct page *page;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, page, end) {
		if (!PageWriteback(page)) {
			kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
			ASSERT(PageWriteback(page));
		}

		trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
		detach_page_private(page);
		page_endio(page, true, 0);
	}

	rcu_read_unlock();

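	/* Now that pages have been cleaned, writeback keys that are no longer
	 * pinned by any dirty or writeback pages can be discarded.
	 */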
	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

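	/* Try the store with each cached writeback key in turn: if the server
	 * rejects the current key for permission reasons, switch to the next
	 * key and reissue the operation.
	 */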
try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct page *page;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, page, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, page))
				continue;
			if (xa_is_value(page))
				break;
			if (page->index != index)
				break;

			if (!page_cache_get_speculative(page)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(page != xas_reload(&xas)))
				break;

			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			psize = thp_size(page);
			priv = page_private(page);
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
			if (f != 0 && !new_content) {
				unlock_page(page);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += thp_nr_pages(page);
			if (!pagevec_add(&pvec, page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

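		/* Each page in the batch is still locked from the gathering
		 * pass and carries the reference taken there; the lock is
		 * dropped once writeback is marked and the references are
		 * released with the pagevec.
		 */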
		for (i = 0; i < pagevec_count(&pvec); i++) {
			page = pvec.pages[i];
			trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();

			*_count -= thp_nr_pages(page);
			unlock_page(page);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
					       struct writeback_control *wbc,
					       struct page *page,
					       loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", page->index, start, end);

	if (test_set_page_writeback(page))
		BUG();

	count -= thp_nr_pages(page);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = page_private(page);
	offset = afs_page_dirty_from(page, priv);
	to = afs_page_dirty_to(page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"), page);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == thp_size(page) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(page);

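	/* Store the region to the server with an iterator that walks the
	 * pagecache directly, unless the dirty data now lies entirely beyond
	 * the EOF, in which case it can simply be discarded.
	 */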
	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", page->index);

	start = page->index * PAGE_SIZE;
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct page *page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */

		_debug("wback %lx", page->index);

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = lock_page_killable(page);
			if (ret < 0) {
				put_page(page);
				return ret;
			}
		} else {
			if (!trylock_page(page)) {
				put_page(page);
				return 0;
			}
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			start += thp_size(page);
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

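		/* The page is locked and dirty: clear the dirty flag and
		 * write it back, potentially extending the store over
		 * subsequent contiguous dirty pages.
		 */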
		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next / PAGE_SIZE;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

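	/* The generic write path copies the data into the pagecache and marks
	 * the pages dirty; they are written back to the server later via
	 * afs_writepages().
	 */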
	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = thp_head(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    wait_on_page_fscache_killable(page) < 0)
		return VM_FAULT_RETRY;
#endif

	if (wait_on_page_writeback_killable(page))
		return VM_FAULT_RETRY;

	if (lock_page_killable(page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (wait_on_page_writeback_killable(page) < 0) {
		unlock_page(page);
		return VM_FAULT_RETRY;
	}

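	/* An mmapped write may touch any part of the page, so record the
	 * whole page as dirty in page->private.
	 */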
	priv = afs_page_dirty(page, 0, thp_size(page));
	priv = afs_page_dirty_mmapped(priv);
	if (PagePrivate(page)) {
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
	} else {
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
	}
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  The vnode->wb_lock is taken here, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = thp_size(page);
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
		}

		bv[0].bv_page = page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
		ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
				     true);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
	detach_page_private(page);
	wait_on_page_fscache(page);
	return ret;
}