// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * Handle completion of a read operation to fill a page.
 */
static void afs_fill_hole(struct afs_read *req)
{
	if (iov_iter_count(req->iter) > 0)
		/* The read was short - clear the excess buffer. */
		iov_iter_zero(iov_iter_count(req->iter), req->iter);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 *
 * @file: File whose key and mapping back the page.
 * @pos:  File position the fill starts at.
 * @len:  Number of bytes to make valid.
 * @page: The pagecache page being prepared.
 *
 * If the region lies entirely beyond the current EOF, the page is simply
 * zero-filled locally; otherwise the data is fetched from the server and
 * any short read is zero-padded by afs_fill_hole().
 *
 * Returns 0 on success or a negative errno; -ENOENT from the server is
 * converted to -ESTALE after marking the vnode deleted.
 */
static int afs_fill_page(struct file *file,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_read *req;
	size_t p;
	void *data;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	if (pos >= vnode->vfs_inode.i_size) {
		/* Beyond EOF: nothing to fetch, just zero the range. */
		p = pos & ~PAGE_MASK;
		ASSERTCMP(p + len, <=, PAGE_SIZE);
		data = kmap(page);
		memset(data + p, 0, len);
		kunmap(page);
		return 0;
	}

	req = kzalloc(sizeof(struct afs_read), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->vnode = vnode;
	req->done = afs_fill_hole;	/* zero-pad a short read on completion */
	req->key = key_get(afs_file_key(file));
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->iter = &req->def_iter;
	/* Read directly into the pagecache page via an xarray iterator. */
	iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len);

	ret = afs_fetch_data(vnode, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * prepare to perform part of a write to a page
 *
 * Grabs (and locks) the pagecache page covering @pos, brings it up to date
 * if the write is partial, and checks whether the new write can be merged
 * with any region already recorded dirty in page_private().  If it cannot
 * be merged (and the file is not being filled with all-new content), the
 * conflicting dirty data is flushed to the server first.
 *
 * On success, returns 0 with the locked page in *_page.
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	page = grab_cache_page_write_begin(mapping, pos / PAGE_SIZE, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		/* Partial-page write into a non-uptodate page: fetch the
		 * whole page first so the untouched parts are valid.
		 */
		ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

#ifdef CONFIG_AFS_FSCACHE
	wait_on_page_fscache(page);
#endif

	index = page->index;
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		ASSERTCMP(f, <=, t);

		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);	/* unlocks the page */
	if (ret < 0)
		goto error;

	ret = lock_page_killable(page);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	put_page(page);
	_leave(" = %d", ret);
	return ret;
}
/*
 * finalise part of a write to a page
 *
 * Called with the page still locked from afs_write_begin().  Updates i_size
 * if the write extended the file (under cb_lock to serialise against other
 * extenders), makes the page fully uptodate if necessary, then merges the
 * freshly-written byte range into the dirty region stored in page_private()
 * and marks the page dirty.
 *
 * Returns the number of bytes copied, or a negative errno.
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = pos & (thp_size(page) - 1);
	unsigned int t, to = from + copied;
	loff_t i_size, maybe_i_size;
	int ret = 0;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	if (copied == 0)
		goto out;

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		/* Re-check under the seqlock so concurrent extenders don't
		 * shrink each other's update.
		 */
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(file, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	if (PagePrivate(page)) {
		/* Widen the existing dirty region to cover this write. */
		priv = page_private(page);
		f = afs_page_dirty_from(page, priv);
		t = afs_page_dirty_to(page, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_page_dirty(page, f, t);
		set_page_private(page, priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
	} else {
		priv = afs_page_dirty(page, from, to);
		attach_page_private(page, (void *)priv);
		trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
	}

	if (set_page_dirty(page))
		_debug("dirtied %lx", page->index);
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}
/*
 * kill all the pages in the given range
 *
 * Used on unrecoverable writeback failure: each page in [start, start+len)
 * has its uptodate flag cleared, writeback ended and is then removed from
 * the pagecache via generic_error_remove_page().
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("kill %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			/* Stop once we pass the end of the range. */
			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			ClearPageUptodate(page);
			end_page_writeback(page);
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 *
 * Used on retryable writeback failure (e.g. ENOSPC, key errors): each page
 * in [start, start+len) is handed back to the writeback machinery and its
 * writeback state ended so it will be attempted again later.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned int loop, psize;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	pagevec_init(&pv);

	do {
		_debug("redirty %llx @%llx", len, start);

		pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
					      PAGEVEC_SIZE, pv.pages);
		if (pv.nr == 0)
			break;

		for (loop = 0; loop < pv.nr; loop++) {
			struct page *page = pv.pages[loop];

			/* Stop once we pass the end of the range. */
			if (page->index * PAGE_SIZE >= start + len)
				break;

			psize = thp_size(page);
			start += psize;
			len -= psize;
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
		}

		__pagevec_release(&pv);
	} while (len > 0);

	_leave("");
}
33131143d5dSDavid Howells 33231143d5dSDavid Howells _leave(""); 33331143d5dSDavid Howells } 33431143d5dSDavid Howells 33531143d5dSDavid Howells /* 336a58823acSDavid Howells * completion of write to server 337a58823acSDavid Howells */ 338*e87b03f5SDavid Howells static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len) 339a58823acSDavid Howells { 340bd80d8a8SDavid Howells struct address_space *mapping = vnode->vfs_inode.i_mapping; 341bd80d8a8SDavid Howells struct page *page; 342*e87b03f5SDavid Howells pgoff_t end; 343bd80d8a8SDavid Howells 344*e87b03f5SDavid Howells XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); 345a58823acSDavid Howells 346*e87b03f5SDavid Howells _enter("{%llx:%llu},{%x @%llx}", 347*e87b03f5SDavid Howells vnode->fid.vid, vnode->fid.vnode, len, start); 348a58823acSDavid Howells 349bd80d8a8SDavid Howells rcu_read_lock(); 350a58823acSDavid Howells 351*e87b03f5SDavid Howells end = (start + len - 1) / PAGE_SIZE; 352*e87b03f5SDavid Howells xas_for_each(&xas, page, end) { 353*e87b03f5SDavid Howells if (!PageWriteback(page)) { 354*e87b03f5SDavid Howells kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end); 355bd80d8a8SDavid Howells ASSERT(PageWriteback(page)); 356*e87b03f5SDavid Howells } 357a58823acSDavid Howells 358bd80d8a8SDavid Howells trace_afs_page_dirty(vnode, tracepoint_string("clear"), page); 359*e87b03f5SDavid Howells detach_page_private(page); 360bd80d8a8SDavid Howells page_endio(page, true, 0); 361a58823acSDavid Howells } 362bd80d8a8SDavid Howells 363bd80d8a8SDavid Howells rcu_read_unlock(); 364a58823acSDavid Howells 365a58823acSDavid Howells afs_prune_wb_keys(vnode); 366a58823acSDavid Howells _leave(""); 367a58823acSDavid Howells } 368a58823acSDavid Howells 369a58823acSDavid Howells /* 370e49c7b2fSDavid Howells * Find a key to use for the writeback. We cached the keys used to author the 371e49c7b2fSDavid Howells * writes on the vnode. 
*_wbk will contain the last writeback key used or NULL 372e49c7b2fSDavid Howells * and we need to start from there if it's set. 373e49c7b2fSDavid Howells */ 374e49c7b2fSDavid Howells static int afs_get_writeback_key(struct afs_vnode *vnode, 375e49c7b2fSDavid Howells struct afs_wb_key **_wbk) 376e49c7b2fSDavid Howells { 377e49c7b2fSDavid Howells struct afs_wb_key *wbk = NULL; 378e49c7b2fSDavid Howells struct list_head *p; 379e49c7b2fSDavid Howells int ret = -ENOKEY, ret2; 380e49c7b2fSDavid Howells 381e49c7b2fSDavid Howells spin_lock(&vnode->wb_lock); 382e49c7b2fSDavid Howells if (*_wbk) 383e49c7b2fSDavid Howells p = (*_wbk)->vnode_link.next; 384e49c7b2fSDavid Howells else 385e49c7b2fSDavid Howells p = vnode->wb_keys.next; 386e49c7b2fSDavid Howells 387e49c7b2fSDavid Howells while (p != &vnode->wb_keys) { 388e49c7b2fSDavid Howells wbk = list_entry(p, struct afs_wb_key, vnode_link); 389e49c7b2fSDavid Howells _debug("wbk %u", key_serial(wbk->key)); 390e49c7b2fSDavid Howells ret2 = key_validate(wbk->key); 391e49c7b2fSDavid Howells if (ret2 == 0) { 392e49c7b2fSDavid Howells refcount_inc(&wbk->usage); 393e49c7b2fSDavid Howells _debug("USE WB KEY %u", key_serial(wbk->key)); 394e49c7b2fSDavid Howells break; 395e49c7b2fSDavid Howells } 396e49c7b2fSDavid Howells 397e49c7b2fSDavid Howells wbk = NULL; 398e49c7b2fSDavid Howells if (ret == -ENOKEY) 399e49c7b2fSDavid Howells ret = ret2; 400e49c7b2fSDavid Howells p = p->next; 401e49c7b2fSDavid Howells } 402e49c7b2fSDavid Howells 403e49c7b2fSDavid Howells spin_unlock(&vnode->wb_lock); 404e49c7b2fSDavid Howells if (*_wbk) 405e49c7b2fSDavid Howells afs_put_wb_key(*_wbk); 406e49c7b2fSDavid Howells *_wbk = wbk; 407e49c7b2fSDavid Howells return 0; 408e49c7b2fSDavid Howells } 409e49c7b2fSDavid Howells 410e49c7b2fSDavid Howells static void afs_store_data_success(struct afs_operation *op) 411e49c7b2fSDavid Howells { 412e49c7b2fSDavid Howells struct afs_vnode *vnode = op->file[0].vnode; 413e49c7b2fSDavid Howells 414da8d0755SDavid Howells 
/*
 * Operation success handler for a StoreData RPC: commit the returned vnode
 * status and, unless this store was part of launder_page, clear the
 * writeback state of the stored pages and bump the store statistics.
 */
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

/* Operation table for storing data back to the server (AFS and YFS). */
static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};
/*
 * write to a file
 *
 * Issue a StoreData operation writing the contents of @iter to the server
 * at file position @pos.  @laundering indicates the store is on behalf of
 * launder_page, in which case the pages' writeback state is not cleared by
 * the success handler.
 *
 * If the operation fails with a permission/key error, the next cached
 * writeback key is tried until none remain.
 *
 * Returns 0 on success or a negative errno.
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* Key was no good - rotate to the next cached writeback key
		 * and retry the whole operation.
		 */
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}
532*e87b03f5SDavid Howells */ 533*e87b03f5SDavid Howells rcu_read_lock(); 534*e87b03f5SDavid Howells 535*e87b03f5SDavid Howells xas_for_each(&xas, page, ULONG_MAX) { 536*e87b03f5SDavid Howells stop = true; 537*e87b03f5SDavid Howells if (xas_retry(&xas, page)) 538*e87b03f5SDavid Howells continue; 539*e87b03f5SDavid Howells if (xa_is_value(page)) 540*e87b03f5SDavid Howells break; 541*e87b03f5SDavid Howells if (page->index != index) 542*e87b03f5SDavid Howells break; 543*e87b03f5SDavid Howells 544*e87b03f5SDavid Howells if (!page_cache_get_speculative(page)) { 545*e87b03f5SDavid Howells xas_reset(&xas); 546*e87b03f5SDavid Howells continue; 54731143d5dSDavid Howells } 54831143d5dSDavid Howells 549*e87b03f5SDavid Howells /* Has the page moved or been split? */ 550*e87b03f5SDavid Howells if (unlikely(page != xas_reload(&xas))) 5515a813276SDavid Howells break; 552*e87b03f5SDavid Howells 553529ae9aaSNick Piggin if (!trylock_page(page)) 55431143d5dSDavid Howells break; 5554343d008SDavid Howells if (!PageDirty(page) || PageWriteback(page)) { 55631143d5dSDavid Howells unlock_page(page); 55731143d5dSDavid Howells break; 55831143d5dSDavid Howells } 5594343d008SDavid Howells 560*e87b03f5SDavid Howells psize = thp_size(page); 5614343d008SDavid Howells priv = page_private(page); 56267d78a6fSDavid Howells f = afs_page_dirty_from(page, priv); 56367d78a6fSDavid Howells t = afs_page_dirty_to(page, priv); 564810caa3eSDavid Howells if (f != 0 && !new_content) { 5654343d008SDavid Howells unlock_page(page); 5664343d008SDavid Howells break; 5674343d008SDavid Howells } 5684343d008SDavid Howells 569*e87b03f5SDavid Howells len += filler + t; 570*e87b03f5SDavid Howells filler = psize - t; 571*e87b03f5SDavid Howells if (len >= max_len || *_count <= 0) 572*e87b03f5SDavid Howells stop = true; 573*e87b03f5SDavid Howells else if (t == psize || new_content) 574*e87b03f5SDavid Howells stop = false; 575*e87b03f5SDavid Howells 576*e87b03f5SDavid Howells index += thp_nr_pages(page); 577*e87b03f5SDavid 
Howells if (!pagevec_add(&pvec, page)) 578*e87b03f5SDavid Howells break; 579*e87b03f5SDavid Howells if (stop) 580*e87b03f5SDavid Howells break; 581*e87b03f5SDavid Howells } 582*e87b03f5SDavid Howells 583*e87b03f5SDavid Howells if (!stop) 584*e87b03f5SDavid Howells xas_pause(&xas); 585*e87b03f5SDavid Howells rcu_read_unlock(); 586*e87b03f5SDavid Howells 587*e87b03f5SDavid Howells /* Now, if we obtained any pages, we can shift them to being 588*e87b03f5SDavid Howells * writable and mark them for caching. 589*e87b03f5SDavid Howells */ 590*e87b03f5SDavid Howells if (!pagevec_count(&pvec)) 591*e87b03f5SDavid Howells break; 592*e87b03f5SDavid Howells 593*e87b03f5SDavid Howells for (i = 0; i < pagevec_count(&pvec); i++) { 594*e87b03f5SDavid Howells page = pvec.pages[i]; 59567d78a6fSDavid Howells trace_afs_page_dirty(vnode, tracepoint_string("store+"), page); 59613524ab3SDavid Howells 59731143d5dSDavid Howells if (!clear_page_dirty_for_io(page)) 59831143d5dSDavid Howells BUG(); 59931143d5dSDavid Howells if (test_set_page_writeback(page)) 60031143d5dSDavid Howells BUG(); 601*e87b03f5SDavid Howells 602*e87b03f5SDavid Howells *_count -= thp_nr_pages(page); 60331143d5dSDavid Howells unlock_page(page); 60431143d5dSDavid Howells } 60531143d5dSDavid Howells 606*e87b03f5SDavid Howells pagevec_release(&pvec); 607*e87b03f5SDavid Howells cond_resched(); 608*e87b03f5SDavid Howells } while (!stop); 60931143d5dSDavid Howells 610*e87b03f5SDavid Howells *_len = len; 611810caa3eSDavid Howells } 612810caa3eSDavid Howells 613810caa3eSDavid Howells /* 614810caa3eSDavid Howells * Synchronously write back the locked page and any subsequent non-locked dirty 615810caa3eSDavid Howells * pages. 
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 *
 * @mapping: The pagecache being written back.
 * @wbc:     Writeback control; nr_to_write is consumed as pages are queued.
 * @page:    The locked first page of the region; unlocked before the store.
 * @start:   File offset of @page.
 * @end:     Inclusive end offset of the permissible writeback window.
 *
 * Returns the number of bytes written on success, or a negative errno.
 * On failure the affected pages are either redirtied (retryable errors)
 * or killed (fatal errors), and the mapping error is set.
 */
static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
					       struct writeback_control *wbc,
					       struct page *page,
					       loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", page->index, start, end);

	if (test_set_page_writeback(page))
		BUG();

	count -= thp_nr_pages(page);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = page_private(page);
	offset = afs_page_dirty_from(page, priv);
	to = afs_page_dirty_to(page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("store"), page);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == thp_size(page) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(page);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* Retryable: hand the pages back to writeback. */
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		/* Fatal: discard the pages from the pagecache. */
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
Silva fallthrough; 6934343d008SDavid Howells case -EACCES: 6944343d008SDavid Howells case -EPERM: 6954343d008SDavid Howells case -ENOKEY: 6964343d008SDavid Howells case -EKEYEXPIRED: 6974343d008SDavid Howells case -EKEYREJECTED: 6984343d008SDavid Howells case -EKEYREVOKED: 699*e87b03f5SDavid Howells afs_redirty_pages(wbc, mapping, start, len); 7004343d008SDavid Howells mapping_set_error(mapping, ret); 7014343d008SDavid Howells break; 7024343d008SDavid Howells 70331143d5dSDavid Howells case -EDQUOT: 70431143d5dSDavid Howells case -ENOSPC: 705*e87b03f5SDavid Howells afs_redirty_pages(wbc, mapping, start, len); 7064343d008SDavid Howells mapping_set_error(mapping, -ENOSPC); 70731143d5dSDavid Howells break; 7084343d008SDavid Howells 70931143d5dSDavid Howells case -EROFS: 71031143d5dSDavid Howells case -EIO: 71131143d5dSDavid Howells case -EREMOTEIO: 71231143d5dSDavid Howells case -EFBIG: 71331143d5dSDavid Howells case -ENOENT: 71431143d5dSDavid Howells case -ENOMEDIUM: 71531143d5dSDavid Howells case -ENXIO: 716f51375cdSDavid Howells trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail); 717*e87b03f5SDavid Howells afs_kill_pages(mapping, start, len); 7184343d008SDavid Howells mapping_set_error(mapping, ret); 71931143d5dSDavid Howells break; 72031143d5dSDavid Howells } 72131143d5dSDavid Howells 72231143d5dSDavid Howells _leave(" = %d", ret); 72331143d5dSDavid Howells return ret; 72431143d5dSDavid Howells } 72531143d5dSDavid Howells 72631143d5dSDavid Howells /* 72731143d5dSDavid Howells * write a page back to the server 72831143d5dSDavid Howells * - the caller locked the page for us 72931143d5dSDavid Howells */ 73031143d5dSDavid Howells int afs_writepage(struct page *page, struct writeback_control *wbc) 73131143d5dSDavid Howells { 732*e87b03f5SDavid Howells ssize_t ret; 733*e87b03f5SDavid Howells loff_t start; 73431143d5dSDavid Howells 73531143d5dSDavid Howells _enter("{%lx},", page->index); 73631143d5dSDavid Howells 737*e87b03f5SDavid Howells start = page->index 
* PAGE_SIZE; 7384343d008SDavid Howells ret = afs_write_back_from_locked_page(page->mapping, wbc, page, 739*e87b03f5SDavid Howells start, LLONG_MAX - start); 74031143d5dSDavid Howells if (ret < 0) { 741*e87b03f5SDavid Howells _leave(" = %zd", ret); 742*e87b03f5SDavid Howells return ret; 74331143d5dSDavid Howells } 74431143d5dSDavid Howells 74531143d5dSDavid Howells _leave(" = 0"); 74631143d5dSDavid Howells return 0; 74731143d5dSDavid Howells } 74831143d5dSDavid Howells 74931143d5dSDavid Howells /* 75031143d5dSDavid Howells * write a region of pages back to the server 75131143d5dSDavid Howells */ 752c1206a2cSAdrian Bunk static int afs_writepages_region(struct address_space *mapping, 75331143d5dSDavid Howells struct writeback_control *wbc, 754*e87b03f5SDavid Howells loff_t start, loff_t end, loff_t *_next) 75531143d5dSDavid Howells { 75631143d5dSDavid Howells struct page *page; 757*e87b03f5SDavid Howells ssize_t ret; 758*e87b03f5SDavid Howells int n; 75931143d5dSDavid Howells 760*e87b03f5SDavid Howells _enter("%llx,%llx,", start, end); 76131143d5dSDavid Howells 76231143d5dSDavid Howells do { 763*e87b03f5SDavid Howells pgoff_t index = start / PAGE_SIZE; 764*e87b03f5SDavid Howells 765*e87b03f5SDavid Howells n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE, 766aef6e415SJan Kara PAGECACHE_TAG_DIRTY, 1, &page); 76731143d5dSDavid Howells if (!n) 76831143d5dSDavid Howells break; 76931143d5dSDavid Howells 770*e87b03f5SDavid Howells start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */ 771*e87b03f5SDavid Howells 77231143d5dSDavid Howells _debug("wback %lx", page->index); 77331143d5dSDavid Howells 774*e87b03f5SDavid Howells /* At this point we hold neither the i_pages lock nor the 775b93b0163SMatthew Wilcox * page lock: the page may be truncated or invalidated 776b93b0163SMatthew Wilcox * (changing page->mapping to NULL), or even swizzled 777b93b0163SMatthew Wilcox * back from swapper_space to tmpfs file mapping 77831143d5dSDavid Howells */ 
779*e87b03f5SDavid Howells if (wbc->sync_mode != WB_SYNC_NONE) { 7804343d008SDavid Howells ret = lock_page_killable(page); 7814343d008SDavid Howells if (ret < 0) { 7824343d008SDavid Howells put_page(page); 7834343d008SDavid Howells return ret; 7844343d008SDavid Howells } 785*e87b03f5SDavid Howells } else { 786*e87b03f5SDavid Howells if (!trylock_page(page)) { 787*e87b03f5SDavid Howells put_page(page); 788*e87b03f5SDavid Howells return 0; 789*e87b03f5SDavid Howells } 790*e87b03f5SDavid Howells } 79131143d5dSDavid Howells 792c5051c7bSDavid Howells if (page->mapping != mapping || !PageDirty(page)) { 793*e87b03f5SDavid Howells start += thp_size(page); 79431143d5dSDavid Howells unlock_page(page); 79509cbfeafSKirill A. Shutemov put_page(page); 79631143d5dSDavid Howells continue; 79731143d5dSDavid Howells } 79831143d5dSDavid Howells 799c5051c7bSDavid Howells if (PageWriteback(page)) { 800c5051c7bSDavid Howells unlock_page(page); 80131143d5dSDavid Howells if (wbc->sync_mode != WB_SYNC_NONE) 80231143d5dSDavid Howells wait_on_page_writeback(page); 80329c8bbbdSDavid Howells put_page(page); 80431143d5dSDavid Howells continue; 80531143d5dSDavid Howells } 80631143d5dSDavid Howells 80765a15109SDavid Howells if (!clear_page_dirty_for_io(page)) 80865a15109SDavid Howells BUG(); 809*e87b03f5SDavid Howells ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end); 81009cbfeafSKirill A. 
Shutemov put_page(page); 81131143d5dSDavid Howells if (ret < 0) { 812*e87b03f5SDavid Howells _leave(" = %zd", ret); 81331143d5dSDavid Howells return ret; 81431143d5dSDavid Howells } 81531143d5dSDavid Howells 816*e87b03f5SDavid Howells start += ret * PAGE_SIZE; 81731143d5dSDavid Howells 81831143d5dSDavid Howells cond_resched(); 819*e87b03f5SDavid Howells } while (wbc->nr_to_write > 0); 82031143d5dSDavid Howells 821*e87b03f5SDavid Howells *_next = start; 822*e87b03f5SDavid Howells _leave(" = 0 [%llx]", *_next); 82331143d5dSDavid Howells return 0; 82431143d5dSDavid Howells } 82531143d5dSDavid Howells 82631143d5dSDavid Howells /* 82731143d5dSDavid Howells * write some of the pending data back to the server 82831143d5dSDavid Howells */ 82931143d5dSDavid Howells int afs_writepages(struct address_space *mapping, 83031143d5dSDavid Howells struct writeback_control *wbc) 83131143d5dSDavid Howells { 832ec0fa0b6SDavid Howells struct afs_vnode *vnode = AFS_FS_I(mapping->host); 833*e87b03f5SDavid Howells loff_t start, next; 83431143d5dSDavid Howells int ret; 83531143d5dSDavid Howells 83631143d5dSDavid Howells _enter(""); 83731143d5dSDavid Howells 838ec0fa0b6SDavid Howells /* We have to be careful as we can end up racing with setattr() 839ec0fa0b6SDavid Howells * truncating the pagecache since the caller doesn't take a lock here 840ec0fa0b6SDavid Howells * to prevent it. 
841ec0fa0b6SDavid Howells */ 842ec0fa0b6SDavid Howells if (wbc->sync_mode == WB_SYNC_ALL) 843ec0fa0b6SDavid Howells down_read(&vnode->validate_lock); 844ec0fa0b6SDavid Howells else if (!down_read_trylock(&vnode->validate_lock)) 845ec0fa0b6SDavid Howells return 0; 846ec0fa0b6SDavid Howells 84731143d5dSDavid Howells if (wbc->range_cyclic) { 848*e87b03f5SDavid Howells start = mapping->writeback_index * PAGE_SIZE; 849*e87b03f5SDavid Howells ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next); 8501b430beeSWu Fengguang if (start > 0 && wbc->nr_to_write > 0 && ret == 0) 85131143d5dSDavid Howells ret = afs_writepages_region(mapping, wbc, 0, start, 85231143d5dSDavid Howells &next); 853*e87b03f5SDavid Howells mapping->writeback_index = next / PAGE_SIZE; 85431143d5dSDavid Howells } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { 855*e87b03f5SDavid Howells ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next); 85631143d5dSDavid Howells if (wbc->nr_to_write > 0) 85731143d5dSDavid Howells mapping->writeback_index = next; 85831143d5dSDavid Howells } else { 859*e87b03f5SDavid Howells ret = afs_writepages_region(mapping, wbc, 860*e87b03f5SDavid Howells wbc->range_start, wbc->range_end, &next); 86131143d5dSDavid Howells } 86231143d5dSDavid Howells 863ec0fa0b6SDavid Howells up_read(&vnode->validate_lock); 86431143d5dSDavid Howells _leave(" = %d", ret); 86531143d5dSDavid Howells return ret; 86631143d5dSDavid Howells } 86731143d5dSDavid Howells 86831143d5dSDavid Howells /* 86931143d5dSDavid Howells * write to an AFS file 87031143d5dSDavid Howells */ 87150b5551dSAl Viro ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) 87231143d5dSDavid Howells { 873496ad9aaSAl Viro struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); 87431143d5dSDavid Howells ssize_t result; 87550b5551dSAl Viro size_t count = iov_iter_count(from); 87631143d5dSDavid Howells 8773b6492dfSDavid Howells _enter("{%llx:%llu},{%zu},", 87850b5551dSAl Viro 
vnode->fid.vid, vnode->fid.vnode, count); 87931143d5dSDavid Howells 88031143d5dSDavid Howells if (IS_SWAPFILE(&vnode->vfs_inode)) { 88131143d5dSDavid Howells printk(KERN_INFO 88231143d5dSDavid Howells "AFS: Attempt to write to active swap file!\n"); 88331143d5dSDavid Howells return -EBUSY; 88431143d5dSDavid Howells } 88531143d5dSDavid Howells 88631143d5dSDavid Howells if (!count) 88731143d5dSDavid Howells return 0; 88831143d5dSDavid Howells 88950b5551dSAl Viro result = generic_file_write_iter(iocb, from); 89031143d5dSDavid Howells 89131143d5dSDavid Howells _leave(" = %zd", result); 89231143d5dSDavid Howells return result; 89331143d5dSDavid Howells } 89431143d5dSDavid Howells 89531143d5dSDavid Howells /* 89631143d5dSDavid Howells * flush any dirty pages for this process, and check for write errors. 89731143d5dSDavid Howells * - the return status from this call provides a reliable indication of 89831143d5dSDavid Howells * whether any write errors occurred for this process. 89931143d5dSDavid Howells */ 90002c24a82SJosef Bacik int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 90131143d5dSDavid Howells { 9023c981bfcSAl Viro struct inode *inode = file_inode(file); 9033c981bfcSAl Viro struct afs_vnode *vnode = AFS_FS_I(inode); 90431143d5dSDavid Howells 9053b6492dfSDavid Howells _enter("{%llx:%llu},{n=%pD},%d", 9063c981bfcSAl Viro vnode->fid.vid, vnode->fid.vnode, file, 90731143d5dSDavid Howells datasync); 90831143d5dSDavid Howells 9094343d008SDavid Howells return file_write_and_wait_range(file, start, end); 91031143d5dSDavid Howells } 9119b3f26c9SDavid Howells 9129b3f26c9SDavid Howells /* 9139b3f26c9SDavid Howells * notification that a previously read-only page is about to become writable 9149b3f26c9SDavid Howells * - if it returns an error, the caller will deliver a bus error signal 9159b3f26c9SDavid Howells */ 9160722f186SSouptick Joarder vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) 9179b3f26c9SDavid Howells { 918*e87b03f5SDavid Howells 
struct page *page = thp_head(vmf->page); 9191cf7a151SDavid Howells struct file *file = vmf->vma->vm_file; 9201cf7a151SDavid Howells struct inode *inode = file_inode(file); 9211cf7a151SDavid Howells struct afs_vnode *vnode = AFS_FS_I(inode); 9221cf7a151SDavid Howells unsigned long priv; 9239b3f26c9SDavid Howells 924*e87b03f5SDavid Howells _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index); 9259b3f26c9SDavid Howells 9261cf7a151SDavid Howells sb_start_pagefault(inode->i_sb); 9271cf7a151SDavid Howells 9281cf7a151SDavid Howells /* Wait for the page to be written to the cache before we allow it to 9291cf7a151SDavid Howells * be modified. We then assume the entire page will need writing back. 9301cf7a151SDavid Howells */ 931630f5ddaSDavid Howells #ifdef CONFIG_AFS_FSCACHE 932*e87b03f5SDavid Howells if (PageFsCache(page) && 933*e87b03f5SDavid Howells wait_on_page_bit_killable(page, PG_fscache) < 0) 934630f5ddaSDavid Howells return VM_FAULT_RETRY; 935630f5ddaSDavid Howells #endif 9369b3f26c9SDavid Howells 937*e87b03f5SDavid Howells if (wait_on_page_writeback_killable(page)) 9381cf7a151SDavid Howells return VM_FAULT_RETRY; 9391cf7a151SDavid Howells 940*e87b03f5SDavid Howells if (lock_page_killable(page) < 0) 9411cf7a151SDavid Howells return VM_FAULT_RETRY; 9421cf7a151SDavid Howells 9431cf7a151SDavid Howells /* We mustn't change page->private until writeback is complete as that 9441cf7a151SDavid Howells * details the portion of the page we need to write back and we might 9451cf7a151SDavid Howells * need to redirty the page if there's a problem. 
9461cf7a151SDavid Howells */ 947*e87b03f5SDavid Howells wait_on_page_writeback(page); 9481cf7a151SDavid Howells 949*e87b03f5SDavid Howells priv = afs_page_dirty(page, 0, thp_size(page)); 950f86726a6SDavid Howells priv = afs_page_dirty_mmapped(priv); 951*e87b03f5SDavid Howells if (PagePrivate(page)) { 952*e87b03f5SDavid Howells set_page_private(page, priv); 953*e87b03f5SDavid Howells trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page); 954*e87b03f5SDavid Howells } else { 955*e87b03f5SDavid Howells attach_page_private(page, (void *)priv); 956*e87b03f5SDavid Howells trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page); 957*e87b03f5SDavid Howells } 958bb413489SDavid Howells file_update_time(file); 9591cf7a151SDavid Howells 9601cf7a151SDavid Howells sb_end_pagefault(inode->i_sb); 9611cf7a151SDavid Howells return VM_FAULT_LOCKED; 9629b3f26c9SDavid Howells } 9634343d008SDavid Howells 9644343d008SDavid Howells /* 9654343d008SDavid Howells * Prune the keys cached for writeback. The caller must hold vnode->wb_lock. 
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	/* Only prune when the mapping has no dirty or in-flight pages that
	 * might still need one of these keys.
	 */
	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			/* A usage count of 1 means only the list holds it. */
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	/* Drop the reaped keys outside of the lock. */
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		/* Default to storing the whole page; narrow to the recorded
		 * dirty region if one is attached.
		 */
		f = 0;
		t = thp_size(page);
		if (PagePrivate(page)) {
			f = afs_page_dirty_from(page, priv);
			t = afs_page_dirty_to(page, priv);
		}

		/* Write the dirty region synchronously via a bvec iterator. */
		bv[0].bv_page = page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
		ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
				     true);
	}

	/* Strip the dirty-region record and wait for the cache to finish
	 * with the page before it's invalidated.
	 */
	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
	detach_page_private(page);
	wait_on_page_fscache(page);
	return ret;
}